ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
e3abe368-daaa-479e-a903-fb97f4bc69c4 | cpp | google/quiche | aes_128_gcm_12_decrypter | quiche/quic/core/crypto/aes_128_gcm_12_decrypter.cc | quiche/quic/core/crypto/aes_128_gcm_12_decrypter_test.cc | #include "quiche/quic/core/crypto/aes_128_gcm_12_decrypter.h"
#include "openssl/aead.h"
#include "openssl/tls1.h"
namespace quic {
namespace {
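// AES-128 uses a 16-byte key and GCM's standard 12-byte nonce. The "12" in
// the class name refers to the truncated 12-byte authentication tag
// (kAuthTagSize, inherited from the base decrypter).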
const size_t kKeySize = 16;
const size_t kNonceSize = 12;
}
Aes128Gcm12Decrypter::Aes128Gcm12Decrypter()
    : AesBaseDecrypter(EVP_aead_aes_128_gcm, kKeySize, kAuthTagSize, kNonceSize,
                       /* use_ietf_nonce_construction */ false) {
static_assert(kKeySize <= kMaxKeySize, "key size too big");
static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
Aes128Gcm12Decrypter::~Aes128Gcm12Decrypter() {}
uint32_t Aes128Gcm12Decrypter::cipher_id() const {
return TLS1_CK_AES_128_GCM_SHA256;
}
} | #include "quiche/quic/core/crypto/aes_128_gcm_12_decrypter.h"
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
struct TestGroupInfo {
size_t key_len;
size_t iv_len;
size_t pt_len;
size_t aad_len;
size_t tag_len;
};
struct TestVector {
const char* key;
const char* iv;
const char* ct;
const char* aad;
const char* tag;
const char* pt;
};
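// Vectors in the NIST GCM decryption test format (gcmDecrypt128.rsp style):
// each group fixes the key/IV/plaintext/AAD/tag lengths in bits, and a null
// |pt| marks a vector whose tag is invalid, so decryption must fail.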
const TestGroupInfo test_group_info[] = {
{128, 96, 0, 0, 128}, {128, 96, 0, 128, 128}, {128, 96, 128, 0, 128},
{128, 96, 408, 160, 128}, {128, 96, 408, 720, 128}, {128, 96, 104, 0, 128},
};
const TestVector test_group_0[] = {
{"cf063a34d4a9a76c2c86787d3f96db71", "113b9785971864c83b01c787", "", "",
"72ac8493e3a5228b5d130a69d2510e42", ""},
{
"a49a5e26a2f8cb63d05546c2a62f5343", "907763b19b9b4ab6bd4f0281", "", "",
"a2be08210d8c470a8df6e8fbd79ec5cf",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_1[] = {
{
"d1f6af919cde85661208bdce0c27cb22", "898c6929b435017bf031c3c5", "",
"7c5faa40e636bbc91107e68010c92b9f", "ae45f11777540a2caeb128be8092468a",
nullptr
},
{"2370e320d4344208e0ff5683f243b213", "04dbb82f044d30831c441228", "",
"d43a8e5089eea0d026c03a85178b27da", "2a049c049d25aa95969b451d93c31c6e",
""},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_2[] = {
{"e98b72a9881a84ca6b76e0f43e68647a", "8b23299fde174053f3d652ba",
"5a3c1cf1985dbb8bed818036fdd5ab42", "", "23c7ab0f952b7091cd324835043b5eb5",
"28286a321293253c3e0aa2704a278032"},
{"33240636cd3236165f1a553b773e728e", "17c4d61493ecdc8f31700b12",
"47bb7e23f7bdfe05a8091ac90e4f8b2e", "", "b723c70e931d9785f40fd4ab1d612dc9",
"95695a5b12f2870b9cc5fdc8f218a97d"},
{
"5164df856f1e9cac04a79b808dc5be39", "e76925d5355e0584ce871b2b",
"0216c899c88d6e32c958c7e553daa5bc", "",
"a145319896329c96df291f64efbe0e3a",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_3[] = {
{"af57f42c60c0fc5a09adb81ab86ca1c3", "a2dc01871f37025dc0fc9a79",
"b9a535864f48ea7b6b1367914978f9bfa087d854bb0e269bed8d279d2eea1210e48947"
"338b22f9bad09093276a331e9c79c7f4",
"41dc38988945fcb44faf2ef72d0061289ef8efd8",
"4f71e72bde0018f555c5adcce062e005",
"3803a0727eeb0ade441e0ec107161ded2d425ec0d102f21f51bf2cf9947c7ec4aa7279"
"5b2f69b041596e8817d0a3c16f8fadeb"},
{"ebc753e5422b377d3cb64b58ffa41b61", "2e1821efaced9acf1f241c9b",
"069567190554e9ab2b50a4e1fbf9c147340a5025fdbd201929834eaf6532325899ccb9"
"f401823e04b05817243d2142a3589878",
"b9673412fd4f88ba0e920f46dd6438ff791d8eef",
"534d9234d2351cf30e565de47baece0b",
"39077edb35e9c5a4b1e4c2a6b9bb1fce77f00f5023af40333d6d699014c2bcf4209c18"
"353a18017f5b36bfc00b1f6dcb7ed485"},
{
"52bdbbf9cf477f187ec010589cb39d58", "d3be36d3393134951d324b31",
"700188da144fa692cf46e4a8499510a53d90903c967f7f13e8a1bd8151a74adc4fe63e"
"32b992760b3a5f99e9a47838867000a9",
"93c4fc6a4135f54d640b0c976bf755a06a292c33",
"8ca4e38aa3dfa6b1d0297021ccf3ea5f",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_4[] = {
{"da2bb7d581493d692380c77105590201", "44aa3e7856ca279d2eb020c6",
"9290d430c9e89c37f0446dbd620c9a6b34b1274aeb6f911f75867efcf95b6feda69f1a"
"f4ee16c761b3c9aeac3da03aa9889c88",
"4cd171b23bddb3a53cdf959d5c1710b481eb3785a90eb20a2345ee00d0bb7868c367ab"
"12e6f4dd1dee72af4eee1d197777d1d6499cc541f34edbf45cda6ef90b3c024f9272d7"
"2ec1909fb8fba7db88a4d6f7d3d925980f9f9f72",
"9e3ac938d3eb0cadd6f5c9e35d22ba38",
"9bbf4c1a2742f6ac80cb4e8a052e4a8f4f07c43602361355b717381edf9fabd4cb7e3a"
"d65dbd1378b196ac270588dd0621f642"},
{"d74e4958717a9d5c0e235b76a926cae8", "0b7471141e0c70b1995fd7b1",
"e701c57d2330bf066f9ff8cf3ca4343cafe4894651cd199bdaaa681ba486b4a65c5a22"
"b0f1420be29ea547d42c713bc6af66aa",
"4a42b7aae8c245c6f1598a395316e4b8484dbd6e64648d5e302021b1d3fa0a38f46e22"
"bd9c8080b863dc0016482538a8562a4bd0ba84edbe2697c76fd039527ac179ec5506cf"
"34a6039312774cedebf4961f3978b14a26509f96",
"e192c23cb036f0b31592989119eed55d",
"840d9fb95e32559fb3602e48590280a172ca36d9b49ab69510f5bd552bfab7a306f85f"
"f0a34bc305b88b804c60b90add594a17"},
{
"1986310c725ac94ecfe6422e75fc3ee7", "93ec4214fa8e6dc4e3afc775",
"b178ec72f85a311ac4168f42a4b2c23113fbea4b85f4b9dabb74e143eb1b8b0a361e02"
"43edfd365b90d5b325950df0ada058f9",
"e80b88e62c49c958b5e0b8b54f532d9ff6aa84c8a40132e93e55b59fc24e8decf28463"
"139f155d1e8ce4ee76aaeefcd245baa0fc519f83a5fb9ad9aa40c4b21126013f576c42"
"72c2cb136c8fd091cc4539877a5d1e72d607f960",
"8b347853f11d75e81e8a95010be81f17",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_5[] = {
{"387218b246c1a8257748b56980e50c94", "dd7e014198672be39f95b69d",
"cdba9e73eaf3d38eceb2b04a8d", "", "ecf90f4a47c9c626d6fb2c765d201556",
"48f5b426baca03064554cc2b30"},
{"294de463721e359863887c820524b3d4", "3338b35c9d57a5d28190e8c9",
"2f46634e74b8e4c89812ac83b9", "", "dabd506764e68b82a7e720aa18da0abe",
"46a2e55c8e264df211bd112685"},
{"28ead7fd2179e0d12aa6d5d88c58c2dc", "5055347f18b4d5add0ae5c41",
"142d8210c3fb84774cdbd0447a", "", "5fd321d9cdb01952dc85f034736c2a7d",
"3b95b981086ee73cc4d0cc1422"},
{
"7d7b6c988137b8d470c57bf674a09c87", "9edf2aa970d016ac962e1fd8",
"a85b66c3cb5eab91d5bdc8bc0e", "", "dc054efc01f3afd21d9c2484819f569a",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector* const test_group_array[] = {
test_group_0, test_group_1, test_group_2,
test_group_3, test_group_4, test_group_5,
};
}
namespace quic {
namespace test {
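// Decrypts |ciphertext| by splitting |nonce| into a prefix and a trailing
// 8-byte packet number, mirroring how QUIC constructs per-packet nonces.
// Returns nullptr if decryption or tag verification fails.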
QuicData* DecryptWithNonce(Aes128Gcm12Decrypter* decrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view ciphertext) {
uint64_t packet_number;
absl::string_view nonce_prefix(nonce.data(),
nonce.size() - sizeof(packet_number));
decrypter->SetNoncePrefix(nonce_prefix);
memcpy(&packet_number, nonce.data() + nonce_prefix.size(),
sizeof(packet_number));
std::unique_ptr<char[]> output(new char[ciphertext.length()]);
size_t output_length = 0;
const bool success = decrypter->DecryptPacket(
packet_number, associated_data, ciphertext, output.get(), &output_length,
ciphertext.length());
if (!success) {
return nullptr;
}
return new QuicData(output.release(), output_length, true);
}
class Aes128Gcm12DecrypterTest : public QuicTest {};
TEST_F(Aes128Gcm12DecrypterTest, Decrypt) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(test_group_array); i++) {
SCOPED_TRACE(i);
const TestVector* test_vectors = test_group_array[i];
const TestGroupInfo& test_info = test_group_info[i];
for (size_t j = 0; test_vectors[j].key != nullptr; j++) {
bool has_pt = test_vectors[j].pt;
std::string key;
std::string iv;
std::string ct;
std::string aad;
std::string tag;
std::string pt;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].ct, &ct));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].tag, &tag));
if (has_pt) {
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].pt, &pt));
}
EXPECT_EQ(test_info.key_len, key.length() * 8);
EXPECT_EQ(test_info.iv_len, iv.length() * 8);
EXPECT_EQ(test_info.pt_len, ct.length() * 8);
EXPECT_EQ(test_info.aad_len, aad.length() * 8);
EXPECT_EQ(test_info.tag_len, tag.length() * 8);
if (has_pt) {
EXPECT_EQ(test_info.pt_len, pt.length() * 8);
}
ASSERT_LE(static_cast<size_t>(Aes128Gcm12Decrypter::kAuthTagSize),
tag.length());
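      // The decrypter checks only the first kAuthTagSize (12) bytes of the
      // 16-byte GCM tag, so truncate the vector's tag before appending it.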
tag.resize(Aes128Gcm12Decrypter::kAuthTagSize);
std::string ciphertext = ct + tag;
Aes128Gcm12Decrypter decrypter;
ASSERT_TRUE(decrypter.SetKey(key));
std::unique_ptr<QuicData> decrypted(DecryptWithNonce(
&decrypter, iv,
aad.length() ? aad : absl::string_view(), ciphertext));
if (!decrypted) {
EXPECT_FALSE(has_pt);
continue;
}
EXPECT_TRUE(has_pt);
ASSERT_EQ(pt.length(), decrypted->length());
quiche::test::CompareCharArraysWithHexError(
"plaintext", decrypted->data(), pt.length(), pt.data(), pt.length());
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_128_gcm_12_decrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_128_gcm_12_decrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
8972e675-3107-459c-ae27-431e066074a2 | cpp | google/googletest | gtest_pred_impl | googletest/include/gtest/gtest_pred_impl.h | googletest/test/gtest_pred_impl_unittest.cc | #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#include "gtest/gtest-assertion-result.h"
#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-port.h"
namespace testing {
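// Evaluates an AssertionResult expression and, on failure, routes its message
// to on_failure (a fatal or non-fatal failure macro). The dangling-else
// blocker keeps the macro safe inside unbraced if/else statements.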
#define GTEST_ASSERT_(expression, on_failure) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (const ::testing::AssertionResult gtest_ar = (expression)) \
; \
else \
on_failure(gtest_ar.failure_message())
template <typename Pred, typename T1>
AssertionResult AssertPred1Helper(const char* pred_text, const char* e1,
Pred pred, const T1& v1) {
if (pred(v1)) return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1);
}
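// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1: the
// pred_format function receives both the stringified expression and its value.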
#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure) \
GTEST_ASSERT_(pred_format(#v1, v1), on_failure)
#define GTEST_PRED1_(pred, v1, on_failure) \
GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, #v1, pred, v1), on_failure)
#define EXPECT_PRED_FORMAT1(pred_format, v1) \
GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT1(pred_format, v1) \
GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
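// The 2- to 5-ary helpers and macros below repeat the same pattern, adding
// one stringified-expression/value pair per arity.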
template <typename Pred, typename T1, typename T2>
AssertionResult AssertPred2Helper(const char* pred_text, const char* e1,
const char* e2, Pred pred, const T1& v1,
const T2& v2) {
if (pred(v1, v2)) return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ", " << e2
<< ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2);
}
#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), on_failure)
#define GTEST_PRED2_(pred, v1, v2, on_failure) \
GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, #v1, #v2, pred, v1, v2), \
on_failure)
#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED2(pred, v1, v2) \
GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED2(pred, v1, v2) \
GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
template <typename Pred, typename T1, typename T2, typename T3>
AssertionResult AssertPred3Helper(const char* pred_text, const char* e1,
const char* e2, const char* e3, Pred pred,
const T1& v1, const T2& v2, const T3& v3) {
if (pred(v1, v2, v3)) return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3
<< ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
<< e3 << " evaluates to " << ::testing::PrintToString(v3);
}
#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), on_failure)
#define GTEST_PRED3_(pred, v1, v2, v3, on_failure) \
GTEST_ASSERT_( \
::testing::AssertPred3Helper(#pred, #v1, #v2, #v3, pred, v1, v2, v3), \
on_failure)
#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED3(pred, v1, v2, v3) \
GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED3(pred, v1, v2, v3) \
GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
template <typename Pred, typename T1, typename T2, typename T3, typename T4>
AssertionResult AssertPred4Helper(const char* pred_text, const char* e1,
const char* e2, const char* e3,
const char* e4, Pred pred, const T1& v1,
const T2& v2, const T3& v3, const T4& v4) {
if (pred(v1, v2, v3, v4)) return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
<< ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
<< e3 << " evaluates to " << ::testing::PrintToString(v3) << "\n"
<< e4 << " evaluates to " << ::testing::PrintToString(v4);
}
#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), on_failure)
#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure) \
GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, #v1, #v2, #v3, #v4, pred, \
v1, v2, v3, v4), \
on_failure)
#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
template <typename Pred, typename T1, typename T2, typename T3, typename T4,
typename T5>
AssertionResult AssertPred5Helper(const char* pred_text, const char* e1,
const char* e2, const char* e3,
const char* e4, const char* e5, Pred pred,
const T1& v1, const T2& v2, const T3& v3,
const T4& v4, const T5& v5) {
if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
<< ", " << e5 << ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
<< e3 << " evaluates to " << ::testing::PrintToString(v3) << "\n"
<< e4 << " evaluates to " << ::testing::PrintToString(v4) << "\n"
<< e5 << " evaluates to " << ::testing::PrintToString(v5);
}
#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
on_failure)
#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure) \
GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, #v1, #v2, #v3, #v4, #v5, \
pred, v1, v2, v3, v4, v5), \
on_failure)
#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
}
#endif | #include <iostream>
#include <ostream>
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
template <typename T1>
bool PredFunction1(T1 v1) {
return v1 > 0;
}
bool PredFunction1Int(int v1) { return v1 > 0; }
bool PredFunction1Bool(Bool v1) { return v1 > 0; }
struct PredFunctor1 {
template <typename T1>
bool operator()(const T1& v1) {
return v1 > 0;
}
};
template <typename T1>
testing::AssertionResult PredFormatFunction1(const char* e1, const T1& v1) {
if (PredFunction1(v1)) return testing::AssertionSuccess();
return testing::AssertionFailure()
<< e1 << " is expected to be positive, but evaluates to " << v1 << ".";
}
struct PredFormatFunctor1 {
template <typename T1>
testing::AssertionResult operator()(const char* e1, const T1& v1) const {
return PredFormatFunction1(e1, v1);
}
};
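// Fixture for the unary predicate assertions. Each assertion's argument
// increments n1_ as a side effect; TearDown() verifies that the argument was
// evaluated exactly once and that the test aborted (or not) as expected.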
class Predicate1Test : public testing::Test {
protected:
void SetUp() override {
expected_to_finish_ = true;
finished_ = false;
n1_ = 0;
}
void TearDown() override {
EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 "
"exactly once.";
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpectedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
static bool expected_to_finish_;
static bool finished_;
static int n1_;
};
bool Predicate1Test::expected_to_finish_;
bool Predicate1Test::finished_;
int Predicate1Test::n1_;
typedef Predicate1Test EXPECT_PRED_FORMAT1Test;
typedef Predicate1Test ASSERT_PRED_FORMAT1Test;
typedef Predicate1Test EXPECT_PRED1Test;
typedef Predicate1Test ASSERT_PRED1Test;
TEST_F(EXPECT_PRED1Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED1(PredFunction1Int, ++n1_);
finished_ = true;
}
TEST_F(EXPECT_PRED1Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED1(PredFunction1Bool, Bool(++n1_));
finished_ = true;
}
TEST_F(EXPECT_PRED1Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED1(PredFunctor1(), ++n1_);
finished_ = true;
}
TEST_F(EXPECT_PRED1Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED1(PredFunctor1(), Bool(++n1_));
finished_ = true;
}
TEST_F(EXPECT_PRED1Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED1(PredFunction1Int, n1_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED1Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED1(PredFunction1Bool, Bool(n1_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED1Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED1(PredFunctor1(), n1_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED1Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED1(PredFunctor1(), Bool(n1_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED1Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED1(PredFunction1Int, ++n1_);
finished_ = true;
}
TEST_F(ASSERT_PRED1Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED1(PredFunction1Bool, Bool(++n1_));
finished_ = true;
}
TEST_F(ASSERT_PRED1Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED1(PredFunctor1(), ++n1_);
finished_ = true;
}
TEST_F(ASSERT_PRED1Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED1(PredFunctor1(), Bool(++n1_));
finished_ = true;
}
TEST_F(ASSERT_PRED1Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED1(PredFunction1Int, n1_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED1Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED1(PredFunction1Bool, Bool(n1_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED1Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED1(PredFunctor1(), n1_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED1Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED1(PredFunctor1(), Bool(n1_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT1(PredFormatFunction1, ++n1_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED_FORMAT1(PredFormatFunction1, Bool(++n1_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT1(PredFormatFunctor1(), ++n1_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED_FORMAT1(PredFormatFunctor1(), Bool(++n1_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT1(PredFormatFunction1, n1_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT1(PredFormatFunction1, Bool(n1_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT1(PredFormatFunctor1(), n1_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT1(PredFormatFunctor1(), Bool(n1_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT1(PredFormatFunction1, ++n1_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED_FORMAT1(PredFormatFunction1, Bool(++n1_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT1(PredFormatFunctor1(), ++n1_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED_FORMAT1(PredFormatFunctor1(), Bool(++n1_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT1(PredFormatFunction1, n1_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT1(PredFormatFunction1, Bool(n1_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT1(PredFormatFunctor1(), n1_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT1(PredFormatFunctor1(), Bool(n1_++));
finished_ = true;
},
"");
}
template <typename T1, typename T2>
bool PredFunction2(T1 v1, T2 v2) {
return v1 + v2 > 0;
}
bool PredFunction2Int(int v1, int v2) { return v1 + v2 > 0; }
bool PredFunction2Bool(Bool v1, Bool v2) { return v1 + v2 > 0; }
struct PredFunctor2 {
template <typename T1, typename T2>
bool operator()(const T1& v1, const T2& v2) {
return v1 + v2 > 0;
}
};
template <typename T1, typename T2>
testing::AssertionResult PredFormatFunction2(const char* e1, const char* e2,
const T1& v1, const T2& v2) {
if (PredFunction2(v1, v2)) return testing::AssertionSuccess();
return testing::AssertionFailure()
<< e1 << " + " << e2
<< " is expected to be positive, but evaluates to " << v1 + v2 << ".";
}
struct PredFormatFunctor2 {
template <typename T1, typename T2>
testing::AssertionResult operator()(const char* e1, const char* e2,
const T1& v1, const T2& v2) const {
return PredFormatFunction2(e1, e2, v1, v2);
}
};
class Predicate2Test : public testing::Test {
protected:
void SetUp() override {
expected_to_finish_ = true;
finished_ = false;
n1_ = n2_ = 0;
}
void TearDown() override {
EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 "
"exactly once.";
EXPECT_EQ(1, n2_) << "The predicate assertion didn't evaluate argument 3 "
"exactly once.";
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpectedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
static bool expected_to_finish_;
static bool finished_;
static int n1_;
static int n2_;
};
bool Predicate2Test::expected_to_finish_;
bool Predicate2Test::finished_;
int Predicate2Test::n1_;
int Predicate2Test::n2_;
typedef Predicate2Test EXPECT_PRED_FORMAT2Test;
typedef Predicate2Test ASSERT_PRED_FORMAT2Test;
typedef Predicate2Test EXPECT_PRED2Test;
typedef Predicate2Test ASSERT_PRED2Test;
TEST_F(EXPECT_PRED2Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED2(PredFunction2Int, ++n1_, ++n2_);
finished_ = true;
}
TEST_F(EXPECT_PRED2Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED2(PredFunction2Bool, Bool(++n1_), Bool(++n2_));
finished_ = true;
}
TEST_F(EXPECT_PRED2Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED2(PredFunctor2(), ++n1_, ++n2_);
finished_ = true;
}
TEST_F(EXPECT_PRED2Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED2(PredFunctor2(), Bool(++n1_), Bool(++n2_));
finished_ = true;
}
TEST_F(EXPECT_PRED2Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED2(PredFunction2Int, n1_++, n2_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED2Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED2(PredFunction2Bool, Bool(n1_++), Bool(n2_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED2Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED2(PredFunctor2(), n1_++, n2_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED2Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED2(PredFunctor2(), Bool(n1_++), Bool(n2_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED2Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED2(PredFunction2Int, ++n1_, ++n2_);
finished_ = true;
}
TEST_F(ASSERT_PRED2Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED2(PredFunction2Bool, Bool(++n1_), Bool(++n2_));
finished_ = true;
}
TEST_F(ASSERT_PRED2Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED2(PredFunctor2(), ++n1_, ++n2_);
finished_ = true;
}
TEST_F(ASSERT_PRED2Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED2(PredFunctor2(), Bool(++n1_), Bool(++n2_));
finished_ = true;
}
TEST_F(ASSERT_PRED2Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED2(PredFunction2Int, n1_++, n2_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED2Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED2(PredFunction2Bool, Bool(n1_++), Bool(n2_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED2Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED2(PredFunctor2(), n1_++, n2_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED2Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED2(PredFunctor2(), Bool(n1_++), Bool(n2_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT2(PredFormatFunction2, ++n1_, ++n2_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED_FORMAT2(PredFormatFunction2, Bool(++n1_), Bool(++n2_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT2(PredFormatFunctor2(), ++n1_, ++n2_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED_FORMAT2(PredFormatFunctor2(), Bool(++n1_), Bool(++n2_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT2(PredFormatFunction2, n1_++, n2_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT2(PredFormatFunction2, Bool(n1_++), Bool(n2_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT2(PredFormatFunctor2(), n1_++, n2_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT2(PredFormatFunctor2(), Bool(n1_++), Bool(n2_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT2(PredFormatFunction2, ++n1_, ++n2_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED_FORMAT2(PredFormatFunction2, Bool(++n1_), Bool(++n2_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT2(PredFormatFunctor2(), ++n1_, ++n2_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED_FORMAT2(PredFormatFunctor2(), Bool(++n1_), Bool(++n2_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT2(PredFormatFunction2, n1_++, n2_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT2(PredFormatFunction2, Bool(n1_++), Bool(n2_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT2(PredFormatFunctor2(), n1_++, n2_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT2(PredFormatFunctor2(), Bool(n1_++), Bool(n2_++));
finished_ = true;
},
"");
}
template <typename T1, typename T2, typename T3>
bool PredFunction3(T1 v1, T2 v2, T3 v3) {
return v1 + v2 + v3 > 0;
}
bool PredFunction3Int(int v1, int v2, int v3) { return v1 + v2 + v3 > 0; }
bool PredFunction3Bool(Bool v1, Bool v2, Bool v3) { return v1 + v2 + v3 > 0; }
struct PredFunctor3 {
template <typename T1, typename T2, typename T3>
bool operator()(const T1& v1, const T2& v2, const T3& v3) {
return v1 + v2 + v3 > 0;
}
};
template <typename T1, typename T2, typename T3>
testing::AssertionResult PredFormatFunction3(const char* e1, const char* e2,
const char* e3, const T1& v1,
const T2& v2, const T3& v3) {
if (PredFunction3(v1, v2, v3)) return testing::AssertionSuccess();
return testing::AssertionFailure()
<< e1 << " + " << e2 << " + " << e3
<< " is expected to be positive, but evaluates to " << v1 + v2 + v3
<< ".";
}
struct PredFormatFunctor3 {
template <typename T1, typename T2, typename T3>
testing::AssertionResult operator()(const char* e1, const char* e2,
const char* e3, const T1& v1,
const T2& v2, const T3& v3) const {
return PredFormatFunction3(e1, e2, e3, v1, v2, v3);
}
};
class Predicate3Test : public testing::Test {
protected:
void SetUp() override {
expected_to_finish_ = true;
finished_ = false;
n1_ = n2_ = n3_ = 0;
}
void TearDown() override {
EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 "
"exactly once.";
EXPECT_EQ(1, n2_) << "The predicate assertion didn't evaluate argument 3 "
"exactly once.";
EXPECT_EQ(1, n3_) << "The predicate assertion didn't evaluate argument 4 "
"exactly once.";
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpectedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
static bool expected_to_finish_;
static bool finished_;
static int n1_;
static int n2_;
static int n3_;
};
bool Predicate3Test::expected_to_finish_;
bool Predicate3Test::finished_;
int Predicate3Test::n1_;
int Predicate3Test::n2_;
int Predicate3Test::n3_;
typedef Predicate3Test EXPECT_PRED_FORMAT3Test;
typedef Predicate3Test ASSERT_PRED_FORMAT3Test;
typedef Predicate3Test EXPECT_PRED3Test;
typedef Predicate3Test ASSERT_PRED3Test;
TEST_F(EXPECT_PRED3Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED3(PredFunction3Int, ++n1_, ++n2_, ++n3_);
finished_ = true;
}
TEST_F(EXPECT_PRED3Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED3(PredFunction3Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_));
finished_ = true;
}
TEST_F(EXPECT_PRED3Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED3(PredFunctor3(), ++n1_, ++n2_, ++n3_);
finished_ = true;
}
TEST_F(EXPECT_PRED3Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED3(PredFunctor3(), Bool(++n1_), Bool(++n2_), Bool(++n3_));
finished_ = true;
}
TEST_F(EXPECT_PRED3Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED3(PredFunction3Int, n1_++, n2_++, n3_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED3Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED3(PredFunction3Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED3Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED3(PredFunctor3(), n1_++, n2_++, n3_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED3Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED3(PredFunctor3(), Bool(n1_++), Bool(n2_++), Bool(n3_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED3Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED3(PredFunction3Int, ++n1_, ++n2_, ++n3_);
finished_ = true;
}
TEST_F(ASSERT_PRED3Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED3(PredFunction3Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_));
finished_ = true;
}
TEST_F(ASSERT_PRED3Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED3(PredFunctor3(), ++n1_, ++n2_, ++n3_);
finished_ = true;
}
TEST_F(ASSERT_PRED3Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED3(PredFunctor3(), Bool(++n1_), Bool(++n2_), Bool(++n3_));
finished_ = true;
}
TEST_F(ASSERT_PRED3Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED3(PredFunction3Int, n1_++, n2_++, n3_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED3Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED3(PredFunction3Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED3Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED3(PredFunctor3(), n1_++, n2_++, n3_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED3Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED3(PredFunctor3(), Bool(n1_++), Bool(n2_++), Bool(n3_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT3(PredFormatFunction3, ++n1_, ++n2_, ++n3_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED_FORMAT3(PredFormatFunction3, Bool(++n1_), Bool(++n2_),
Bool(++n3_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT3(PredFormatFunctor3(), ++n1_, ++n2_, ++n3_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED_FORMAT3(PredFormatFunctor3(), Bool(++n1_), Bool(++n2_),
Bool(++n3_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT3(PredFormatFunction3, n1_++, n2_++, n3_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT3(PredFormatFunction3, Bool(n1_++), Bool(n2_++),
Bool(n3_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT3(PredFormatFunctor3(), n1_++, n2_++, n3_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT3(PredFormatFunctor3(), Bool(n1_++), Bool(n2_++),
Bool(n3_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT3(PredFormatFunction3, ++n1_, ++n2_, ++n3_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED_FORMAT3(PredFormatFunction3, Bool(++n1_), Bool(++n2_),
Bool(++n3_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT3(PredFormatFunctor3(), ++n1_, ++n2_, ++n3_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED_FORMAT3(PredFormatFunctor3(), Bool(++n1_), Bool(++n2_),
Bool(++n3_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT3(PredFormatFunction3, n1_++, n2_++, n3_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT3(PredFormatFunction3, Bool(n1_++), Bool(n2_++),
Bool(n3_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT3(PredFormatFunctor3(), n1_++, n2_++, n3_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT3(PredFormatFunctor3(), Bool(n1_++), Bool(n2_++),
Bool(n3_++));
finished_ = true;
},
"");
}
template <typename T1, typename T2, typename T3, typename T4>
bool PredFunction4(T1 v1, T2 v2, T3 v3, T4 v4) {
return v1 + v2 + v3 + v4 > 0;
}
bool PredFunction4Int(int v1, int v2, int v3, int v4) {
return v1 + v2 + v3 + v4 > 0;
}
bool PredFunction4Bool(Bool v1, Bool v2, Bool v3, Bool v4) {
return v1 + v2 + v3 + v4 > 0;
}
struct PredFunctor4 {
template <typename T1, typename T2, typename T3, typename T4>
bool operator()(const T1& v1, const T2& v2, const T3& v3, const T4& v4) {
return v1 + v2 + v3 + v4 > 0;
}
};
template <typename T1, typename T2, typename T3, typename T4>
testing::AssertionResult PredFormatFunction4(const char* e1, const char* e2,
const char* e3, const char* e4,
const T1& v1, const T2& v2,
const T3& v3, const T4& v4) {
if (PredFunction4(v1, v2, v3, v4)) return testing::AssertionSuccess();
return testing::AssertionFailure()
<< e1 << " + " << e2 << " + " << e3 << " + " << e4
<< " is expected to be positive, but evaluates to "
<< v1 + v2 + v3 + v4 << ".";
}
struct PredFormatFunctor4 {
template <typename T1, typename T2, typename T3, typename T4>
testing::AssertionResult operator()(const char* e1, const char* e2,
const char* e3, const char* e4,
const T1& v1, const T2& v2, const T3& v3,
const T4& v4) const {
return PredFormatFunction4(e1, e2, e3, e4, v1, v2, v3, v4);
}
};
class Predicate4Test : public testing::Test {
protected:
void SetUp() override {
expected_to_finish_ = true;
finished_ = false;
n1_ = n2_ = n3_ = n4_ = 0;
}
void TearDown() override {
EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 "
"exactly once.";
EXPECT_EQ(1, n2_) << "The predicate assertion didn't evaluate argument 3 "
"exactly once.";
EXPECT_EQ(1, n3_) << "The predicate assertion didn't evaluate argument 4 "
"exactly once.";
EXPECT_EQ(1, n4_) << "The predicate assertion didn't evaluate argument 5 "
"exactly once.";
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpectedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
static bool expected_to_finish_;
static bool finished_;
static int n1_;
static int n2_;
static int n3_;
static int n4_;
};
bool Predicate4Test::expected_to_finish_;
bool Predicate4Test::finished_;
int Predicate4Test::n1_;
int Predicate4Test::n2_;
int Predicate4Test::n3_;
int Predicate4Test::n4_;
typedef Predicate4Test EXPECT_PRED_FORMAT4Test;
typedef Predicate4Test ASSERT_PRED_FORMAT4Test;
typedef Predicate4Test EXPECT_PRED4Test;
typedef Predicate4Test ASSERT_PRED4Test;
TEST_F(EXPECT_PRED4Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED4(PredFunction4Int, ++n1_, ++n2_, ++n3_, ++n4_);
finished_ = true;
}
TEST_F(EXPECT_PRED4Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED4(PredFunction4Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_),
Bool(++n4_));
finished_ = true;
}
TEST_F(EXPECT_PRED4Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED4(PredFunctor4(), ++n1_, ++n2_, ++n3_, ++n4_);
finished_ = true;
}
TEST_F(EXPECT_PRED4Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED4(PredFunctor4(), Bool(++n1_), Bool(++n2_), Bool(++n3_),
Bool(++n4_));
finished_ = true;
}
TEST_F(EXPECT_PRED4Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED4(PredFunction4Int, n1_++, n2_++, n3_++, n4_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED4Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED4(PredFunction4Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++),
Bool(n4_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED4Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED4(PredFunctor4(), n1_++, n2_++, n3_++, n4_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED4Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED4(PredFunctor4(), Bool(n1_++), Bool(n2_++), Bool(n3_++),
Bool(n4_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED4Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED4(PredFunction4Int, ++n1_, ++n2_, ++n3_, ++n4_);
finished_ = true;
}
TEST_F(ASSERT_PRED4Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED4(PredFunction4Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_),
Bool(++n4_));
finished_ = true;
}
TEST_F(ASSERT_PRED4Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED4(PredFunctor4(), ++n1_, ++n2_, ++n3_, ++n4_);
finished_ = true;
}
TEST_F(ASSERT_PRED4Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED4(PredFunctor4(), Bool(++n1_), Bool(++n2_), Bool(++n3_),
Bool(++n4_));
finished_ = true;
}
TEST_F(ASSERT_PRED4Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED4(PredFunction4Int, n1_++, n2_++, n3_++, n4_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED4Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED4(PredFunction4Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++),
Bool(n4_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED4Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED4(PredFunctor4(), n1_++, n2_++, n3_++, n4_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED4Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED4(PredFunctor4(), Bool(n1_++), Bool(n2_++), Bool(n3_++),
Bool(n4_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT4(PredFormatFunction4, ++n1_, ++n2_, ++n3_, ++n4_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED_FORMAT4(PredFormatFunction4, Bool(++n1_), Bool(++n2_),
Bool(++n3_), Bool(++n4_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT4(PredFormatFunctor4(), ++n1_, ++n2_, ++n3_, ++n4_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED_FORMAT4(PredFormatFunctor4(), Bool(++n1_), Bool(++n2_),
Bool(++n3_), Bool(++n4_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT4(PredFormatFunction4, n1_++, n2_++, n3_++, n4_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT4(PredFormatFunction4, Bool(n1_++), Bool(n2_++),
Bool(n3_++), Bool(n4_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT4(PredFormatFunctor4(), n1_++, n2_++, n3_++, n4_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT4(PredFormatFunctor4(), Bool(n1_++), Bool(n2_++),
Bool(n3_++), Bool(n4_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT4(PredFormatFunction4, ++n1_, ++n2_, ++n3_, ++n4_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED_FORMAT4(PredFormatFunction4, Bool(++n1_), Bool(++n2_),
Bool(++n3_), Bool(++n4_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT4(PredFormatFunctor4(), ++n1_, ++n2_, ++n3_, ++n4_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED_FORMAT4(PredFormatFunctor4(), Bool(++n1_), Bool(++n2_),
Bool(++n3_), Bool(++n4_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT4(PredFormatFunction4, n1_++, n2_++, n3_++, n4_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT4(PredFormatFunction4, Bool(n1_++), Bool(n2_++),
Bool(n3_++), Bool(n4_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT4(PredFormatFunctor4(), n1_++, n2_++, n3_++, n4_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT4(PredFormatFunctor4(), Bool(n1_++), Bool(n2_++),
Bool(n3_++), Bool(n4_++));
finished_ = true;
},
"");
}
template <typename T1, typename T2, typename T3, typename T4, typename T5>
bool PredFunction5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) {
return v1 + v2 + v3 + v4 + v5 > 0;
}
bool PredFunction5Int(int v1, int v2, int v3, int v4, int v5) {
return v1 + v2 + v3 + v4 + v5 > 0;
}
bool PredFunction5Bool(Bool v1, Bool v2, Bool v3, Bool v4, Bool v5) {
return v1 + v2 + v3 + v4 + v5 > 0;
}
struct PredFunctor5 {
template <typename T1, typename T2, typename T3, typename T4, typename T5>
bool operator()(const T1& v1, const T2& v2, const T3& v3, const T4& v4,
const T5& v5) {
return v1 + v2 + v3 + v4 + v5 > 0;
}
};
template <typename T1, typename T2, typename T3, typename T4, typename T5>
testing::AssertionResult PredFormatFunction5(const char* e1, const char* e2,
const char* e3, const char* e4,
const char* e5, const T1& v1,
const T2& v2, const T3& v3,
const T4& v4, const T5& v5) {
if (PredFunction5(v1, v2, v3, v4, v5)) return testing::AssertionSuccess();
return testing::AssertionFailure()
<< e1 << " + " << e2 << " + " << e3 << " + " << e4 << " + " << e5
<< " is expected to be positive, but evaluates to "
<< v1 + v2 + v3 + v4 + v5 << ".";
}
struct PredFormatFunctor5 {
template <typename T1, typename T2, typename T3, typename T4, typename T5>
testing::AssertionResult operator()(const char* e1, const char* e2,
const char* e3, const char* e4,
const char* e5, const T1& v1,
const T2& v2, const T3& v3, const T4& v4,
const T5& v5) const {
return PredFormatFunction5(e1, e2, e3, e4, e5, v1, v2, v3, v4, v5);
}
};
class Predicate5Test : public testing::Test {
protected:
void SetUp() override {
expected_to_finish_ = true;
finished_ = false;
n1_ = n2_ = n3_ = n4_ = n5_ = 0;
}
void TearDown() override {
EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 "
"exactly once.";
EXPECT_EQ(1, n2_) << "The predicate assertion didn't evaluate argument 3 "
"exactly once.";
EXPECT_EQ(1, n3_) << "The predicate assertion didn't evaluate argument 4 "
"exactly once.";
EXPECT_EQ(1, n4_) << "The predicate assertion didn't evaluate argument 5 "
"exactly once.";
EXPECT_EQ(1, n5_) << "The predicate assertion didn't evaluate argument 6 "
"exactly once.";
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpectedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
static bool expected_to_finish_;
static bool finished_;
static int n1_;
static int n2_;
static int n3_;
static int n4_;
static int n5_;
};
bool Predicate5Test::expected_to_finish_;
bool Predicate5Test::finished_;
int Predicate5Test::n1_;
int Predicate5Test::n2_;
int Predicate5Test::n3_;
int Predicate5Test::n4_;
int Predicate5Test::n5_;
typedef Predicate5Test EXPECT_PRED_FORMAT5Test;
typedef Predicate5Test ASSERT_PRED_FORMAT5Test;
typedef Predicate5Test EXPECT_PRED5Test;
typedef Predicate5Test ASSERT_PRED5Test;
TEST_F(EXPECT_PRED5Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED5(PredFunction5Int, ++n1_, ++n2_, ++n3_, ++n4_, ++n5_);
finished_ = true;
}
TEST_F(EXPECT_PRED5Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED5(PredFunction5Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_),
Bool(++n4_), Bool(++n5_));
finished_ = true;
}
TEST_F(EXPECT_PRED5Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED5(PredFunctor5(), ++n1_, ++n2_, ++n3_, ++n4_, ++n5_);
finished_ = true;
}
TEST_F(EXPECT_PRED5Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED5(PredFunctor5(), Bool(++n1_), Bool(++n2_), Bool(++n3_),
Bool(++n4_), Bool(++n5_));
finished_ = true;
}
TEST_F(EXPECT_PRED5Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED5(PredFunction5Int, n1_++, n2_++, n3_++, n4_++, n5_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED5Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED5(PredFunction5Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++),
Bool(n4_++), Bool(n5_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED5Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED5(PredFunctor5(), n1_++, n2_++, n3_++, n4_++, n5_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED5Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED5(PredFunctor5(), Bool(n1_++), Bool(n2_++), Bool(n3_++),
Bool(n4_++), Bool(n5_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED5Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED5(PredFunction5Int, ++n1_, ++n2_, ++n3_, ++n4_, ++n5_);
finished_ = true;
}
TEST_F(ASSERT_PRED5Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED5(PredFunction5Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_),
Bool(++n4_), Bool(++n5_));
finished_ = true;
}
TEST_F(ASSERT_PRED5Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED5(PredFunctor5(), ++n1_, ++n2_, ++n3_, ++n4_, ++n5_);
finished_ = true;
}
TEST_F(ASSERT_PRED5Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED5(PredFunctor5(), Bool(++n1_), Bool(++n2_), Bool(++n3_),
Bool(++n4_), Bool(++n5_));
finished_ = true;
}
TEST_F(ASSERT_PRED5Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED5(PredFunction5Int, n1_++, n2_++, n3_++, n4_++, n5_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED5Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED5(PredFunction5Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++),
Bool(n4_++), Bool(n5_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED5Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED5(PredFunctor5(), n1_++, n2_++, n3_++, n4_++, n5_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED5Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED5(PredFunctor5(), Bool(n1_++), Bool(n2_++), Bool(n3_++),
Bool(n4_++), Bool(n5_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT5(PredFormatFunction5, ++n1_, ++n2_, ++n3_, ++n4_, ++n5_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnUserTypeSuccess) {
EXPECT_PRED_FORMAT5(PredFormatFunction5, Bool(++n1_), Bool(++n2_),
Bool(++n3_), Bool(++n4_), Bool(++n5_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnBuiltInTypeSuccess) {
EXPECT_PRED_FORMAT5(PredFormatFunctor5(), ++n1_, ++n2_, ++n3_, ++n4_, ++n5_);
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnUserTypeSuccess) {
EXPECT_PRED_FORMAT5(PredFormatFunctor5(), Bool(++n1_), Bool(++n2_),
Bool(++n3_), Bool(++n4_), Bool(++n5_));
finished_ = true;
}
TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT5(PredFormatFunction5, n1_++, n2_++, n3_++, n4_++,
n5_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT5(PredFormatFunction5, Bool(n1_++), Bool(n2_++),
Bool(n3_++), Bool(n4_++), Bool(n5_++));
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnBuiltInTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT5(PredFormatFunctor5(), n1_++, n2_++, n3_++, n4_++,
n5_++);
finished_ = true;
},
"");
}
TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnUserTypeFailure) {
EXPECT_NONFATAL_FAILURE(
{
EXPECT_PRED_FORMAT5(PredFormatFunctor5(), Bool(n1_++), Bool(n2_++),
Bool(n3_++), Bool(n4_++), Bool(n5_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT5(PredFormatFunction5, ++n1_, ++n2_, ++n3_, ++n4_, ++n5_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnUserTypeSuccess) {
ASSERT_PRED_FORMAT5(PredFormatFunction5, Bool(++n1_), Bool(++n2_),
Bool(++n3_), Bool(++n4_), Bool(++n5_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnBuiltInTypeSuccess) {
ASSERT_PRED_FORMAT5(PredFormatFunctor5(), ++n1_, ++n2_, ++n3_, ++n4_, ++n5_);
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnUserTypeSuccess) {
ASSERT_PRED_FORMAT5(PredFormatFunctor5(), Bool(++n1_), Bool(++n2_),
Bool(++n3_), Bool(++n4_), Bool(++n5_));
finished_ = true;
}
TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT5(PredFormatFunction5, n1_++, n2_++, n3_++, n4_++,
n5_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT5(PredFormatFunction5, Bool(n1_++), Bool(n2_++),
Bool(n3_++), Bool(n4_++), Bool(n5_++));
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnBuiltInTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT5(PredFormatFunctor5(), n1_++, n2_++, n3_++, n4_++,
n5_++);
finished_ = true;
},
"");
}
TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnUserTypeFailure) {
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE(
{
ASSERT_PRED_FORMAT5(PredFormatFunctor5(), Bool(n1_++), Bool(n2_++),
Bool(n3_++), Bool(n4_++), Bool(n5_++));
finished_ = true;
},
"");
} | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googletest/include/gtest/gtest_pred_impl.h | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googletest/test/gtest_pred_impl_unittest.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
55722f3a-0c73-43b4-9bf2-18020e210eee | cpp | tensorflow/tensorflow | strided_slice | tensorflow/lite/delegates/gpu/common/tasks/strided_slice.cc | tensorflow/lite/delegates/xnnpack/strided_slice_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/strided_slice.h"
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
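// Whole 4-channel slices can be read directly when the channel stride is 1
// and the channel start is 4-aligned.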
bool Is4Aligned(const SliceAttributes& attr) {
return attr.strides.c == 1 && attr.starts.c % 4 == 0;
}
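// Computes the first source element read on each axis. With a negative
// stride the walk starts from the end index; negative end values count back
// from the corresponding dimension size.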
int4 GetOffset(const SliceAttributes& attr, int src_width, int src_height,
int src_channels, int src_batch) {
int4 offset;
if (attr.strides.w > 0) {
offset.x = attr.starts.w;
} else {
if (attr.ends.w > 0) {
offset.x = attr.ends.w;
} else {
offset.x = src_width + attr.ends.w;
}
}
if (attr.strides.h > 0) {
offset.y = attr.starts.h;
} else {
if (attr.ends.h > 0) {
offset.y = attr.ends.h;
} else {
offset.y = src_height + attr.ends.h;
}
}
if (attr.strides.c > 0) {
offset.z = attr.starts.c;
} else {
if (attr.ends.c > 0) {
offset.z = attr.ends.c;
} else {
offset.z = src_channels + attr.ends.c;
}
}
if (Is4Aligned(attr)) {
offset.z /= 4;
}
if (attr.strides.b > 0) {
offset.w = attr.starts.b;
} else {
if (attr.ends.b > 0) {
offset.w = attr.ends.b;
} else {
offset.w = src_batch + attr.ends.b;
}
}
return offset;
}
}  // namespace
StridedSlice::StridedSlice(const OperationDef& definition,
const SliceAttributes& attr)
: GPUOperation(definition), attributes_(attr) {
work_group_size_ = int3(8, 4, 1);
code_ = GetStridedSliceCode(definition_, Is4Aligned(attributes_));
}
StridedSlice::StridedSlice(StridedSlice&& operation)
: GPUOperation(std::move(operation)), attributes_(operation.attributes_) {}
StridedSlice& StridedSlice::operator=(StridedSlice&& operation) {
if (this != &operation) {
attributes_ = operation.attributes_;
GPUOperation::operator=(std::move(operation));
}
return *this;
}
std::string StridedSlice::GetStridedSliceCode(const OperationDef& op_def,
bool alignedx4) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
args_.AddInt("offset_x");
args_.AddInt("offset_y");
args_.AddInt("offset_z");
args_.AddInt("offset_b");
args_.AddInt("stride_x");
args_.AddInt("stride_y");
args_.AddInt("stride_z");
args_.AddInt("stride_b");
const std::string batch_id =
op_def.dst_tensors[0].HasAxis(Axis::BATCH) ? "B" : "0";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int S = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"S >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " int s_x = X * args.stride_x + args.offset_x;\n";
c += " int s_y = Y * args.stride_y + args.offset_y;\n";
if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int s_b = " + batch_id + " * args.stride_b + args.offset_b;\n";
c += " args.src_tensor.SetBatchRef(s_b);\n";
}
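  // Aligned case: a whole 4-channel slice is read in one go. Otherwise each
  // of the four lanes is gathered per channel, clamping the source channel so
  // lanes past the last channel re-read the final valid one.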
if (alignedx4) {
c += " int s_z = S + args.offset_z;\n";
c += " args.src_tensor::type result = args.src_tensor.Read(s_x, s_y, "
"s_z);\n";
} else {
c += " args.src_tensor::type result;\n";
const std::string postfixes[] = {"x", "y", "z", "w"};
for (int i = 0; i < 4; ++i) {
c += " {\n";
const std::string channel = "(S * 4 + " + std::to_string(i) + ")";
c += " int s_ch = min(" + channel +
" * args.stride_z + args.offset_z, args.src_tensor.Channels() - "
"1);\n";
c += " args.src_tensor.ReadPerChannel(result." + postfixes[i] +
", s_x, s_y, s_ch);\n";
c += " }\n";
}
}
c += " args.dst_tensor.Write(result, X, Y, S);\n";
c += "}\n";
return c;
}
absl::Status StridedSlice::BindArguments(ArgumentsBinder* args) {
int4 offset = GetOffset(attributes_, src_[0]->Width(), src_[0]->Height(),
src_[0]->Channels(), src_[0]->Batch());
RETURN_IF_ERROR(args->SetInt("offset_x", offset.x));
RETURN_IF_ERROR(args->SetInt("offset_y", offset.y));
RETURN_IF_ERROR(args->SetInt("offset_z", offset.z));
RETURN_IF_ERROR(args->SetInt("offset_b", offset.w));
RETURN_IF_ERROR(args->SetInt("stride_x", attributes_.strides.w));
RETURN_IF_ERROR(args->SetInt("stride_y", attributes_.strides.h));
RETURN_IF_ERROR(args->SetInt("stride_z", attributes_.strides.c));
RETURN_IF_ERROR(args->SetInt("stride_b", attributes_.strides.b));
return absl::OkStatus();
}
int3 StridedSlice::GetGridSize() const {
const int grid_x = dst_[0]->Width() * dst_[0]->Batch();
const int grid_y = dst_[0]->Height();
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
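// Usage sketch (hypothetical shapes; in the delegate, op_def and attr are
// derived from the model graph being compiled):
//   SliceAttributes attr;
//   attr.starts = BHWC(0, 0, 0, 0);
//   attr.ends = BHWC(1, 8, 8, 4);
//   attr.strides = BHWC(1, 2, 2, 1);
//   StridedSlice op = CreateStridedSlice(op_def, attr);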
StridedSlice CreateStridedSlice(const OperationDef& definition,
const SliceAttributes& attr) {
return StridedSlice(definition, attr);
}
}  // namespace gpu
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/strided_slice_tester.h"
namespace tflite {
namespace xnnpack {
TEST_F(StridedSliceTest, 1D) {
const std::vector<int32_t> input_shape = {RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
TEST_F(StridedSliceTest, 2D) {
const std::vector<int32_t> input_shape = {RandomShape(), RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
TEST_F(StridedSliceTest, 3D) {
const std::vector<int32_t> input_shape = {RandomShape(), RandomShape(),
RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
TEST_F(StridedSliceTest, 4D) {
const std::vector<int32_t> input_shape = {RandomShape(), RandomShape(),
RandomShape(), RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
TEST_F(StridedSliceTest, 5D) {
const std::vector<int32_t> input_shape = {RandomShape(), RandomShape(),
RandomShape(), RandomShape(),
RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
}  // namespace xnnpack
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/strided_slice.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/strided_slice_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce07eab7-820d-4314-b0ea-6ab402ef04f1 | cpp | tensorflow/tensorflow | test_delegate_providers | tensorflow/lite/kernels/test_delegate_providers.cc | tensorflow/lite/kernels/test_delegate_providers_test.cc | #include "tensorflow/lite/kernels/test_delegate_providers.h"
#include <string>
#include <vector>
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
constexpr char KernelTestDelegateProviders::kAccelerationTestConfigPath[];
constexpr char KernelTestDelegateProviders::kUseSimpleAllocator[];
constexpr char KernelTestDelegateProviders::kAllowFp16PrecisionForFp32[];
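// Returns the process-wide singleton, created on first use and intentionally
// never destroyed.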
KernelTestDelegateProviders* KernelTestDelegateProviders::Get() {
static KernelTestDelegateProviders* const providers =
new KernelTestDelegateProviders();
return providers;
}
KernelTestDelegateProviders::KernelTestDelegateProviders()
: delegate_list_util_(¶ms_) {
delegate_list_util_.AddAllDelegateParams();
params_.AddParam(kAccelerationTestConfigPath,
tools::ToolParam::Create<std::string>(""));
params_.AddParam(kUseSimpleAllocator, tools::ToolParam::Create<bool>(false));
params_.AddParam(kAllowFp16PrecisionForFp32,
tools::ToolParam::Create<bool>(false));
}
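// Parses the flags known to this class and to the registered delegate
// providers; unmatched arguments are left in argv for the caller. Returns
// false and logs the usage string on a parse error or when --help is set.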
bool KernelTestDelegateProviders::InitFromCmdlineArgs(int* argc,
const char** argv) {
std::vector<tflite::Flag> flags = {
Flag(
kAccelerationTestConfigPath,
[this](const std::string& val, int argv_position) {
this->params_.Set<std::string>(kAccelerationTestConfigPath, val,
argv_position);
},
"", "Acceleration test config file for SingleOpModel",
Flag::kOptional),
Flag(
kUseSimpleAllocator,
[this](const bool& val, int argv_position) {
this->params_.Set<bool>(kUseSimpleAllocator, val, argv_position);
},
false, "Use Simple Memory Allocator for SingleOpModel",
Flag::kOptional),
Flag(
kAllowFp16PrecisionForFp32,
[this](const bool& val, int argv_position) {
this->params_.Set<bool>(kAllowFp16PrecisionForFp32, val,
argv_position);
},
false, "Compare result in fp16 precision for fp32 operations",
Flag::kOptional)};
delegate_list_util_.AppendCmdlineFlags(flags);
bool parse_result = tflite::Flags::Parse(argc, argv, flags);
if (!parse_result || params_.Get<bool>("help")) {
std::string usage = Flags::Usage(argv[0], flags);
TFLITE_LOG(ERROR) << usage;
parse_result = false;
}
return parse_result;
}
} | #include "tensorflow/lite/kernels/test_delegate_providers.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(KernelTestDelegateProvidersTest, DelegateProvidersParams) {
KernelTestDelegateProviders providers;
const auto& params = providers.ConstParams();
EXPECT_TRUE(params.HasParam("use_xnnpack"));
EXPECT_TRUE(params.HasParam("use_nnapi"));
EXPECT_TRUE(params.HasParam("allow_fp16_precision_for_fp32"));
int argc = 4;
const char* argv[] = {"program_name", "--use_nnapi=true",
"--allow_fp16_precision_for_fp32=true",
"--other_undefined_flag=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
EXPECT_TRUE(params.Get<bool>("use_nnapi"));
EXPECT_TRUE(params.Get<bool>("allow_fp16_precision_for_fp32"));
EXPECT_EQ(2, argc);
EXPECT_EQ("--other_undefined_flag=1", argv[1]);
}
TEST(KernelTestDelegateProvidersTest, CreateTfLiteDelegates) {
#if !defined(__Fuchsia__) && !defined(__s390x__) && \
!defined(TFLITE_WITHOUT_XNNPACK)
KernelTestDelegateProviders providers;
providers.MutableParams()->Set<bool>("use_xnnpack", true);
EXPECT_GE(providers.CreateAllDelegates().size(), 1);
tools::ToolParams local_params;
local_params.Merge(providers.ConstParams());
local_params.Set<bool>("use_xnnpack", false);
EXPECT_TRUE(providers.CreateAllDelegates(local_params).empty());
#endif
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/test_delegate_providers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/test_delegate_providers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c33ef068-c164-4cee-846d-e00f6dbb5c81 | cpp | tensorflow/tensorflow | all_to_all | tensorflow/core/common_runtime/all_to_all.cc | tensorflow/core/common_runtime/all_to_all_test.cc | #include "tensorflow/core/common_runtime/all_to_all.h"
#include <utility>
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
AllToAll::AllToAll()
: col_ctx_(nullptr), col_params_(nullptr), done_(nullptr), counter_(0) {}
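// Returns the completion callback shared by every send and receive. done_ is
// invoked only after all 2 * group_size operations have reported in; if a
// staging buffer was used (because the input aliased the output), it is first
// copied into the real output tensor.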
StatusCallback AllToAll::CheckCounterAndCallDone() {
return [this](const Status& s) {
Status final_status;
{
mutex_lock l(mu_);
status_.Update(s);
++counter_;
if (counter_ < 2 * col_params_->group.group_size) {
return;
}
CHECK_LE(counter_, 2 * col_params_->group.group_size);
final_status = status_;
}
if (!final_status.ok()) {
done_(final_status);
return;
}
if (col_ctx_->output->SharesBufferWith(output_buffer_)) {
done_(final_status);
} else {
CollectiveRemoteAccessLocal::MemCpyAsync(
col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->op_device_context(), col_ctx_->device,
col_ctx_->device, col_ctx_->op_ctx->input_alloc_attr(0),
col_ctx_->op_ctx->output_alloc_attr(0), &output_buffer_,
col_ctx_->output, 0, done_);
}
};
}
Status AllToAll::InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) {
if (col_ctx->input->dim_size(0) != col_ctx->col_params->group.group_size) {
return errors::InvalidArgument("input to all-to-all first dimension size (",
col_ctx->input->dim_size(0),
") must be the same as the group size (",
col_ctx->col_params->group.group_size, ")");
}
DCHECK(col_ctx->dev_mgr);
col_ctx_ = col_ctx;
col_params_ = col_ctx->col_params.get();
return collective_util::InitializeDeviceAndLocality(
col_ctx->dev_mgr, col_ctx->device_name, &col_ctx->device,
&col_ctx->device_locality);
}
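// Slices the input into one chunk per rank and exchanges them: chunk i is
// sent to rank i, and the chunk received from rank i is written at that
// member's rank position in the output. A staging buffer is allocated when
// the op runs in place (input and output share storage).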
void AllToAll::Run(StatusCallback done) {
done_ = std::move(done);
input_chunks_.reserve(col_params_->group.group_size);
output_chunks_.reserve(col_params_->group.group_size);
if (col_ctx_->input->SharesBufferWith(*col_ctx_->output)) {
output_buffer_ = Tensor(
col_ctx_->device->GetAllocator(col_ctx_->op_ctx->output_alloc_attr(0)),
col_ctx_->output->dtype(), col_ctx_->output->shape());
} else {
output_buffer_ = *col_ctx_->output;
}
for (int i = 0; i < col_params_->group.group_size; ++i) {
input_chunks_.push_back(col_ctx_->input->SubSlice(i));
int output_index = col_params_->group.members[i].rank;
output_chunks_.push_back(output_buffer_.SubSlice(output_index));
}
for (int i = 0; i < col_params_->group.group_size; ++i) {
auto default_rank = col_params_->default_rank;
DispatchSend(default_rank, i, &input_chunks_[i], CheckCounterAndCallDone());
DispatchRecv(i, default_rank, &output_chunks_[i],
CheckCounterAndCallDone());
}
}
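// The buffer key is exec_key followed by the source and target ranks, giving
// each directed pair of ranks a distinct rendezvous key.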
void AllToAll::DispatchSend(int src_rank, int target_rank, const Tensor* tensor,
const StatusCallback& done) {
string send_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
col_ctx_->col_exec->remote_access()->PostToPeer(
col_params_->group.members[target_rank].device.name(),
col_params_->group.members[target_rank].task, send_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
col_ctx_->op_ctx->cancellation_manager(), done);
}
void AllToAll::DispatchRecv(int src_rank, int target_rank, Tensor* tensor,
const StatusCallback& done) {
string recv_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
col_ctx_->col_exec->remote_access()->RecvFromPeer(
col_params_->group.members[src_rank].device.name(),
col_params_->group.members[src_rank].task,
col_params_->group.members[src_rank].is_local, recv_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
0, col_ctx_->op_ctx->cancellation_manager(), done);
}
namespace {
REGISTER_COLLECTIVE(AllToAll, AllToAll);
}  // namespace
} | #include "tensorflow/core/common_runtime/all_to_all.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class AllToAllTest : public ::testing::Test {
protected:
std::unique_ptr<CollectiveTestEnv> test_env_;
};
TEST_F(AllToAllTest, Success) {
  test_env_ = CreateCollectiveTestEnv(/*num_workers=*/1,
                                      /*num_devices=*/3, DEVICE_CPU);
std::vector<Tensor> tensors = {
test::AsTensor<double>({1., 2., 3.}),
test::AsTensor<double>({4., 5., 6.}),
test::AsTensor<double>({7., 8., 9.}),
};
BlockingCounter counter(3);
for (int i = 0; i < 3; ++i) {
SchedClosure([this, &tensors, i, &counter]() {
auto col_params = CreateCollectiveParams(*test_env_, i, "AllToAll",
ALL_TO_ALL_COLLECTIVE, DT_DOUBLE,
tensors[i].shape());
Device* device = nullptr;
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(
col_params->group.members[i].device.name(), &device));
TF_CHECK_OK(RunCollective(test_env_.get(), col_params.get(), device,
&tensors[i], &tensors[i]));
counter.DecrementCount();
});
}
counter.Wait();
test::ExpectTensorEqual<double>(tensors[0],
test::AsTensor<double>({1., 4., 7.}));
test::ExpectTensorEqual<double>(tensors[1],
test::AsTensor<double>({2., 5., 8.}));
test::ExpectTensorEqual<double>(tensors[2],
test::AsTensor<double>({3., 6., 9.}));
}
TEST_F(AllToAllTest, SuccessDifferentRank) {
  test_env_ = CreateCollectiveTestEnv(/*num_workers=*/1,
                                      /*num_devices=*/3, DEVICE_CPU);
std::vector<Tensor> tensors = {
test::AsTensor<double>({1., 2., 3.}),
test::AsTensor<double>({4., 5., 6.}),
test::AsTensor<double>({7., 8., 9.}),
};
std::vector<std::vector<int32>> device_ranks = {{2, 1, 0}};
BlockingCounter counter(3);
for (int i = 0; i < 3; ++i) {
SchedClosure([this, &tensors, &device_ranks, i, &counter]() {
auto col_params = CreateCollectiveParams(
*test_env_, i, "AllToAll", ALL_TO_ALL_COLLECTIVE, DT_DOUBLE,
tensors[i].shape(), device_ranks);
Device* device = nullptr;
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(
col_params->group.members[i].device.name(), &device));
TF_CHECK_OK(RunCollective(test_env_.get(), col_params.get(), device,
&tensors[i], &tensors[i]));
counter.DecrementCount();
});
}
counter.Wait();
test::ExpectTensorEqual<double>(tensors[0],
test::AsTensor<double>({7., 4., 1.}));
test::ExpectTensorEqual<double>(tensors[1],
test::AsTensor<double>({8., 5., 2.}));
test::ExpectTensorEqual<double>(tensors[2],
test::AsTensor<double>({9., 6., 3.}));
}
TEST_F(AllToAllTest, Failure) {
  test_env_ = CreateCollectiveTestEnv(/*num_workers=*/1,
                                      /*num_devices=*/3, DEVICE_CPU);
test_env_->remote_access->set_fail_after(1);
std::vector<Tensor> tensors = {
test::AsTensor<double>({1., 2., 3.}),
test::AsTensor<double>({4., 5., 6.}),
test::AsTensor<double>({7., 8., 9.}),
};
int num_failures = 0;
mutex mu;
BlockingCounter counter(3);
for (int i = 0; i < 3; ++i) {
SchedClosure([this, &mu, &num_failures, &tensors, i, &counter]() {
auto col_params = CreateCollectiveParams(*test_env_, i, "AllToAll",
ALL_TO_ALL_COLLECTIVE, DT_DOUBLE,
tensors[i].shape());
Device* device = nullptr;
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(
col_params->group.members[i].device.name(), &device));
Status status = RunCollective(test_env_.get(), col_params.get(), device,
&tensors[i], &tensors[i]);
if (!status.ok()) {
mutex_lock l(mu);
++num_failures;
}
counter.DecrementCount();
});
}
counter.Wait();
EXPECT_GT(num_failures, 0);
}
TEST_F(AllToAllTest, WrongFirstDimensionSize) {
  test_env_ = CreateCollectiveTestEnv(/*num_workers=*/1,
                                      /*num_devices=*/3, DEVICE_CPU);
std::vector<Tensor> tensors = {
test::AsTensor<double>({1., 2.}),
test::AsTensor<double>({4., 5.}),
test::AsTensor<double>({7., 8.}),
};
BlockingCounter counter(3);
for (int i = 0; i < 3; ++i) {
SchedClosure([this, &tensors, i, &counter]() {
auto col_params = CreateCollectiveParams(*test_env_, i, "AllToAll",
ALL_TO_ALL_COLLECTIVE, DT_DOUBLE,
tensors[i].shape());
Device* device = nullptr;
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(
col_params->group.members[i].device.name(), &device));
Status status = RunCollective(test_env_.get(), col_params.get(), device,
&tensors[i], &tensors[i]);
counter.DecrementCount();
EXPECT_TRUE(errors::IsInvalidArgument(status));
});
}
counter.Wait();
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/all_to_all.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/all_to_all_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75b5b842-ea24-4b95-a9a0-6dbf9817814e | cpp | tensorflow/tensorflow | benchmark | tensorflow/compiler/aot/benchmark.cc | tensorflow/compiler/aot/benchmark_test.cc | #include "tensorflow/compiler/aot/benchmark.h"
#include <sys/time.h>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
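// Wall-clock time in microseconds since the epoch, via gettimeofday.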
static uint64 NowMicros() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<uint64>(tv.tv_sec) * 1000000 + tv.tv_usec;
}
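// Prints best, worst, median and mean iteration times, plus a 25%-trimmed
// mean (the fastest and slowest quarters dropped) and the mean of the best
// 10% of iterations, with labels and values column-aligned.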
void DumpStatsToStdout(const Stats& stats) {
std::vector<int64_t> sorted_us(stats.per_iter_us);
std::sort(sorted_us.begin(), sorted_us.end());
const size_t count_us = sorted_us.size();
double sum_us = 0;
size_t count_us_trimmed = 0;
double sum_us_trimmed = 0;
size_t count_us_best = 0;
double sum_us_best = 0;
static constexpr float trim_ratio = 0.25;
static constexpr float best_ratio = 0.1;
const size_t count_trimmed = count_us * trim_ratio;
const size_t count_best = count_us * best_ratio;
for (size_t i = 0; i < sorted_us.size(); ++i) {
const int64_t us = sorted_us[i];
sum_us += us;
if (i >= count_trimmed && i < count_us - count_trimmed) {
sum_us_trimmed += us;
++count_us_trimmed;
}
if (i < count_best) {
sum_us_best += us;
++count_us_best;
}
}
const int kBufSize = 1000;
char buf[kBufSize];
snprintf(buf, kBufSize, "Mean with %2.0f%% trimmed:", trim_ratio * 100);
std::string label_trimmed(buf);
snprintf(buf, kBufSize, "Mean of %2.0f%% best:", best_ratio * 100);
std::string label_best(buf);
std::vector<std::pair<std::string, double>> groups = {
{"Best:", sorted_us.front()},
{"Worst:", sorted_us.back()},
{"Median:", sorted_us[count_us / 2]},
{"Mean:", sum_us / count_us},
{std::move(label_trimmed), sum_us_trimmed / count_us_trimmed},
{std::move(label_best), sum_us_best / count_us_best},
};
int max_label_size = 0;
double max_us = 0;
for (const auto& g : groups) {
if (g.first.size() > max_label_size) {
max_label_size = g.first.size();
}
if (g.second > max_us) {
max_us = g.second;
}
}
int max_digits = 1;
while (max_us >= 10.0) {
max_us /= 10.0;
++max_digits;
}
printf("Benchmark ran %zu iterations over %lld us\n", count_us,
static_cast<long long>(stats.total_us));
for (const auto& g : groups) {
printf(" %-*s %*.3f us\n", max_label_size, g.first.c_str(), max_digits + 4,
g.second);
}
}
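// Runs fn until either the time budget (max_micros, falling back to
// Options::kDefaultMicros when neither bound is set) or the iteration cap
// (max_iters) is reached, recording each iteration's wall time. Usage sketch
// (hypothetical `computation` object):
//   Stats stats;
//   Benchmark(options, [&] { computation.Run(); }, &stats);
//   DumpStatsToStdout(stats);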
void Benchmark(const Options& options, const BenchmarkFn& fn, Stats* stats) {
const int64_t max_us = (options.max_micros <= 0 && options.max_iters <= 0)
? Options::kDefaultMicros
: options.max_micros;
printf("Running benchmark for %lld us\n", static_cast<long long>(max_us));
const int64_t start_us = NowMicros();
int64_t iters = 0;
while (true) {
const int64_t iter_start_us = NowMicros();
fn();
const int64_t end_us = NowMicros();
stats->per_iter_us.push_back(end_us - iter_start_us);
const int64_t total_us = end_us - start_us;
++iters;
if ((max_us > 0 && total_us >= max_us) ||
(options.max_iters > 0 && iters >= options.max_iters)) {
stats->total_us = total_us;
break;
}
}
}
}  // namespace benchmark
}  // namespace tfcompile
} | #include "tensorflow/compiler/aot/benchmark.h"
#include "tensorflow/compiler/aot/test_graph_tfadd.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
namespace {
TEST(Benchmark, Benchmark) {
AddComp add;
Options options;
options.max_iters = 1;
Stats stats1;
Benchmark(options, [&] { add.Run(); }, &stats1);
EXPECT_EQ(stats1.per_iter_us.size(), 1);
options.max_iters = 5;
Stats stats5;
Benchmark(options, [&] { add.Run(); }, &stats5);
EXPECT_EQ(stats5.per_iter_us.size(), 5);
}
}  // namespace
}  // namespace benchmark
}  // namespace tfcompile
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/benchmark.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/benchmark_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5cba145-5a94-41c9-ab9e-482062e3c0d9 | cpp | tensorflow/tensorflow | tfprof_show | tensorflow/core/profiler/internal/tfprof_show.cc | tensorflow/core/profiler/internal/tfprof_show_test.cc | #include "tensorflow/core/profiler/internal/tfprof_show.h"
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace tfprof {
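// Renders the view selected by opts.output_type: a timeline file, stdout, an
// output file, or (for the proto-only output types) just the returned proto.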
const GraphNodeProto& TFShow::Show(const string& prefix, const Options& opts) {
if (opts.output_type == kOutput[0]) {
Timeline timeline(opts.step, opts.output_options.at(kTimelineOpts[0]));
return ShowInternal(opts, &timeline)->proto();
} else {
const ShowNode* ret = ShowInternal(opts, nullptr);
if (opts.output_type == kOutput[1]) {
absl::PrintF("%s", (prefix + ret->formatted_str));
fflush(stdout);
} else if (opts.output_type == kOutput[2]) {
Status s = WriteStringToFile(Env::Default(),
opts.output_options.at(kFileOpts[0]),
prefix + ret->formatted_str);
if (!s.ok()) {
absl::FPrintF(stderr, "%s\n", s.ToString());
}
} else if (opts.output_type == kOutput[3] ||
opts.output_type == kOutput[4]) {
} else {
absl::FPrintF(stderr, "Unknown output type: %s\n", opts.output_type);
}
return ret->proto();
}
}
bool TFShow::LookUpCheckPoint(const string& name,
std::unique_ptr<TFProfTensor>* tensor) {
if (name == kTFProfRoot || !ckpt_reader_ || !tensor) {
return false;
}
std::unique_ptr<Tensor> out_tensor;
TF_Status* status = TF_NewStatus();
ckpt_reader_->GetTensor(name, &out_tensor, status);
if (TF_GetCode(status) != TF_OK) {
absl::FPrintF(stderr, "%s\n", TF_Message(status));
TF_DeleteStatus(status);
return false;
}
*tensor = std::make_unique<TFProfTensor>(std::move(out_tensor));
TF_DeleteStatus(status);
return true;
}
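// A node is displayed only if it clears every numeric threshold in opts,
// lies within max_depth, matches at least one show_name_regex, and matches
// no hide_name_regex. The root node is always displayed.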
bool TFShow::ShouldShow(const ShowNode* node, const Options& opts,
int depth) const {
if (node->name() == kTFProfRoot) return true;
if (node->proto().total_requested_bytes() < opts.min_bytes ||
node->proto().total_peak_bytes() < opts.min_peak_bytes ||
node->proto().total_residual_bytes() < opts.min_residual_bytes ||
node->proto().total_output_bytes() < opts.min_output_bytes ||
node->proto().total_exec_micros() < opts.min_micros ||
node->proto().total_accelerator_exec_micros() <
opts.min_accelerator_micros ||
node->proto().total_cpu_exec_micros() < opts.min_cpu_micros ||
node->proto().parameters() < opts.min_params ||
node->proto().float_ops() < opts.min_float_ops ||
node->proto().run_count() < opts.min_occurrence ||
depth > opts.max_depth || !ShouldShowIfExtra(node, opts, depth)) {
return false;
}
bool show = false;
if (opts.show_name_regexes.size() == 1 && opts.show_name_regexes[0] == ".*") {
show = true;
} else {
for (const string& regex : opts.show_name_regexes) {
if (RE2::FullMatch(node->name(), regex)) {
show = true;
break;
}
}
}
if (!show) return false;
for (const string& regex : opts.hide_name_regexes) {
if (RE2::FullMatch(node->name(), regex)) return false;
}
return true;
}
bool TFShow::ShouldTrim(const ShowNode* node,
const std::vector<string>& regexes) const {
for (const string& regex : regexes) {
if (RE2::FullMatch(node->name(), regex)) {
return true;
}
}
return false;
}
bool TFShow::ReAccount(ShowNode* node, const Options& opts) {
node->ReInit(opts.step);
if (opts.account_type_regexes.size() == 1 &&
opts.account_type_regexes[0] == ".*") {
return true;
}
for (const string& regex : opts.account_type_regexes) {
for (const string& type : node->node->op_types()) {
if (RE2::FullMatch(type, regex)) {
return true;
}
}
}
return false;
}
string TFShow::FormatNodeMemory(ShowNode* node, int64_t bytes,
int64_t total_bytes) const {
string memory = FormatMemory(total_bytes);
if (node->account) {
memory = FormatMemory(bytes) + "/" + memory;
} else {
memory = "--/" + memory;
}
return memory;
}
string TFShow::FormatNode(ShowNode* node, const Options& opts) const {
std::vector<string> info;
if (opts.select.find(kShown[2]) != opts.select.end()) {
const string shape = FormatShapes(node->node->shape());
if (!shape.empty()) {
info.push_back(shape);
}
string params = FormatNumber(node->proto().total_parameters()) + " params";
if (node->account) {
params = FormatNumber(node->proto().parameters()) + "/" + params;
} else {
params = "--/" + params;
}
info.push_back(params);
}
if (opts.select.find(kShown[3]) != opts.select.end()) {
string fops = FormatNumber(node->proto().total_float_ops()) + " flops";
if (node->account) {
fops = FormatNumber(node->proto().float_ops()) + "/" + fops;
} else {
fops = "--/" + fops;
}
info.push_back(fops);
}
if (opts.select.find(kShown[0]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().requested_bytes(),
node->proto().total_requested_bytes()));
}
if (opts.select.find(kShown[11]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().peak_bytes(),
node->proto().total_peak_bytes()));
}
if (opts.select.find(kShown[12]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().residual_bytes(),
node->proto().total_residual_bytes()));
}
if (opts.select.find(kShown[13]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().output_bytes(),
node->proto().total_output_bytes()));
}
if (opts.select.find(kShown[1]) != opts.select.end()) {
info.push_back(FormatTotalExecTime(node, opts));
info.push_back(FormatAcceleratorExecTime(node, opts));
info.push_back(FormatCPUExecTime(node, opts));
}
if (opts.select.find(kShown[9]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
info.push_back(FormatAcceleratorExecTime(node, opts));
}
if (opts.select.find(kShown[10]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
info.push_back(FormatCPUExecTime(node, opts));
}
if (opts.select.find(kShown[5]) != opts.select.end()) {
if (node->proto().devices_size() > 0) {
info.push_back(absl::StrJoin(node->proto().devices(), "|"));
}
}
if (opts.select.find(kShown[6]) != opts.select.end()) {
const std::set<string>& op_types = node->node->op_types();
info.push_back(absl::StrJoin(op_types, "|"));
}
if (opts.select.find(kShown[7]) != opts.select.end()) {
string run = FormatNumber(node->proto().total_run_count());
if (node->account) {
run = FormatNumber(node->proto().run_count()) + "/" + run;
} else {
run = "--/" + run;
}
string definition = FormatNumber(node->proto().total_definition_count());
if (node->account) {
definition = "1/" + definition;
} else {
definition = "--/" + definition;
}
info.push_back(run + "|" + definition);
}
if (opts.select.find(kShown[8]) != opts.select.end()) {
std::vector<string> shape_vec;
for (const auto& s : node->node->input_shapes()) {
if (s.second.empty()) {
shape_vec.push_back(absl::StrFormat("%d:unknown", s.first));
} else {
shape_vec.push_back(
absl::StrFormat("%d:%s", s.first, absl::StrJoin(s.second, "x")));
}
}
info.push_back(absl::StrJoin(shape_vec, "|"));
}
return absl::StrFormat("%s (%s)", node->name(), absl::StrJoin(info, ", "));
}
string TFShow::FormatLegend(const Options& opts) const {
std::vector<string> legends;
if (opts.select.find(kShown[2]) != opts.select.end()) {
legends.push_back("# parameters");
}
if (opts.select.find(kShown[3]) != opts.select.end()) {
legends.push_back("# float_ops");
}
if (opts.select.find(kShown[0]) != opts.select.end()) {
legends.push_back("requested bytes");
}
if (opts.select.find(kShown[11]) != opts.select.end()) {
legends.push_back("peak bytes");
}
if (opts.select.find(kShown[12]) != opts.select.end()) {
legends.push_back("residual bytes");
}
if (opts.select.find(kShown[13]) != opts.select.end()) {
legends.push_back("output bytes");
}
if (opts.select.find(kShown[1]) != opts.select.end()) {
legends.push_back("total execution time");
legends.push_back("accelerator execution time");
legends.push_back("cpu execution time");
}
if (opts.select.find(kShown[9]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
legends.push_back("accelerator execution time");
}
if (opts.select.find(kShown[10]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
legends.push_back("cpu execution time");
}
if (opts.select.find(kShown[5]) != opts.select.end()) {
legends.push_back("assigned devices");
}
if (opts.select.find(kShown[6]) != opts.select.end()) {
legends.push_back("op types");
}
if (opts.select.find(kShown[7]) != opts.select.end()) {
legends.push_back("op count (run|defined)");
}
if (opts.select.find(kShown[8]) != opts.select.end()) {
legends.push_back("input shapes");
}
return absl::StrFormat("node name | %s\n", absl::StrJoin(legends, " | "));
}
}  // namespace tfprof
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
string CheckAndRemoveDoc(const string& doc) {
auto pos = doc.find("Profile:");
CHECK(pos != doc.npos);
return doc.substr(pos + 9);
}
class TFProfShowTest : public ::testing::Test {
protected:
TFProfShowTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
std::unique_ptr<OpLogProto> op_log_pb(new OpLogProto());
string op_log_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/tfprof_log");
TF_CHECK_OK(ReadBinaryProto(Env::Default(), op_log_path, op_log_pb.get()));
string ckpt_path = io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/ckpt");
TF_Status* status = TF_NewStatus();
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader(
new checkpoint::CheckpointReader(ckpt_path, status));
CHECK(TF_GetCode(status) == TF_OK);
TF_DeleteStatus(status);
tf_stats_ =
std::make_unique<TFStats>(std::move(graph_pb), std::move(run_meta_pb),
std::move(op_log_pb), std::move(ckpt_reader));
tf_stats_->BuildAllViews();
}
string TestToFromProto(const string& cmd, const Options& opts,
bool show_multi_node = false) {
string profile_file = io::JoinPath(testing::TmpDir(), "profile");
tf_stats_->WriteProfile(profile_file);
TFStats new_stats(profile_file, nullptr);
new_stats.BuildAllViews();
if (show_multi_node) {
new_stats.ShowMultiGraphNode(cmd, opts);
} else {
new_stats.ShowGraphNode(cmd, opts);
}
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(),
opts.output_options.at("outfile"), &dump_str));
return dump_str;
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfShowTest, DumpScopeMode) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(
5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name",
{"VariableV2"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "peak_bytes", "residual_bytes", "output_bytes",
"micros", "accelerator_micros", "cpu_micros", "float_ops"},
"file", {{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"node name | # parameters | # float_ops | requested bytes | peak bytes | "
"residual bytes | output bytes | total execution time | accelerator "
"execution time | cpu execution time\n_TFProfRoot (--/451 params, --/0 "
"flops, --/2.56KB, --/2.56KB, --/2.56KB, --/2.56KB, --/13us, --/0us, "
"--/13us)\n DW (3x3x3x6, 162/162 params, 0/0 flops, 1.28KB/1.28KB, "
"1.28KB/1.28KB, 1.28KB/1.28KB, 1.28KB/1.28KB, 2us/2us, 0us/0us, "
"2us/2us)\n DW2 (2x2x6x12, 288/288 params, 0/0 flops, 1.28KB/1.28KB, "
"1.28KB/1.28KB, 1.28KB/1.28KB, 1.28KB/1.28KB, 11us/11us, 0us/0us, "
"11us/11us)\n ScalarW (1, 1/1 params, 0/0 flops, 0B/0B, 0B/0B, 0B/0B, "
"0B/0B, 0us/0us, 0us/0us, 0us/0us)\n",
CheckAndRemoveDoc(dump_str));
EXPECT_EQ(dump_str, TestToFromProto("scope", opts));
}
TEST_F(TFProfShowTest, DumpAcceleratorAndCPUMicros) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "cpu_micros",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"accelerator_micros", "cpu_micros"}, "file",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"node name | accelerator execution time | cpu execution "
"time\n_TFProfRoot (--/404us, --/4.54ms)\n Conv2D (226us/226us, "
"4.07ms/4.07ms)\n Conv2D_1 (178us/178us, 419us/419us)\n "
"_retval_Conv2D_1_0_0 (0us/0us, 41us/41us)\n DW2 (0us/0us, 11us/11us)\n "
" DW2/Assign (0us/0us, 0us/0us)\n DW2/Initializer (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal (0us/0us, 0us/0us)\n "
" DW2/Initializer/random_normal/RandomStandardNormal (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/mean (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/mul (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/shape (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/stddev (0us/0us, "
"0us/0us)\n DW2/read (0us/0us, 0us/0us)\n DW (0us/0us, 2us/2us)\n "
"DW/Assign (0us/0us, 0us/0us)\n DW/Initializer (0us/0us, 0us/0us)\n "
" DW/Initializer/random_normal (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/RandomStandardNormal (0us/0us, 0us/0us)\n "
" DW/Initializer/random_normal/mean (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/mul (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/shape (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/stddev (0us/0us, 0us/0us)\n DW/read "
"(0us/0us, 0us/0us)\n zeros (0us/0us, 2us/2us)\n ScalarW (0us/0us, "
"0us/0us)\n ScalarW/Assign (0us/0us, 0us/0us)\n "
"ScalarW/Initializer (0us/0us, 0us/0us)\n "
"ScalarW/Initializer/random_normal (0us/0us, 0us/0us)\n "
"ScalarW/Initializer/random_normal/RandomStandardNormal (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/mean (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/mul (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/shape (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/stddev (0us/0us, "
"0us/0us)\n ScalarW/read (0us/0us, 0us/0us)\n init (0us/0us, "
"0us/0us)\n",
CheckAndRemoveDoc(dump_str));
EXPECT_EQ(dump_str, TestToFromProto("scope", opts));
}
TEST_F(TFProfShowTest, DumpOpMode) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(
5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, "params",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops", "occurrence", "input_shapes"},
"file", {{"outfile", dump_file}});
tf_stats_->ShowMultiGraphNode("op", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"nodename|requestedbytes|totalexecutiontime|acceleratorexecutiontime|"
"cpuexecutiontime|#parameters|#float_ops|opoccurrence(run|defined)|"
"inputshapes\nVariableV22.56KB(100.00%,8.40%),13us(100.00%,0.26%),0us("
"100.00%,0.00%),13us(100.00%,0.29%),451params(100.00%,100.00%),0float_"
"ops(100.00%,0.00%),2|3\n\ninput_type:\t(run*2|defined*3)\texec_time:"
"13us\n\nAdd0B(0.00%,0.00%),0us(99.74%,0.00%),0us(100.00%,0.00%),0us(99."
"71%,0.00%),0params(0.00%,0.00%),0float_ops(100.00%,0.00%),0|3\n\ninput_"
"type:0:1,\t1:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:2x2x6x12,"
"\t1:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:3x3x3x6,\t1:1\t("
"run*0|defined*1)\texec_time:0us\n\nAssign0B(0.00%,0.00%),0us(99.74%,0."
"00%),0us(100.00%,0.00%),0us(99.71%,0.00%),0params(0.00%,0.00%),0float_"
"ops(100.00%,0.00%),0|3\n\ninput_type:0:1,\t1:1\t(run*0|defined*1)\texec_"
"time:0us\ninput_type:0:2x2x6x12,\t1:2x2x6x12\t(run*0|defined*1)\texec_"
"time:0us\ninput_type:0:3x3x3x6,\t1:3x3x3x6\t(run*0|defined*1)\texec_"
"time:0us\n\nConst0B(0.00%,0.00%),2us(99.74%,0.04%),0us(100.00%,0.00%),"
"2us(99.71%,0.04%),0params(0.00%,0.00%),0float_ops(100.00%,0.00%),1|"
"10\n\ninput_type:\t(run*1|defined*10)\texec_time:2us\n\nConv2D27.90KB("
"91.60%,91.60%),4.89ms(99.70%,98.87%),404us(100.00%,100.00%),4.49ms(99."
"67%,98.77%),0params(0.00%,0.00%),10.44kfloat_ops(100.00%,100.00%),2|"
"2\n\ninput_type:0:2x3x3x6,\t1:2x2x6x12\t(run*1|defined*1)\texec_time:"
"597us\ninput_type:0:2x6x6x3,\t1:3x3x3x6\t(run*1|defined*1)\texec_time:4."
"29ms\n\nIdentity0B(0.00%,0.00%),0us(0.83%,0.00%),0us(0.00%,0.00%),0us(0."
"90%,0.00%),0params(0.00%,0.00%),0float_ops(0.00%,0.00%),0|3\n\ninput_"
"type:0:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:2x2x6x12\t(run*"
"0|defined*1)\texec_time:0us\ninput_type:0:3x3x3x6\t(run*0|defined*1)"
"\texec_time:0us\n\n",
StringReplace(CheckAndRemoveDoc(dump_str), " ", ""));
EXPECT_EQ(dump_str, TestToFromProto("op", opts, true));
}
}  // namespace tfprof
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_show.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_show_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
72c4e89d-737d-4456-b334-74f2ee5903af | cpp | tensorflow/tensorflow | remove_successive_transpose | tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc | tensorflow/lite/toco/graph_transformations/tests/remove_successive_transpose_test.cc | #include <string>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
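// True when applying perm2 followed by perm1 is the identity permutation,
// i.e. perm1[perm2[i]] == i for every axis. Worked example: perm1 = {2, 0, 1}
// and perm2 = {1, 2, 0} compose to the identity, so the two transposes
// cancel.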
bool TransformsToIdentity(std::vector<int> const& perm1,
std::vector<int> const& perm2) {
if (perm2.size() != perm1.size() || perm1.empty()) {
return false;
}
for (size_t i = 0; i < perm1.size(); ++i) {
if (perm1[i] < 0 || perm1[i] >= static_cast<int>(perm1.size()) ||
perm2[i] < 0 || perm2[i] >= static_cast<int>(perm1.size())) {
return false;
}
if (perm1[perm2[i]] != static_cast<int>(i)) {
return false;
}
}
return true;
}
void ReplaceOpInputsWith(Model* model, const std::string& lookfor,
const std::string& replacewith) {
for (const auto& op : model->operators) {
for (size_t i = 0; i < op->inputs.size(); ++i) {
if (op->inputs[i] == lookfor) {
op->inputs[i] = replacewith;
}
}
}
}
}  // namespace
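// Removes a Transpose -> Transpose pair whose permutations cancel out,
// provided the first transpose's output feeds only the second and the
// second's output has at least one consumer; those consumers are rewired to
// the original input and both ops are deleted.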
::tensorflow::Status RemoveSuccessiveTranspose::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
auto op = model->operators.begin() + op_index;
if (op->get()->type != OperatorType::kTranspose) {
return absl::OkStatus();
}
TransposeOperator* t_op = static_cast<TransposeOperator*>(op->get());
if (CountOpsWithInput(*model, t_op->outputs[0]) != 1) {
return absl::OkStatus();
}
Operator* next = GetOpWithInput(*model, t_op->outputs[0]);
if (!next || next->type != OperatorType::kTranspose) {
return absl::OkStatus();
}
TransposeOperator* t_next = static_cast<TransposeOperator*>(next);
if (!CountOpsWithInput(*model, t_next->outputs[0])) {
return absl::OkStatus();
}
if (TransformsToIdentity(t_op->perm, t_next->perm)) {
ReplaceOpInputsWith(model, t_next->outputs[0], t_op->inputs[0]);
DeleteOpAndArrays(model, t_next);
DeleteOpAndArrays(model, t_op);
*modified = true;
}
return absl::OkStatus();
}
} | #include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace {
using ::testing::Test;
class RemoveSuccessiveTransposeTest : public Test {
protected:
RemoveSuccessiveTransposeTest() {}
void SetUp() override { model_ = std::make_unique<toco::Model>(); }
void CreateArray(const std::string& name, const std::vector<int>& shape) {
toco::Array& array = model_->GetOrCreateArray(name);
array.data_type = toco::ArrayDataType::kFloat;
toco::Shape* array_shape = array.mutable_shape();
*(array_shape->mutable_dims()) = shape;
}
void CreateConstantArray(const std::string& name,
const std::vector<int>& shape,
const std::vector<float>& data) {
CreateArray(name, shape);
toco::Array& array = model_->GetOrCreateArray(name);
auto& array_buffer = array.GetMutableBuffer<toco::ArrayDataType::kFloat>();
int bufsize = 1;
for (int dim : shape) {
bufsize *= dim;
}
array_buffer.data.resize(bufsize);
float* buf_ptr = array_buffer.data.data();
for (int i = 0; i < bufsize; ++i) {
buf_ptr[i] = data[i];
}
}
void CreateGraph(const std::vector<int>& perm1,
const std::vector<int>& perm2) {
CreateArray("InputA", {2, 2});
CreateArray("InputB", {2, 2});
CreateArray("Input", {2, 2});
CreateArray("InputTranspose", {2, 2});
CreateArray("InputTransposeTranspose", {2, 2});
CreateArray("InputTransposeTransposePlusB", {2, 2});
auto* add_op = new toco::AddOperator;
add_op->inputs = {"InputA", "InputB"};
add_op->outputs = {"Input"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(add_op));
auto* transpose_op = new toco::TransposeOperator;
transpose_op->inputs = {"Input"};
transpose_op->perm = perm1;
transpose_op->outputs = {"InputTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose_op));
auto* transpose2_op = new toco::TransposeOperator;
transpose2_op->inputs = {"InputTranspose"};
transpose2_op->perm = perm2;
transpose2_op->outputs = {"InputTransposeTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose2_op));
auto* add2_op = new toco::AddOperator;
add2_op->inputs = {"InputTransposeTranspose", "InputB"};
add2_op->outputs = {"InputTransposeTransposePlusB"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(add2_op));
}
std::unique_ptr<toco::Model> model_;
};
TEST_F(RemoveSuccessiveTransposeTest, RemoveTranspose) {
CreateGraph({1, 0}, {1, 0});
toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 1, &modified).ok());
EXPECT_TRUE(modified);
ASSERT_EQ(model_->operators.size(), 2);
ASSERT_EQ(model_->operators[0]->type, toco::OperatorType::kAdd);
ASSERT_EQ(model_->operators[1]->type, toco::OperatorType::kAdd);
ASSERT_EQ(model_->operators[1]->inputs[0], model_->operators[0]->outputs[0]);
}
TEST_F(RemoveSuccessiveTransposeTest, DontRemoveNotIdentityTranspose) {
CreateGraph({0, 2, 1}, {1, 0, 2});
toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 1, &modified).ok());
EXPECT_FALSE(modified);
}
TEST_F(RemoveSuccessiveTransposeTest, DontRemoveTransposeOutputUnused) {
CreateArray("InputA", {2, 2});
CreateArray("InputB", {2, 2});
CreateArray("Input", {2, 2});
CreateArray("InputTranspose", {2, 2});
CreateArray("InputTransposeTranspose", {2, 2});
auto* add_op = new toco::AddOperator;
add_op->inputs = {"InputA", "InputB"};
add_op->outputs = {"Input"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(add_op));
auto* transpose_op = new toco::TransposeOperator;
transpose_op->inputs = {"Input"};
transpose_op->perm = {0, 2, 1};
transpose_op->outputs = {"InputTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose_op));
auto* transpose2_op = new toco::TransposeOperator;
transpose2_op->inputs = {"InputTranspose"};
transpose2_op->perm = {0, 2, 1};
transpose2_op->outputs = {"InputTransposeTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose2_op));
toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 1, &modified).ok());
EXPECT_FALSE(modified);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/remove_successive_transpose_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7bab9db0-18bd-4eb9-8086-4bfa1cf88e70 | cpp | tensorflow/tensorflow | rpc_rendezvous_mgr | tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc | tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc | #include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
class RpcRemoteRendezvous : public BaseRemoteRendezvous {
public:
RpcRemoteRendezvous(const WorkerEnv* env, int64_t step_id)
: BaseRemoteRendezvous(env, step_id) {}
protected:
void RecvFromRemoteAsync(const Rendezvous::ParsedKey& parsed,
const Rendezvous::Args& args,
DoneCallback done) override;
private:
~RpcRemoteRendezvous() override {}
RpcRemoteRendezvous(const RpcRemoteRendezvous&) = delete;
void operator=(const RpcRemoteRendezvous&) = delete;
};
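// One outstanding RecvTensor RPC. Instances are pooled by
// RpcRecvTensorFreeList below and re-initialized between uses, which is why
// Reset() and the destructor insist the WorkerInterface was released first.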
class RpcRecvTensorCall : public BaseRecvTensorCall {
public:
RpcRecvTensorCall() : wi_(nullptr), dst_device_(nullptr) {}
void Init(WorkerInterface* wi, int64_t step_id, StringPiece key,
AllocatorAttributes alloc_attrs, Device* dst_device,
const Rendezvous::Args& recv_args, Rendezvous::DoneCallback done) {
wi_ = wi;
alloc_attrs_ = alloc_attrs;
dst_device_ = dst_device;
recv_args_ = recv_args;
done_ = std::move(done);
req_.set_step_id(step_id);
req_.set_rendezvous_key(key.data(), key.size());
req_.set_request_id(GetUniqueRequestId());
}
void Reset() {
DCHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
<< "Leaking WorkerInterface in RpcRecvTensorCall::Reset().";
alloc_attrs_ = AllocatorAttributes();
dst_device_ = nullptr;
req_.Clear();
resp_.Clear();
{
mutex_lock l(mu_);
status_ = absl::OkStatus();
}
done_ = nullptr;
}
~RpcRecvTensorCall() override {
CHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
<< "Leaking WorkerInterface in RpcRecvTensorCall destructor.";
}
void Start(std::function<void()> recv_done) override {
StartRTCall(std::move(recv_done));
}
void StartAbort(const Status& s) override {
{
mutex_lock l(mu_);
status_.Update(s);
}
opts_.StartCancel();
}
Status status() const override {
mutex_lock l(mu_);
return status_;
}
void ReleaseWorker(WorkerCacheInterface* worker_cache) {
DCHECK_NE(static_cast<WorkerInterface*>(nullptr), wi_)
<< "RpcRecvTensorCall::ReleaseWorker() called twice.";
worker_cache->ReleaseWorker(src_worker_, wi_);
wi_ = nullptr;
}
const Tensor& tensor() const { return resp_.tensor(); }
bool is_dead() const { return resp_.metadata().is_dead(); }
Device* dst_device() const { return dst_device_; }
const Rendezvous::Args& recv_args() const { return recv_args_; }
const Rendezvous::DoneCallback& done() const { return done_; }
private:
friend class RpcRemoteRendezvous;
void StartRTCall(std::function<void()> recv_done) {
resp_.InitAlloc(dst_device_, alloc_attrs_);
auto abort_checked = std::make_shared<Notification>();
auto cb = [this, abort_checked,
recv_done = std::move(recv_done)](const Status& s) {
abort_checked->WaitForNotification();
if (!s.ok()) {
mutex_lock l(mu_);
status_.Update(s);
}
recv_done();
};
wi_->RecvTensorAsync(&opts_, &req_, &resp_, std::move(cb));
Status s;
{
mutex_lock l(mu_);
s = status_;
}
if (!s.ok()) {
opts_.StartCancel();
}
abort_checked->Notify();
}
string src_worker_;
string src_rel_device_;
WorkerInterface* wi_;
AllocatorAttributes alloc_attrs_;
Device* dst_device_;
CallOptions opts_;
RecvTensorRequest req_;
TensorResponse resp_;
Rendezvous::Args recv_args_;
Rendezvous::DoneCallback done_;
mutable mutex mu_;
Status status_ TF_GUARDED_BY(mu_);
RpcRecvTensorCall(const RpcRecvTensorCall&) = delete;
void operator=(const RpcRecvTensorCall&) = delete;
};
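// Object pool for RpcRecvTensorCall; at most kMaxObjects instances are
// retained to bound the memory held across steps.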
class RpcRecvTensorFreeList {
public:
RpcRecvTensorFreeList() {}
~RpcRecvTensorFreeList() {
for (size_t i = 0; i < objects_.size(); i++) {
delete objects_[i];
}
}
RpcRecvTensorCall* New() {
{
mutex_lock l(mu_);
if (!objects_.empty()) {
RpcRecvTensorCall* result = objects_.back();
objects_.pop_back();
return result;
}
}
return new RpcRecvTensorCall;
}
void Release(RpcRecvTensorCall* obj) {
obj->Reset();
{
mutex_lock l(mu_);
if (objects_.size() < kMaxObjects) {
objects_.push_back(obj);
return;
}
}
delete obj;
}
private:
static constexpr int kMaxObjects = 1000;
mutex mu_;
std::vector<RpcRecvTensorCall*> objects_ TF_GUARDED_BY(mu_);
};
static RpcRecvTensorFreeList* get_call_freelist() {
static RpcRecvTensorFreeList* call_freelist = new RpcRecvTensorFreeList();
return call_freelist;
}
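// Resolves the source worker and destination device, then issues the
// RecvTensor RPC. The rendezvous takes a ref on itself for the duration of
// the call so it stays alive until the RPC completes.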
void RpcRemoteRendezvous::RecvFromRemoteAsync(
const Rendezvous::ParsedKey& parsed, const Rendezvous::Args& recv_args,
DoneCallback done) {
CHECK(is_initialized());
Status s;
RpcRecvTensorCall* call = get_call_freelist()->New();
if (!DeviceNameUtils::SplitDeviceName(parsed.src_device, &call->src_worker_,
&call->src_rel_device_)) {
s = errors::Internal(parsed.src_device,
" is invalid remote source device.");
}
WorkerSession* sess = session();
std::shared_ptr<WorkerCacheInterface> worker_cache =
sess->GetSharedWorkerCache();
WorkerInterface* rwi = worker_cache->GetOrCreateWorker(call->src_worker_);
if (s.ok() && rwi == nullptr) {
s = errors::Internal("No worker known as ", call->src_worker_);
}
Device* dst_device;
if (s.ok()) {
s = sess->device_mgr()->LookupDevice(parsed.dst_device, &dst_device);
}
if (!s.ok()) {
if (rwi != nullptr) {
sess->worker_cache()->ReleaseWorker(call->src_worker_, rwi);
}
get_call_freelist()->Release(call);
done(s, Args(), recv_args, Tensor{}, false);
return;
}
call->Init(rwi, step_id_, parsed.FullKey(), recv_args.alloc_attrs, dst_device,
recv_args, std::move(done));
RegisterCall(call, recv_args);
if (!call->status().ok()) {
DeregisterCall(call, recv_args);
call->ReleaseWorker(sess->worker_cache());
call->done()(call->status(), Args(), Args(), Tensor(), false);
get_call_freelist()->Release(call);
return;
}
Ref();
call->Start([this, call, recv_args, worker_cache]() {
DeregisterCall(call, recv_args);
Status s = call->status();
call->ReleaseWorker(session()->worker_cache());
call->done()(s, Args(), call->recv_args(), call->tensor(), call->is_dead());
get_call_freelist()->Release(call);
Unref();
});
}
}  // namespace
RpcRendezvousMgr::RpcRendezvousMgr(const WorkerEnv* env)
: BaseRendezvousMgr(env) {}
tsl::core::RefCountPtr<BaseRemoteRendezvous> RpcRendezvousMgr::Create(
int64_t step_id, const WorkerEnv* worker_env) {
return tsl::core::RefCountPtr<BaseRemoteRendezvous>(
new RpcRemoteRendezvous(worker_env, step_id));
}
} | #include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tstring>()() = content;
return tensor;
}
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<tstring>()();
}
Rendezvous::ParsedKey MakeKey(const string& s) {
Rendezvous::ParsedKey key;
CHECK(Rendezvous::ParseKey(s, &key).ok());
return key;
}
namespace {
class DummyWorker : public TestWorkerInterface {
public:
void RecvTensorAsync(CallOptions* opts, const RecvTensorRequest* request,
TensorResponse* response, StatusCallback done) override {
SchedClosure([done = std::move(done)]() {
const int64_t t_us = random::New64() % 100 * 1000;
Env::Default()->SleepForMicroseconds(t_us);
done(absl::OkStatus());
});
}
};
class DummyWorkerCache : public WorkerCacheInterface {
void ListWorkers(std::vector<string>* workers) const override {}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) const override {}
WorkerInterface* GetOrCreateWorker(const string& target) override {
if (dummy_remote_worker_ == nullptr) {
dummy_remote_worker_ = new DummyWorker;
}
return dummy_remote_worker_;
}
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
return errors::Unimplemented("Unimplemented.");
}
Status GetCoordinationClientCache(
std::unique_ptr<CoordinationClientCache>* coord_client_cache) override {
return errors::Unimplemented("Unimplemented.");
}
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {}
private:
DummyWorker* dummy_remote_worker_ = nullptr;
};
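// Minimal fake Device and DeviceMgr so the rendezvous can resolve the
// "/job:mnist/replica:1/task:2" device names used in the test keys.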
static Device* CreateDevice(const char* type, const char* name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
return new FakeDevice(attr);
}
static DeviceMgr* CreateDeviceMgr() {
std::unique_ptr<Device> d0(
CreateDevice("CPU", "/job:mnist/replica:1/task:2/cpu:1"));
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
return new StaticDeviceMgr(std::move(devices));
}
}
class RpcRendezvousMgrTest : public ::testing::Test {
protected:
RpcRendezvousMgrTest()
: cache_(new DummyWorkerCache),
worker_session_("rpc_session", "/job:mnist/replica:1/task:2",
std::unique_ptr<WorkerCacheInterface>(cache_),
std::unique_ptr<DeviceMgr>(CreateDeviceMgr()),
std::unique_ptr<GraphMgr>(), nullptr,
[](WorkerSession* worker_session, bool called,
DeviceMgr* remote_device_mgr) { return nullptr; }),
rmgr_(&env) {
env.env = Env::Default();
}
DummyWorkerCache* cache_;
WorkerEnv env;
WorkerSession worker_session_;
RpcRendezvousMgr rmgr_;
};
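// Send and receive within the same worker: the value should round-trip
// through the rendezvous without touching the RPC path.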
TEST_F(RpcRendezvousMgrTest, LocalSendRecv) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Send(key, args, V("peach"), false));
}
{
Tensor val(DT_FLOAT);
bool val_dead = false;
TF_ASSERT_OK(rmgr_.RecvLocal(step_id, key, &val, &val_dead));
EXPECT_EQ(V(val), "peach");
}
rmgr_.Cleanup(step_id);
}
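// A pending Recv should fail with Aborted when the rendezvous is torn down,
// either explicitly via StartAbort or implicitly via step cleanup.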
TEST_F(RpcRendezvousMgrTest, LocalAbort) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
SchedClosure([this, rendez = rendez.GetNewRef()]() {
env.env->SleepForMicroseconds(100 * 1000);
rendez->StartAbort(errors::Aborted(""));
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsAborted(rendez->Recv(key, args, &val, &val_dead)));
}
{
const int64_t step_id = 321;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
SchedClosure([this, step_id]() {
env.env->SleepForMicroseconds(100 * 1000);
rmgr_.Cleanup(step_id);
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsAborted(rendez->Recv(key, args, &val, &val_dead)));
}
}
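// Cancelling through the CancellationManager should fail a pending Recv
// with Cancelled.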
TEST_F(RpcRendezvousMgrTest, LocalCancel) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
auto* cm = new CancellationManager();
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Notification n;
SchedClosure([this, cm, &n]() {
env.env->SleepForMicroseconds(100 * 1000);
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsCancelled(rendez->Recv(key, args, &val, &val_dead)));
n.WaitForNotification();
delete cm;
}
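// If the tensor arrives before the cancellation fires, Recv should still
// succeed with the sent value.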
TEST_F(RpcRendezvousMgrTest, CancelAfterReceived) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
auto* cm = new CancellationManager();
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Notification n;
SchedClosure([this, rendez = rendez.get(), key, cm, &n]() {
env.env->SleepForMicroseconds(100 * 1000);
TF_ASSERT_OK(rendez->Send(key, Rendezvous::Args(), V("peach"), false));
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
TF_ASSERT_OK(rendez->Recv(key, args, &val, &val_dead));
EXPECT_EQ(V(val), "peach");
n.WaitForNotification();
delete cm;
}
namespace {
class DummyDeviceContext : public DeviceContext {
public:
explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {}
~DummyDeviceContext() override {}
int stream_id() const { return stream_id_; }
private:
const int stream_id_;
};
}
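// The device context attached to Send should be visible in the receiver's
// send_args, identified here by its stream id.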
TEST_F(RpcRendezvousMgrTest, TransferDummyDeviceContext) {
DummyDeviceContext* dc = new DummyDeviceContext(123);
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Rendezvous::Args args;
args.device_context = dc;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
TF_ASSERT_OK(rendez->Send(key, args, V("peach"), false));
}
{
Notification n;
rmgr_.RecvLocalAsync(
step_id, key,
[&n](const Status& s, const Rendezvous::Args send_args,
const Rendezvous::Args recv_args, const Tensor& val,
bool is_dead) {
auto send_dev_context =
static_cast<DummyDeviceContext*>(send_args.device_context);
CHECK_EQ(123, send_dev_context->stream_id());
CHECK_EQ(V(val), "peach");
n.Notify();
});
n.WaitForNotification();
}
rmgr_.Cleanup(step_id);
dc->Unref();
}
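// A key whose source lives in a different job exercises the remote path
// through DummyWorkerCache/DummyWorker.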
TEST_F(RpcRendezvousMgrTest, RemoteRecvOne) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:worker/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
Tensor val(DT_STRING);
bool val_dead = false;
TF_ASSERT_OK(rendez->Recv(key, args, &val, &val_dead));
}
rmgr_.Cleanup(step_id);
}
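// Issues 10000 concurrent remote RecvAsync calls to stress the call
// bookkeeping under contention.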
TEST_F(RpcRendezvousMgrTest, RemoteRecvAsyncMany) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:worker/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
int num_requests = 10000;
Tensor val(DT_STRING);
mutex mu_;
Status status = absl::OkStatus();
BlockingCounter counter(num_requests);
for (int i = 0; i < num_requests; i++) {
rendez->RecvAsync(
key, args,
[&mu_, &status, &counter](const Status& s, const Rendezvous::Args&,
const Rendezvous::Args&, const Tensor&,
const bool) {
{
mutex_lock l(mu_);
status.Update(s);
}
counter.DecrementCount();
});
}
counter.Wait();
TF_ASSERT_OK(status);
}
rmgr_.Cleanup(step_id);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
360c2c87-d341-492d-b99c-25d8a17c02b4 | cpp | tensorflow/tensorflow | permutation_util | third_party/xla/xla/permutation_util.cc | third_party/xla/xla/permutation_util_test.cc | #include "xla/permutation_util.h"
#include <vector>
#include "absl/container/inlined_vector.h"
namespace xla {
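// A valid permutation of size n maps onto [0, n) with no repeats; the seen
// bitmap catches out-of-range and duplicated entries.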
bool IsPermutation(absl::Span<const int64_t> permutation) {
absl::InlinedVector<bool, 8> seen(permutation.size(), false);
for (int64_t p : permutation) {
if (p < 0 || p >= permutation.size() || seen[p]) {
return false;
}
seen[p] = true;
}
return true;
}
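// The inverse satisfies output[input[i]] == i for all i.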
std::vector<int64_t> InversePermutation(
absl::Span<const int64_t> input_permutation) {
DCHECK(IsPermutation(input_permutation));
std::vector<int64_t> output_permutation(input_permutation.size(), -1);
for (size_t i = 0; i < input_permutation.size(); ++i) {
output_permutation[input_permutation[i]] = i;
}
return output_permutation;
}
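// Composition applies p2 first, then p1: output[i] = p1[p2[i]].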
std::vector<int64_t> ComposePermutations(absl::Span<const int64_t> p1,
absl::Span<const int64_t> p2) {
CHECK_EQ(p1.size(), p2.size());
std::vector<int64_t> output;
output.reserve(p1.size());
for (size_t i = 0; i < p1.size(); ++i) {
output.push_back(p1.at(p2.at(i)));
}
return output;
}
bool IsIdentityPermutation(absl::Span<const int64_t> permutation) {
for (int64_t i = 0; i < permutation.size(); ++i) {
if (permutation[i] != i) {
return false;
}
}
return true;
}
} | #include "xla/permutation_util.h"
#include "xla/test.h"
namespace xla {
namespace {
TEST(PermutationUtilTest, IsPermutation) {
EXPECT_TRUE(IsPermutation({}));
EXPECT_TRUE(IsPermutation({0}));
EXPECT_FALSE(IsPermutation({-3}));
EXPECT_TRUE(IsPermutation({0, 1}));
EXPECT_FALSE(IsPermutation({1, 1}));
EXPECT_TRUE(IsPermutation({1, 0}));
EXPECT_TRUE(IsPermutation({3, 1, 0, 2}));
EXPECT_FALSE(IsPermutation({3, 0, 2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/permutation_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/permutation_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
066c5d82-ae42-4a0c-8c65-e9015f33cc6c | cpp | tensorflow/tensorflow | gpu_runner | tensorflow/core/tfrt/gpu/kernel/gpu_runner.cc | tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc | #include "tensorflow/core/tfrt/gpu/kernel/gpu_runner.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/compiler/jit/pjrt_compile_util.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/jit/xla_launch_util.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/framework/device_id_manager.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/gpu_variables_table.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/kernel_registry.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace gpu {
namespace {
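// Copies a host tensor to the given GPU device through the PJRT device
// context. The copy runs on the blocking work queue; the returned async
// value becomes available (or carries an error) when the transfer finishes.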
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> TransferTensorToDevice(
const tfrt_stub::FallbackTensor& tensor, tfrt::HostContext* host_ctx,
Device* gpu_device) {
const tensorflow::Tensor& src = tensor.tensor();
tensorflow::AllocatorAttributes attr;
attr.set_use_pjrt_allocator(true);
tensorflow::Tensor dst(gpu_device->GetAllocator(attr), src.dtype(),
src.shape());
if (src.shape().num_elements() == 0) {
return tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(dst);
}
auto result =
tfrt::MakeUnconstructedAsyncValueRef<tfrt_stub::FallbackTensor>();
DeviceContext* pjrt_device_context =
gpu_device->tensorflow_accelerator_device_info()->pjrt_context;
bool enqueued = tfrt::EnqueueBlockingWork(
host_ctx, [result = result.CopyRef(), gpu_device, pjrt_device_context,
src, dst = std::move(dst)]() mutable {
tensorflow::Notification n;
tensorflow::Status status;
pjrt_device_context->CopyCPUTensorToDevice(
&src, gpu_device, &dst, [&status, &n](Status s) mutable {
status = s;
n.Notify();
});
n.WaitForNotification();
if (!status.ok()) {
result.SetError(absl::InternalError(status.message()));
} else {
result.emplace(std::move(dst));
}
});
if (!enqueued) {
return tfrt::MakeErrorAsyncValueRef(absl::InternalError(
"Failed to enqueue blocking task to transfer tensor."));
}
return result;
}
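// Mirror of TransferTensorToDevice: copies a device tensor back into a
// host-allocated tensor via CopyDeviceTensorToCPU.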
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> TransferTensorFromDevice(
const tfrt_stub::FallbackTensor& tensor, tfrt::HostContext* host_ctx,
Device* cpu_device, Device* gpu_device) {
const tensorflow::Tensor& src = tensor.tensor();
tensorflow::AllocatorAttributes attr;
tensorflow::Tensor dst(cpu_device->GetAllocator(attr), src.dtype(),
src.shape());
if (src.shape().num_elements() == 0) {
return tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(dst);
}
auto result =
tfrt::MakeUnconstructedAsyncValueRef<tfrt_stub::FallbackTensor>();
DeviceContext* pjrt_device_context =
gpu_device->tensorflow_accelerator_device_info()->pjrt_context;
bool enqueued = tfrt::EnqueueBlockingWork(
host_ctx, [result = result.CopyRef(), gpu_device, pjrt_device_context,
src, dst = std::move(dst)]() mutable {
tensorflow::Notification n;
tensorflow::Status status;
pjrt_device_context->CopyDeviceTensorToCPU(
&src, "tensor_name", gpu_device, &dst,
[&status, &n](Status s) mutable {
status = s;
n.Notify();
});
n.WaitForNotification();
if (!status.ok()) {
result.SetError(absl::InternalError(status.message()));
} else {
result.emplace(std::move(dst));
}
});
if (!enqueued) {
return tfrt::MakeErrorAsyncValueRef(absl::InternalError(
"Failed to enqueue blocking task to transfer tensor."));
}
return result;
}
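// Wraps each PJRT output buffer in a TF tensor (donating the buffer) and
// returns them as available fallback-tensor async values. Tuple outputs and
// constant/resource outputs are not expected here.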
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
PopulateResultsFromPjRtExecutableOutputs(
const XlaCompiler::CompilationResult& compilation_result,
std::vector<std::unique_ptr<xla::PjRtBuffer>>& executable_outputs,
Device* device, int num_outputs) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>
fallback_tensor_results;
for (int i = 0; i < num_outputs; ++i) {
const DataType& dtype = compilation_result.outputs[i].type;
CHECK(!compilation_result.outputs[i].is_constant);
CHECK(dtype != DT_RESOURCE);
xla::PjRtBuffer* output_buffer = executable_outputs[i].get();
if (output_buffer->IsTuple()) {
return absl::InvalidArgumentError(
"Tuple PJRT buffer output is not supported.");
}
absl::Span<const int64_t> dims;
std::optional<std::vector<int64_t>> logical_dims_storage;
if (output_buffer->has_dynamic_dimensions()) {
TF_ASSIGN_OR_RETURN(std::vector<int64_t> logical_dims,
output_buffer->logical_dimensions());
logical_dims_storage.emplace(std::move(logical_dims));
dims = *logical_dims_storage;
} else {
dims = output_buffer->dimensions();
}
TensorShape tensor_shape;
for (int i = 0; i < dims.size(); ++i) {
TF_RETURN_IF_ERROR(tensor_shape.AddDimWithStatus(dims[i]));
}
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
MakeTensorFromPjRtBuffer(dtype, tensor_shape,
std::move(executable_outputs[i])));
auto result = tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(
output_tensor);
fallback_tensor_results.emplace_back(std::move(result));
}
return fallback_tensor_results;
}
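// Copies the outputs listed in used_output_indices back to the host; all
// other outputs are passed through still resident on the device.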
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
TransferOutputsToHostIfNeeded(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> outputs,
absl::Span<const int64_t> used_output_indices, Device* cpu_device,
Device* gpu_device, tfrt::HostContext* host_ctx) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results;
for (int i = 0, j = 0; i < outputs.size(); ++i) {
if (j < used_output_indices.size() && i == used_output_indices[j]) {
CHECK(outputs[i].IsAvailable());
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> output_on_cpu =
TransferTensorFromDevice(outputs[i].get(), host_ctx, cpu_device,
gpu_device);
results.push_back(std::move(output_on_cpu));
++j;
} else {
results.push_back(std::move(outputs[i]));
}
}
return results;
}
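// Stages arguments onto the chosen GPU. Resource (variable) arguments are
// cached in vars_table so later steps reuse the device copy; when
// variables_are_shared, cache entries are keyed by platform device and the
// copies are spread round-robin across the devices on that platform.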
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
TransferVariablesAndInputs(int device_idx,
absl::Span<const tfrt_stub::FallbackTensor> args,
absl::Span<const int64_t> resource_indices,
Device* cpu_device,
const absl::flat_hash_map<int, Device*>& gpu_devices,
tfrt::gpu::GpuVariablesTable& vars_table,
bool variables_are_shared,
tfrt::HostContext* host_ctx) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results;
tsl::PlatformDeviceId platform_device_id;
DeviceType device_type(DEVICE_GPU);
TF_RETURN_IF_ERROR(tsl::DeviceIdManager::TfToPlatformDeviceId(
device_type, tsl::TfDeviceId(device_idx), &platform_device_id));
TF_ASSIGN_OR_RETURN(const std::vector<tsl::TfDeviceId> devices_on_platform,
tsl::DeviceIdManager::GetTfDevicesOnPlatform(
device_type, platform_device_id));
absl::flat_hash_set<int64_t> resource_indices_set(resource_indices.begin(),
resource_indices.end());
const int cache_copy_idx =
variables_are_shared ? platform_device_id.value() : device_idx;
for (int i = 0, resource_idx = 0; i < args.size(); ++i) {
if (resource_indices_set.contains(i)) {
VLOG(2) << "Transfer resource arg[" << i << "].";
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> device_tensor;
auto cached_device_variable =
vars_table.GetDeviceVariable(args[i], cache_copy_idx);
if (cached_device_variable) {
VLOG(2) << "Cache hit for resource arg[" << i << "].";
device_tensor = cached_device_variable.CopyRef();
} else {
VLOG(2) << "Cache miss for resource arg[" << i << "].";
int gpu_device_idx;
if (variables_are_shared) {
const int idx = resource_idx % devices_on_platform.size();
gpu_device_idx = devices_on_platform[idx].value();
} else {
gpu_device_idx = device_idx;
}
VLOG(2) << "Transfer the resource arg[" << i << "] to device "
<< gpu_device_idx << ".";
device_tensor = TransferTensorToDevice(args[i], host_ctx,
gpu_devices.at(gpu_device_idx));
vars_table.AddOrUpdateDeviceVariable(args[i], cache_copy_idx,
std::move(device_tensor));
device_tensor =
vars_table.GetDeviceVariable(args[i], cache_copy_idx).CopyRef();
}
results.push_back(device_tensor);
++resource_idx;
} else {
VLOG(2) << "Transfer input arg[" << i << "].";
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> device_tensor =
TransferTensorToDevice(args[i], host_ctx, gpu_devices.at(device_idx));
results.push_back(device_tensor);
}
}
return results;
}
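// Fingerprints the function signature together with the session metadata;
// Run() uses this as the key for serving-device selection.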
absl::StatusOr<uint64_t> GenerateFingerprint(
const std::string& function_name,
const tfd::KernelFallbackCompatRequestState* fallback_request_state) {
const FunctionLibraryDefinition* flib_def =
fallback_request_state->cpu_function_library_runtime()
->GetFunctionLibraryDefinition();
const FunctionDef* fdef = flib_def->Find(function_name);
if (!fdef) {
return absl::InternalError(
absl::StrCat("Failed to find the function ", function_name));
}
return tsl::Fingerprint64(
absl::StrCat(fallback_request_state->session_metadata().name(),
fallback_request_state->session_metadata().version(),
tsl::LegacyUnredactedDebugString(fdef->signature())));
}
std::vector<XlaCompiler::Argument> BuildXlaCompilerArguments(
absl::Span<const tfrt_stub::FallbackTensor> inputs) {
std::vector<XlaCompiler::Argument> out;
out.resize(inputs.size());
for (int input_num = 0; input_num < inputs.size(); ++input_num) {
const tensorflow::Tensor& input = inputs[input_num].tensor();
CHECK_GT(input.NumElements(), 0);
CHECK(input.dtype() != DT_RESOURCE);
XlaCompiler::Argument& arg = out[input_num];
arg.kind = XlaCompiler::Argument::kParameter;
arg.type = input.dtype();
arg.shape = input.shape();
}
return out;
}
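// Compiles the TF function to a PjRtLoadedExecutable for the selected
// device, in strict mode (compilation completes before execution).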
Status CompileProgram(const GpuRunInputs& run_inputs, int device_idx,
const XlaCompiler::CompilationResult** compilation_result,
xla::PjRtClient** pjrt_client,
xla::PjRtLoadedExecutable** pjrt_executable) {
std::vector<XlaCompiler::Argument> xla_compiler_args =
BuildXlaCompilerArguments(run_inputs.args);
DeviceBase* device = run_inputs.gpu_devices.at(device_idx);
FunctionLibraryRuntime* flr =
run_inputs.fallback_request_state->process_function_library_runtime()
.GetFLR(run_inputs.gpu_devices.at(device_idx)->name());
XlaPlatformInfo platform_info =
XlaPlatformInfoFromDevice(run_inputs.gpu_devices.at(device_idx));
NameAttrList function;
function.set_name(run_inputs.func_name);
ResourceMgr* rm = tfrt_global::GetTFGlobalResourceMgr();
return CompileToPjRtLoadedExecutable(
device, platform_info, function, xla_compiler_args,
DeviceCompileMode::kStrict,
false,
false, flr, rm, compilation_result,
pjrt_client, pjrt_executable);
}
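// Runs the compiled executable on the selected PJRT device after verifying
// that every transferred argument arrived without error.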
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
ExecuteProgram(
const GpuRunInputs& run_inputs,
const llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>&
transferred_args,
const XlaCompiler::CompilationResult* compilation_result,
xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* pjrt_executable,
int device_idx) {
std::vector<const Tensor*> inputs;
for (const auto& arg : transferred_args) {
if (arg.IsError()) {
return absl::InternalError(
absl::StrCat("Data transfer failed: ", arg.GetError().message()));
}
inputs.push_back(&arg->tensor());
}
if (compilation_result->collective_info.has_value()) {
return absl::UnimplementedError(
"Execution with collectives is not supported.");
}
TF_ASSIGN_OR_RETURN(
xla::PjRtDevice * pjrt_device,
pjrt_client->LookupAddressableDevice(xla::PjRtLocalDeviceId(device_idx)));
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<xla::PjRtBuffer>> executable_outputs,
RunPjRtExecutable(0, inputs,
{}, {},
DeviceType(DEVICE_GPU),
true, *compilation_result,
pjrt_device, pjrt_client, pjrt_executable));
TF_ASSIGN_OR_RETURN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results,
PopulateResultsFromPjRtExecutableOutputs(
*compilation_result, executable_outputs,
run_inputs.gpu_devices.at(device_idx), run_inputs.num_outputs));
return TransferOutputsToHostIfNeeded(
results, run_inputs.used_output_indices, run_inputs.cpu_device,
run_inputs.gpu_devices.at(device_idx), run_inputs.host_ctx);
}
}
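// Picks a device via the serving-device selector, compiles, stages inputs,
// and returns unconstructed result async values that are fulfilled once the
// asynchronous execution completes.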
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
GpuRunner::Run(GpuRunInputs run_inputs) {
TF_ASSIGN_OR_RETURN(uint64_t fingerprint,
GenerateFingerprint(run_inputs.func_name,
run_inputs.fallback_request_state));
tsl::DeviceReservation device_reservation =
serving_device_selector_->ReserveDevice(absl::StrCat(fingerprint));
const int device_idx = device_reservation.device_index();
VLOG(1) << "GpuRunner selected device " << device_idx << ".";
const XlaCompiler::CompilationResult* compilation_result;
xla::PjRtClient* pjrt_client;
xla::PjRtLoadedExecutable* pjrt_executable;
TF_RETURN_IF_ERROR(CompileProgram(run_inputs, device_idx, &compilation_result,
&pjrt_client, &pjrt_executable));
TF_ASSIGN_OR_RETURN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>
transferred_args,
TransferVariablesAndInputs(
device_idx, run_inputs.args, run_inputs.resource_indices,
run_inputs.cpu_device, run_inputs.gpu_devices, vars_table_,
false, run_inputs.host_ctx));
llvm::SmallVector<tfrt::RCReference<tfrt::AsyncValue>, 4>
transferred_args_to_wait;
for (const auto& arg : transferred_args) {
if (!arg.IsAvailable()) {
transferred_args_to_wait.push_back(arg.CopyRCRef());
}
}
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results;
results.reserve(run_inputs.num_outputs);
for (size_t i = 0; i < run_inputs.num_outputs; ++i) {
results.emplace_back(
tfrt::MakeUnconstructedAsyncValueRef<tfrt_stub::FallbackTensor>());
}
tfrt::RunWhenReady(
transferred_args_to_wait,
[run_inputs = std::move(run_inputs),
transferred_args = std::move(transferred_args), results = results,
compilation_result, pjrt_client, pjrt_executable, device_idx]() mutable {
auto execution_outputs =
ExecuteProgram(run_inputs, transferred_args, compilation_result,
pjrt_client, pjrt_executable, device_idx);
          if (!execution_outputs.ok()) {
            for (size_t i = 0; i < results.size(); ++i) {
              results[i].SetError(
                  absl::InternalError(execution_outputs.status().message()));
            }
            return;
          }
          CHECK_EQ(results.size(), execution_outputs->size());
for (int i = 0; i < results.size(); ++i) {
auto& result = results[i];
auto& output_av = (*execution_outputs)[i];
output_av.AndThen([result = result, output_av = output_av] {
result.emplace(std::move(output_av.get().tensor()));
});
}
});
return results;
}
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/tfrt/gpu/kernel/gpu_runner.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
namespace tensorflow {
namespace gpu {
namespace {
constexpr int kNumVirtualGpuDevices = 1;
constexpr char kFunctionName[] = "foo";
StatusOr<std::unique_ptr<Graph>> SampleGraphAddXY() {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
return graph;
}
StatusOr<FunctionDef> SampleFunctionAddXY(const std::string& name) {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
FunctionDef fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph, name, &fdef));
return fdef;
}
Status GetDevices(const tensorflow::tfd::KernelFallbackCompatRequestState*
fallback_request_state,
Device** cpu_device,
absl::flat_hash_map<int, Device*>& gpu_devices) {
*cpu_device = fallback_request_state->device_manager().HostCPU();
if (!*cpu_device) {
return absl::InternalError(
"Fallback request state must have a valid host cpu device.");
}
for (Device* device :
fallback_request_state->device_manager().ListDevices()) {
if (device->device_type() != DEVICE_GPU) continue;
if (!gpu_devices.try_emplace(device->parsed_name().id, device).second) {
return absl::InternalError(absl::StrCat(
"A device with the same device ID already exists when adding ",
device->name()));
}
}
if (gpu_devices.empty()) {
return absl::InternalError("No GPU device is found.");
}
for (const auto& [id, device] : gpu_devices) {
if (id >= gpu_devices.size()) {
return absl::InternalError("Device IDs are not consecutive.");
}
}
return OkStatus();
}
template <typename T>
Tensor CreateTensor(const TensorShape& input_shape,
gtl::ArraySlice<T> input_data,
Allocator* allocator = nullptr) {
Tensor tensor(DataTypeToEnum<T>::value, input_shape);
test::FillValues<T>(&tensor, input_data);
return tensor;
}
class GpuRunnerTest : public ::testing::Test {
protected:
void SetUp() override {
tensorflow::SessionOptions session_options;
TF_ASSERT_OK_AND_ASSIGN(FunctionDef fdef,
SampleFunctionAddXY(kFunctionName));
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK_AND_ASSIGN(fallback_state_, tfrt_stub::FallbackState::Create(
session_options, fdef_lib));
std::function<void(std::function<void()>)> runner =
[](const std::function<void()>& f) { f(); };
tfrt_stub::OpKernelRunnerTable runner_table;
tfd::FallbackResourceArray resource_array;
fallback_request_state_ =
std::make_unique<tfd::KernelFallbackCompatRequestState>(
&runner, &fallback_state_->device_manager(), 0,
&runner_table, &resource_array,
nullptr,
std::nullopt,
&fallback_state_->process_function_library_runtime());
auto host_allocator = tfrt::CreateMallocAllocator();
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
2, 2);
host_context_ = std::make_unique<tfrt::HostContext>(
[&](const tfrt::DecodedDiagnostic& diag) {}, std::move(host_allocator),
std::move(work_queue));
tfrt::RequestContextBuilder req_ctx_builder =
tfrt::RequestContextBuilder(host_context_.get(), nullptr);
tfrt::Expected<tfrt::RCReference<tfrt::RequestContext>> req_ctx(
std::move(req_ctx_builder).build());
ASSERT_TRUE(!!req_ctx);
exec_ctx_ = std::make_unique<tfrt::ExecutionContext>(std::move(*req_ctx));
auto policy = std::make_unique<tsl::RoundRobinPolicy>();
serving_device_selector_ = std::make_unique<GpuServingDeviceSelector>(
kNumVirtualGpuDevices, std::move(policy));
gpu_runner_ = std::make_unique<GpuRunner>(serving_device_selector_.get());
}
std::unique_ptr<tfrt_stub::FallbackState> fallback_state_;
std::unique_ptr<tfd::KernelFallbackCompatRequestState>
fallback_request_state_;
std::unique_ptr<tfrt::HostContext> host_context_;
std::unique_ptr<tfrt::ExecutionContext> exec_ctx_;
std::unique_ptr<GpuServingDeviceSelector> serving_device_selector_;
std::unique_ptr<GpuRunner> gpu_runner_;
};
TEST_F(GpuRunnerTest, Basic) {
GpuRunInputs run_inputs;
llvm::SmallVector<tfrt_stub::FallbackTensor> args;
Tensor tensor1 = CreateTensor<int32>(TensorShape({1, 2}), {1, 2});
Tensor tensor2 = CreateTensor<int32>(TensorShape({1, 2}), {3, 4});
args.push_back(tfrt_stub::FallbackTensor(tensor1));
args.push_back(tfrt_stub::FallbackTensor(tensor2));
run_inputs.args = &args;
run_inputs.num_outputs = 1;
run_inputs.resource_indices = tfrt::ArrayRef<int64_t>(0);
run_inputs.used_output_indices = tfrt::ArrayRef<int64_t>(0);
run_inputs.func_name = kFunctionName;
absl::flat_hash_map<int, Device*> gpu_devices;
ASSERT_OK(GetDevices(fallback_request_state_.get(), &run_inputs.cpu_device,
gpu_devices));
run_inputs.gpu_devices = &gpu_devices;
run_inputs.fallback_request_state = fallback_request_state_.get();
run_inputs.exec_ctx = exec_ctx_.get();
TF_ASSERT_OK_AND_ASSIGN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> outputs,
gpu_runner_->Run(run_inputs));
llvm::SmallVector<tfrt::RCReference<tfrt::AsyncValue>, 4> outputs_to_wait;
for (const auto& output : outputs) {
if (!output.IsAvailable()) {
outputs_to_wait.push_back(output.CopyRCRef());
}
}
exec_ctx_->host()->Await(outputs_to_wait);
ASSERT_EQ(outputs.size(), 1);
auto expected = CreateTensor<int32>(TensorShape({1, 2}), {4, 6});
test::ExpectTensorEqual<int32>(expected, outputs[0].get().tensor());
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/gpu/kernel/gpu_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bff43165-4287-4ce8-91f3-effe03e6f7a8 | cpp | tensorflow/tensorflow | graph_constructor | tensorflow/core/common_runtime/graph_constructor.cc | tensorflow/core/common_runtime/graph_constructor_test.cc | #include "tensorflow/core/common_runtime/graph_constructor.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/versions.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
static constexpr const bool kDoNotCheckDuplicates = true;
inline bool IsMerge(const NodeDef& node_def) {
return node_def.op() == "Merge" || node_def.op() == "RefMerge" ||
node_def.op() == "_XlaMerge";
}
inline bool IsNextIteration(const NodeDef& node_def) {
return node_def.op() == "NextIteration" ||
node_def.op() == "RefNextIteration";
}
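// Accepts names of the form [A-Za-z0-9.][A-Za-z0-9_.\-/]*, with a leading
// '_' also allowed for internal ops, optionally chained with '>' separators.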
bool IsValidNodeName(StringPiece s, bool allow_internal_ops) {
using ::tensorflow::strings::Scanner;
Scanner scanner(s);
scanner
.One(allow_internal_ops ? Scanner::LETTER_DIGIT_DOT_UNDERSCORE
: Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
while (true) {
if (!scanner.GetResult())
return false;
if (scanner.empty())
return true;
scanner.One(Scanner::RANGLE)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
}
}
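// Drives GraphDef -> Graph conversion. TryImport() runs the pipeline:
// validate names and the input map, index the NodeDefs, seed the ready set
// from edge counts, convert nodes in topological order, then patch up back
// edges, versions, and return values.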
class GraphConstructor {
public:
struct Options {
Options(const GraphConstructorOptions& in)
: allow_internal_ops(in.allow_internal_ops),
expect_device_spec(in.expect_device_spec),
propagate_device_spec(false),
uniquify_names(false),
uniquify_prefix(false),
skip_mapped_nodes(false),
importing(false),
validate_nodes(in.validate_nodes),
validate_colocation_constraints(false),
add_default_attributes(in.add_default_attributes) {}
Options(const ImportGraphDefOptions& in)
: allow_internal_ops(false),
expect_device_spec(false),
propagate_device_spec(in.propagate_device_spec),
prefix(in.prefix.empty() || absl::EndsWith(in.prefix, "/")
? in.prefix
: in.prefix + "/"),
uniquify_names(in.uniquify_names),
uniquify_prefix(in.uniquify_prefix),
input_map(in.input_map.begin(), in.input_map.end()),
skip_mapped_nodes(in.skip_mapped_nodes),
control_dependencies(in.control_dependencies),
return_tensors(in.return_tensors.begin(), in.return_tensors.end()),
return_nodes(in.return_nodes),
importing(true),
validate_nodes(true),
validate_colocation_constraints(in.validate_colocation_constraints),
validate_shape(in.validate_shape),
default_device(in.default_device) {}
bool allow_internal_ops;
bool expect_device_spec;
bool propagate_device_spec;
string prefix;
bool uniquify_names;
bool uniquify_prefix;
std::map<TensorId, TensorId> input_map;
bool skip_mapped_nodes;
std::vector<string> control_dependencies;
std::vector<TensorId> return_tensors;
std::vector<string> return_nodes;
bool importing;
bool validate_nodes;
bool validate_colocation_constraints;
bool validate_shape = true;
bool add_default_attributes = true;
string default_device;
};
typedef absl::Span<const NodeDef* const> NodeDefSlice;
static Status Construct(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys);
static Status Construct(
const Options& opts, GraphDef&& graph_def, Graph* g,
ShapeRefiner* refiner, std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys);
protected:
GraphConstructor(const Options& opts, Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: opts_(opts),
g_(g),
original_versions_(g->versions()),
prefix_(opts.prefix),
refiner_(refiner),
return_tensors_(return_tensors),
return_nodes_(return_nodes),
missing_unused_input_map_keys_(missing_unused_input_map_keys) {}
virtual ~GraphConstructor() {}
Status TryImport() {
TF_RETURN_IF_ERROR(EnsureNoNameCollisions());
TF_RETURN_IF_ERROR(ValidateInputMapAndControlDependencies());
TF_RETURN_IF_ERROR(BuildNodeIndex());
TF_RETURN_IF_ERROR(InitFromEdges());
TF_RETURN_IF_ERROR(Convert());
TF_RETURN_IF_ERROR(AddBackEdges());
TF_RETURN_IF_ERROR(UpdateVersionDef());
TF_RETURN_IF_ERROR(PopulateReturnTensors());
TF_RETURN_IF_ERROR(PopulateReturnNodes());
TF_RETURN_IF_ERROR(PopulateMissingUnusedInputMapKeys());
UpdateUniquifiedColocationNames();
FixupSourceAndSinkEdges(g_);
return absl::OkStatus();
}
private:
Status EnsureNoNameCollisions();
Status ValidateInputMapAndControlDependencies();
Status BuildNodeIndex();
Status InitFromEdges();
Status Convert();
Status AddBackEdges();
Status UpdateVersionDef();
Status PopulateReturnTensors();
Status PopulateReturnNodes();
Status PopulateMissingUnusedInputMapKeys();
FunctionDefLibraryStackTraces CreateStackTracesForFunctionDefLibrary(
const FunctionDefLibrary& library) const;
void Undo();
void PrintCycles();
void DFS(int cur_node, std::vector<int>* cur_branch,
std::vector<bool>* is_on_cur_branch,
absl::flat_hash_set<int>* unvisited,
const std::vector<absl::string_view>& node_names);
Status IsNodeFullyMapped(const NodeDef& node_def, bool* is_node_mapped);
Status ValidateColocationConstraints(const NodeDef& node_def);
Status MakeNode(NodeDef&& node_def, Node** node);
Status MakeEdge(Node* src, int output_index, Node* dst, int input_index);
Status ValidateShape(Node* node);
Status ModifyNodeDefForImport(NodeDef* node_def);
void RemapNodeDefInputs(NodeDef* node_def,
std::vector<bool>* input_already_exists);
void AddControlDependencies(NodeDef* node_def,
std::vector<bool>* input_already_exists);
void AddPrefixToNodeDef(const std::vector<bool>& input_already_exists,
NodeDef* node_def);
void UniquifyNames(const std::vector<bool>& input_already_exists,
NodeDef* node_def);
void UpdateUniquifiedColocationNames();
bool NameExistsInGraph(StringPiece name);
bool NameExistsInGraphDef(StringPiece name);
string FindUniqueName(StringPiece original_name);
void UpdatePendingCountAndReady(int processed, bool is_next_iteration);
virtual size_t node_def_count() const = 0;
virtual const NodeDef& get_node_def(int i) const = 0;
virtual NodeDef consume_node_def(int i) = 0;
virtual const VersionDef* versions() const = 0;
virtual std::optional<FunctionDefLibrary> consume_library() = 0;
virtual const GraphDebugInfo* debug_info() const = 0;
const Options opts_;
Graph* g_;
const VersionDef original_versions_;
string prefix_;
StackTracesMap traces_;
ShapeRefiner* refiner_;
std::vector<std::pair<Node*, int>>* return_tensors_;
std::vector<Node*>* return_nodes_;
std::vector<SafeTensorId>* missing_unused_input_map_keys_;
std::set<TensorId> used_input_map_keys_;
absl::flat_hash_set<int> merge_node_indices_;
struct NodeInfo {
explicit NodeInfo(int i) : gdef_index(i), node(nullptr) {}
NodeInfo() : NodeInfo(-1) {}
int gdef_index;
Node* node;
};
absl::flat_hash_map<std::string, NodeInfo> gdef_nodes_;
absl::flat_hash_set<StringPiece> gdef_prefixes_;
absl::flat_hash_map<StringPiece, Node*> existing_nodes_;
absl::flat_hash_set<StringPiece> existing_prefixes_;
gtl::FlatMap<string, string> uniquified_names_;
std::set<int> ready_;
std::vector<int> pending_count_;
std::vector<absl::InlinedVector<int, 4UL>> outputs_;
struct InputInfo {
explicit InputInfo(const string& node_name, Node* n, int i)
: name(node_name), node(n), index(i) {}
string name;
Node* node;
int index;
static bool IsControlInput(const InputInfo& input) {
return input.index == Graph::kControlSlot;
}
static int CompareName(const InputInfo& lhs, const InputInfo& rhs) {
return lhs.name < rhs.name;
}
static bool IsSameName(const InputInfo& lhs, const InputInfo& rhs) {
return lhs.name == rhs.name;
}
};
struct EdgeInfo {
explicit EdgeInfo(const string& name, int i1, Node* n, int i2)
: src_name(name), src_index(i1), dst_node(n), dst_index(i2) {}
string src_name;
int src_index;
Node* dst_node;
int dst_index;
};
std::vector<EdgeInfo> back_edges_;
GraphConstructor(const GraphConstructor&) = delete;
void operator=(const GraphConstructor&) = delete;
};
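// Implementation that reads NodeDefs from borrowed storage; consuming copies.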
class NodeDefCopyingGraphConstructor : public GraphConstructor {
public:
NodeDefCopyingGraphConstructor(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: GraphConstructor(opts, g, refiner, return_tensors, return_nodes,
missing_unused_input_map_keys),
node_defs_(node_defs),
versions_(versions),
library_(library),
debug_info_(debug_info) {}
private:
size_t node_def_count() const override { return node_defs_.size(); }
const NodeDef& get_node_def(int i) const override { return *node_defs_[i]; }
NodeDef consume_node_def(int i) override { return *node_defs_[i]; }
const VersionDef* versions() const override { return versions_; }
std::optional<FunctionDefLibrary> consume_library() override {
if (library_ == nullptr) {
return std::nullopt;
} else {
return *library_;
}
}
const GraphDebugInfo* debug_info() const override { return debug_info_; }
const NodeDefSlice node_defs_;
const VersionDef* const versions_;
const FunctionDefLibrary* const library_;
const GraphDebugInfo* const debug_info_;
};
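// Implementation that owns the GraphDef and moves NodeDefs out one at a
// time, tracking consumption so nothing is read after being moved from.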
class NodeDefMovingGraphConstructor : public GraphConstructor {
public:
NodeDefMovingGraphConstructor(
const Options& opts, GraphDef&& graph_def, Graph* g,
ShapeRefiner* refiner, std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: GraphConstructor(opts, g, refiner, return_tensors, return_nodes,
missing_unused_input_map_keys),
graph_def_(std::move(graph_def)),
is_consumed_(graph_def_.node_size(), false) {}
private:
size_t node_def_count() const override { return graph_def_.node().size(); }
const NodeDef& get_node_def(int i) const override {
CHECK(!is_consumed_[i])
<< "NodeDef " << i << " accessed after it was consumed.";
return graph_def_.node(i);
}
NodeDef consume_node_def(int i) override {
CHECK(!is_consumed_[i]) << "NodeDef " << i << " consumed twice.";
is_consumed_[i] = true;
return std::move(*graph_def_.mutable_node(i));
}
const VersionDef* versions() const override { return &graph_def_.versions(); }
std::optional<FunctionDefLibrary> consume_library() override {
return std::move(*graph_def_.mutable_library());
}
const GraphDebugInfo* debug_info() const override {
return &graph_def_.debug_info();
}
GraphDef graph_def_;
std::vector<bool> is_consumed_;
};
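// GraphDef versions advance roughly daily, so a producer more than 21
// versions ahead is outside the ~3-week forward-compatibility window.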
bool ForwardCompatibilityWindowPassed(const VersionDef& versions) {
return (versions.producer() - TF_GRAPH_DEF_VERSION) > 21;
}
Status MaybeAppendVersionWarning(const VersionDef* versions,
const Status& import_status) {
if (versions && ForwardCompatibilityWindowPassed(*versions)) {
return Status(
import_status.code(),
absl::StrCat(
"Converting GraphDef to Graph has failed with an error: '",
import_status.message(),
"' The binary trying to import the GraphDef was built when "
"GraphDef version was ",
TF_GRAPH_DEF_VERSION,
". The GraphDef was produced by a binary built when GraphDef "
"version was ",
versions->producer(),
". The difference between these versions is larger than "
"TensorFlow's forward compatibility guarantee, and might be the "
"root cause for failing to import the GraphDef."));
}
return import_status;
}
Status GraphConstructor::Construct(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys) {
if (versions) {
TF_RETURN_IF_ERROR(CheckVersions(*versions, TF_GRAPH_DEF_VERSION,
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
"GraphDef", "graph"));
}
NodeDefCopyingGraphConstructor c(opts, node_defs, versions, library,
debug_info, g, refiner, return_tensors,
return_nodes, missing_unused_input_map_keys);
Status s = c.TryImport();
if (!s.ok()) {
c.Undo();
s = MaybeAppendVersionWarning(versions, s);
}
return s;
}
Status GraphConstructor::Construct(
const Options& opts, GraphDef&& graph_def, Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys) {
TF_RETURN_IF_ERROR(CheckVersions(graph_def.versions(), TF_GRAPH_DEF_VERSION,
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
"GraphDef", "graph"));
VersionDef version_def = graph_def.versions();
NodeDefMovingGraphConstructor c(opts, std::move(graph_def), g, refiner,
return_tensors, return_nodes,
missing_unused_input_map_keys);
Status s = c.TryImport();
if (!s.ok()) {
c.Undo();
s = MaybeAppendVersionWarning(&version_def, s);
}
return s;
}
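// After importing node `processed`, decrement the pending count of each of
// its consumers; NextIteration->Merge edges are skipped because Merge nodes
// are deliberately made ready before their loop back edge arrives.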
void GraphConstructor::UpdatePendingCountAndReady(int processed,
bool is_next_iteration) {
for (size_t i = 0; i < outputs_[processed].size(); ++i) {
const int output = outputs_[processed][i];
bool is_next_iteration_to_merge_edge =
is_next_iteration && merge_node_indices_.count(output) == 1;
if (!is_next_iteration_to_merge_edge) {
int* current_pending_count = &pending_count_[output];
CHECK_GT(*current_pending_count, 0);
(*current_pending_count)--;
if (*current_pending_count == 0) {
ready_.insert(output);
}
}
}
}
bool NodeNameInValues(const std::map<TensorId, TensorId>& input_map,
const StringPiece& node_name) {
for (auto iter = input_map.begin(); iter != input_map.end(); ++iter) {
if (iter->second.first == node_name) return true;
}
return false;
}
bool NodeNameInValues(const std::vector<string>& control_dependencies,
const StringPiece& node_name) {
return std::find(control_dependencies.begin(), control_dependencies.end(),
node_name) != control_dependencies.end();
}
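// Records every '/'-delimited prefix of node_name (e.g. "a/b/c" adds "a"
// and "a/b") so prefix collisions can be detected.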
void AddPrefixes(StringPiece node_name,
absl::flat_hash_set<StringPiece>* prefixes) {
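  // idx wraps to the max size_t, so the first find() below starts at offset 0.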
size_t idx = -1;
while ((idx = node_name.find('/', idx + 1)) != StringPiece::npos) {
prefixes->insert(node_name.substr(0, idx));
}
}
Status GraphConstructor::EnsureNoNameCollisions() {
existing_nodes_.reserve(g_->num_nodes());
for (Node* n : g_->nodes()) {
bool already_exists = !existing_nodes_.insert({n->name(), n}).second;
if (already_exists) {
if (NodeNameInValues(opts_.input_map, n->name())) {
return errors::InvalidArgument(
"cannot resolve input_map because multiple nodes exist with name '",
n->name(), "'");
}
if (NodeNameInValues(opts_.control_dependencies, n->name())) {
return errors::InvalidArgument(
"cannot resolve control_dependencies because multiple nodes exist "
"with name '",
n->name(), "'");
}
}
AddPrefixes(n->name(), &existing_prefixes_);
}
if (prefix_.empty() && opts_.importing && !opts_.uniquify_names) {
for (size_t i = 0; i < node_def_count(); ++i) {
const string& name = get_node_def(i).name();
if (NameExistsInGraph(name)) {
return errors::InvalidArgument("Node name '", name,
"' already exists in the Graph");
}
}
} else if (!prefix_.empty()) {
StringPiece prefix_no_slash(prefix_);
prefix_no_slash.remove_suffix(1);
if (!IsValidNodeName(prefix_no_slash, false)) {
return errors::InvalidArgument("Imported node name prefix '", prefix_,
"' would lead to invalid node names");
}
if (NameExistsInGraph(prefix_no_slash) && opts_.uniquify_prefix) {
prefix_ = strings::StrCat(FindUniqueName(prefix_no_slash), "/");
}
}
return absl::OkStatus();
}
Status GraphConstructor::ValidateInputMapAndControlDependencies() {
for (const auto& mapping : opts_.input_map) {
TensorId src = mapping.first;
TensorId dst = mapping.second;
if (existing_nodes_.count(dst.first) == 0) {
return errors::InvalidArgument(
"node '", dst.first, "' in input_map does not exist in graph ",
"(input_map entry: ", src.ToString(), "->", dst.ToString(), ")");
}
if ((src.second == Graph::kControlSlot) !=
(dst.second == Graph::kControlSlot)) {
return errors::InvalidArgument("input_map entry ", src.ToString(), "->",
dst.ToString(), " between ",
"control edge and non-control edge");
}
}
for (const string& node : opts_.control_dependencies) {
if (existing_nodes_.count(node) == 0) {
return errors::InvalidArgument(
"node '", node,
"' in control_dependencies does not exist in "
"graph");
}
}
return absl::OkStatus();
}
Status GraphConstructor::BuildNodeIndex() {
for (int n = 0; n < node_def_count(); ++n) {
const NodeDef& node_def = get_node_def(n);
if (!IsValidNodeName(node_def.name(), opts_.allow_internal_ops)) {
return errors::InvalidArgument(
"Node '", node_def.name(),
"': Node name contains invalid characters");
}
if (!gdef_nodes_.insert(std::make_pair(node_def.name(), NodeInfo(n)))
.second) {
return errors::InvalidArgument("Node '", node_def.name(),
"' is not unique");
}
if (node_def.op().empty()) {
return errors::InvalidArgument("Node '", node_def.name(),
"' does not specify an operation");
}
if (opts_.expect_device_spec && node_def.device().empty()) {
return errors::InvalidArgument("Node '", node_def.name(),
"' is missing a device specification");
}
if (IsMerge(node_def)) {
merge_node_indices_.insert(n);
}
bool in_control_dependence = false;
for (int i = 0; i < node_def.input_size(); ++i) {
StringPiece input_name = node_def.input(i);
if (!input_name.empty() && absl::StartsWith(input_name, "^")) {
in_control_dependence = true;
} else if (in_control_dependence) {
return errors::InvalidArgument(
"Node '", node_def.name(),
"': Control dependencies must come after regular dependencies");
}
}
AddPrefixes(node_def.name(), &gdef_prefixes_);
}
return absl::OkStatus();
}
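// Computes each node's pending input count and seeds ready_ with nodes that
// have none. A Merge node with a NextIteration back edge only waits on its
// control inputs plus one data input, which lets loops be imported.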
Status GraphConstructor::InitFromEdges() {
const int num_nodes = node_def_count();
pending_count_.reserve(num_nodes);
outputs_.resize(num_nodes);
gtl::FlatSet<string> next_iteration_nodes;
for (int n = 0; n < node_def_count(); ++n) {
const NodeDef& node_def = get_node_def(n);
if (IsNextIteration(node_def)) {
next_iteration_nodes.insert(node_def.name());
}
}
for (int n = 0; n < num_nodes; ++n) {
const NodeDef& node_def = get_node_def(n);
int pending_count = node_def.input_size();
if (IsMerge(node_def)) {
int32_t num_control_edges = 0;
bool has_loop_back_edge = false;
for (int i = 0; i < node_def.input_size(); ++i) {
StringPiece input_name(node_def.input(i));
if (absl::StartsWith(input_name, "^")) {
num_control_edges++;
} else {
TensorId id(ParseTensorName(input_name));
if (next_iteration_nodes.find(string(id.first)) !=
next_iteration_nodes.end()) {
has_loop_back_edge = true;
}
}
}
if (has_loop_back_edge) {
pending_count = num_control_edges + 1;
}
}
for (int i = 0; i < node_def.input_size(); ++i) {
StringPiece input_name = node_def.input(i);
TensorId id(ParseTensorName(input_name));
if (opts_.input_map.count(id) == 0) {
auto iter = gdef_nodes_.find(id.first);
if (iter == gdef_nodes_.end()) {
return errors::InvalidArgument("Node '", node_def.name(),
"': Unknown input node '",
node_def.input(i), "'");
}
outputs_[iter->second.gdef_index].push_back(n);
} else {
--pending_count;
DCHECK_GE(pending_count, 0);
}
}
if (pending_count == 0) {
ready_.insert(n);
}
pending_count_.push_back(pending_count);
}
return absl::OkStatus();
}
Status GraphConstructor::ValidateColocationConstraints(
const NodeDef& node_def) {
if (!opts_.validate_colocation_constraints || !opts_.importing)
return absl::OkStatus();
const auto iter = node_def.attr().find(kColocationAttrName);
if (iter == node_def.attr().end()) return absl::OkStatus();
for (const string& c : iter->second.list().s()) {
StringPiece s(c);
if (absl::ConsumePrefix(&s, kColocationGroupPrefix) &&
gdef_nodes_.find(s) == gdef_nodes_.end()) {
return errors::InvalidArgument(
"Node '", node_def.name(),
"' expects to be colocated with unknown node '", s, "'");
}
}
return absl::OkStatus();
}
Status GraphConstructor::MakeNode(NodeDef&& node_def, Node** node) {
Status status;
*node = g_->AddNode(std::move(node_def), &status);
if (!status.ok()) return status;
if (opts_.expect_device_spec ||
(opts_.propagate_device_spec && !(*node)->def().device().empty())) {
(*node)->set_assigned_device_name((*node)->def().device());
}
return absl::OkStatus();
}
Status GraphConstructor::ValidateShape(Node* node) {
if (!opts_.importing || !opts_.validate_shape) return absl::OkStatus();
TF_RETURN_IF_ERROR(refiner_->AddNode(node));
std::vector<const TensorShapeProto*> shape_attrs;
const char* kAttrName = "_output_shapes";
if (!TryGetNodeAttr(node->attrs(), kAttrName, &shape_attrs)) {
return absl::OkStatus();
}
auto* ic = refiner_->GetContext(node);
DCHECK(ic != nullptr)
<< "ShapeRefiner::AddNode() should have created the InferenceContext";
if (shape_attrs.size() < node->num_outputs()) {
return errors::InvalidArgument(
"Node '", node->name(), "' has ", node->num_outputs(),
" outputs but the ", kAttrName, " attribute specifies shapes for ",
shape_attrs.size(), " outputs");
}
if (shape_attrs.size() > node->num_outputs()) {
LOG(WARNING) << "Node '" << node->name() << "' has " << node->num_outputs()
<< " outputs but the " << kAttrName
<< " attribute specifies shapes for " << shape_attrs.size()
<< " outputs. Output shapes may be inaccurate.";
}
for (int i = 0; i < node->num_outputs(); ++i) {
const TensorShapeProto& p = *shape_attrs[i];
shape_inference::ShapeHandle h;
Status s = ic->MakeShapeFromShapeProto(p, &h);
if (!s.ok()) {
return errors::InvalidArgument("Node '", node->name(), " has an invalid ",
kAttrName, " attribute (shape #", i,
" error:'", s.message(), "'");
}
s = refiner_->SetShape(node, i, h);
if (!s.ok()) {
return errors::InvalidArgument(
"Node '", node->name(), "' has an ", kAttrName,
" attribute inconsistent with the GraphDef for output #", i, ": ",
s.message());
}
}
node->ClearAttr(kAttrName);
return absl::OkStatus();
}
Status GraphConstructor::ModifyNodeDefForImport(NodeDef* node_def) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(g_->op_registry()->LookUpOpDef(node_def->op(), &op_def));
AddDefaultsToNodeDef(*op_def, node_def);
TF_RETURN_IF_ERROR(ValidateNodeDef(*node_def, *op_def));
if (versions()) {
TF_RETURN_IF_ERROR(CheckOpDeprecation(*op_def, versions()->producer()));
}
return absl::OkStatus();
}
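// Removes the inputs at the given (sorted) indices from node_def and keeps
// input_already_exists aligned with the surviving inputs.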
void RemoveInputs(const std::vector<int>& inputs_to_remove, NodeDef* node_def,
std::vector<bool>* input_already_exists) {
NodeDef copy;
copy.mutable_input()->Reserve(node_def->input_size() -
inputs_to_remove.size());
for (int i = 0, j = 0; i < node_def->input_size(); ++i) {
if (j < inputs_to_remove.size() && i == inputs_to_remove[j]) {
++j;
} else {
copy.add_input()->swap(*node_def->mutable_input(i));
}
}
node_def->mutable_input()->Swap(copy.mutable_input());
for (int idx : inputs_to_remove) {
input_already_exists->erase(input_already_exists->begin() + idx);
}
DCHECK_EQ(input_already_exists->size(), node_def->input_size());
}
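// Rewrites inputs found in opts_.input_map to their mapped tensors, marking
// them as pre-existing and dropping duplicate mapped control inputs.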
void GraphConstructor::RemapNodeDefInputs(
NodeDef* node_def, std::vector<bool>* input_already_exists) {
DCHECK_EQ(input_already_exists->size(), node_def->input_size());
std::set<TensorId> control_inputs;
std::vector<int> inputs_to_remove;
for (int i = 0; i < node_def->input_size(); ++i) {
auto iter = opts_.input_map.find(ParseTensorName(node_def->input(i)));
if (iter == opts_.input_map.end()) continue;
used_input_map_keys_.insert(iter->first);
TensorId new_input = iter->second;
if (new_input.second == Graph::kControlSlot) {
if (control_inputs.count(new_input) > 0) {
inputs_to_remove.push_back(i);
continue;
}
control_inputs.insert(new_input);
}
node_def->set_input(i, new_input.ToString());
(*input_already_exists)[i] = true;
}
if (!inputs_to_remove.empty()) {
RemoveInputs(inputs_to_remove, node_def, input_already_exists);
}
}
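// Appends opts_.control_dependencies to `node_def` unless the node will
// inherit them transitively through another imported (non-back-edge) input.
// Dependencies already present on the node are not added twice.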
void GraphConstructor::AddControlDependencies(
NodeDef* node_def, std::vector<bool>* input_already_exists) {
bool inherits_deps = false;
for (int i = 0; i < node_def->input_size(); ++i) {
if ((*input_already_exists)[i]) continue;
TensorId id(ParseTensorName(node_def->input(i)));
auto iter = gdef_nodes_.find(id.first);
DCHECK(iter != gdef_nodes_.end()) << id.first;
if (iter->second.node == nullptr) {
continue;
}
inherits_deps = true;
}
if (inherits_deps) return;
for (const string& control_dep : opts_.control_dependencies) {
string input = TensorId(control_dep, Graph::kControlSlot).ToString();
bool found = false;
for (int i = node_def->input_size() - 1; i >= 0; --i) {
const string& node_input = node_def->input(i);
if (node_input[0] != '^') {
break;
}
if (node_input == input) {
found = true;
break;
}
}
if (found) {
continue;
}
node_def->add_input(input);
input_already_exists->push_back(true);
}
}
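// Prepends `prefix_` to the node name, to inputs that refer to other newly
// imported nodes, and to colocation ("_class") group names.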
void GraphConstructor::AddPrefixToNodeDef(
const std::vector<bool>& input_already_exists, NodeDef* node_def) {
if (prefix_.empty()) return;
node_def->set_name(strings::StrCat(prefix_, node_def->name()));
for (int i = 0; i < node_def->input_size(); ++i) {
if (input_already_exists[i]) continue;
StringPiece input(node_def->input(i));
if (absl::ConsumePrefix(&input, "^")) {
node_def->set_input(i, strings::StrCat("^", prefix_, input));
} else {
node_def->set_input(i, strings::StrCat(prefix_, input));
}
}
if (node_def->attr().find(kColocationAttrName) != node_def->attr().end()) {
auto* list =
node_def->mutable_attr()->at(kColocationAttrName).mutable_list();
for (int i = 0; i < list->s_size(); ++i) {
StringPiece v(list->s(i));
if (absl::ConsumePrefix(&v, kColocationGroupPrefix)) {
list->set_s(i, strings::StrCat(kColocationGroupPrefix, prefix_, v));
}
}
}
}
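// Renames `node_def` if its name collides with an existing node or prefix,
// and rewrites any inputs that refer to previously renamed nodes.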
void GraphConstructor::UniquifyNames(
const std::vector<bool>& input_already_exists, NodeDef* node_def) {
if (NameExistsInGraph(node_def->name())) {
string old_name = node_def->name();
node_def->set_name(FindUniqueName(node_def->name()));
uniquified_names_[old_name] = node_def->name();
}
for (int i = 0; i < node_def->input_size(); ++i) {
if (input_already_exists[i]) continue;
TensorId id = ParseTensorName(node_def->input(i));
auto iter = uniquified_names_.find(string(id.first));
if (iter == uniquified_names_.end()) continue;
id.first = iter->second;
node_def->set_input(i, id.ToString());
}
}
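// Rewrites colocation ("_class") attrs on imported nodes so that they refer
// to the uniquified node names.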
void GraphConstructor::UpdateUniquifiedColocationNames() {
for (const auto& pair : gdef_nodes_) {
Node* node = pair.second.node;
if (node == nullptr) continue;
std::vector<string> coloc_values;
if (!TryGetNodeAttr(node->attrs(), kColocationAttrName, &coloc_values))
continue;
bool updated = false;
for (size_t i = 0; i < coloc_values.size(); ++i) {
StringPiece val(coloc_values[i]);
if (absl::ConsumePrefix(&val, kColocationGroupPrefix)) {
auto name_pair = uniquified_names_.find(string(val));
if (name_pair == uniquified_names_.end()) continue;
updated = true;
coloc_values[i] =
strings::StrCat(kColocationGroupPrefix, name_pair->second);
}
}
if (updated) {
node->AddAttr(kColocationAttrName, std::move(coloc_values));
}
}
}
bool GraphConstructor::NameExistsInGraph(StringPiece name) {
if (existing_nodes_.find(name) != existing_nodes_.end()) return true;
if (existing_prefixes_.find(name) != existing_prefixes_.end()) return true;
return false;
}
bool GraphConstructor::NameExistsInGraphDef(StringPiece name) {
if (gdef_nodes_.find(name) != gdef_nodes_.end()) return true;
if (gdef_prefixes_.find(name) != gdef_prefixes_.end()) return true;
return false;
}
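// Returns `original_name` if it is free, otherwise the name with the smallest
// numeric suffix "_<n>" that collides with neither the existing graph nor the
// rest of the imported GraphDef.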
string GraphConstructor::FindUniqueName(StringPiece original_name) {
string name(original_name);
int count = 0;
while (NameExistsInGraph(name) || (count > 0 && NameExistsInGraphDef(name))) {
name = strings::StrCat(original_name, "_", ++count);
}
return name;
}
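// Sets *is_node_mapped to true iff every output of `node_def` appears as a
// key in opts_.input_map, in which case the node can be skipped entirely.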
Status GraphConstructor::IsNodeFullyMapped(const NodeDef& node_def,
bool* is_node_mapped) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(g_->op_registry()->LookUpOpDef(node_def.op(), &op_def));
for (int i = 0; i < op_def->output_arg_size(); ++i) {
if (opts_.input_map.find({node_def.name(), i}) == opts_.input_map.end()) {
*is_node_mapped = false;
return absl::OkStatus();
}
}
*is_node_mapped = true;
return absl::OkStatus();
}
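// Depth-first search used for error reporting only: walks the unprocessed
// part of the graph and logs every cycle found along the current branch.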
void GraphConstructor::DFS(int cur_node, std::vector<int>* cur_branch,
std::vector<bool>* is_on_cur_branch,
absl::flat_hash_set<int>* unvisited,
const std::vector<absl::string_view>& node_names) {
cur_branch->push_back(cur_node);
is_on_cur_branch->at(cur_node) = true;
for (auto next_node : outputs_[cur_node]) {
if (unvisited->find(next_node) != unvisited->end()) {
if (is_on_cur_branch->at(next_node)) {
auto iter =
std::find(cur_branch->begin(), cur_branch->end(), next_node);
LOG(WARNING) << "Cycle detected:";
while (iter != cur_branch->end()) {
const absl::string_view name = node_names[*iter];
DCHECK(!name.empty());
LOG(WARNING) << "node id=" << *iter << ", name=" << name;
++iter;
}
LOG(WARNING) << "End of cycle";
} else {
DFS(next_node, cur_branch, is_on_cur_branch, unvisited, node_names);
}
}
}
cur_branch->pop_back();
is_on_cur_branch->at(cur_node) = false;
unvisited->erase(cur_node);
}
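// Logs all cycles remaining among unprocessed nodes. Called only on the
// error path, when Convert() could not process every NodeDef.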
void GraphConstructor::PrintCycles() {
int num_nodes = outputs_.size();
std::vector<absl::string_view> node_names;
node_names.resize(num_nodes);
for (const auto& named_node : gdef_nodes_) {
DCHECK_GE(named_node.second.gdef_index, 0);
DCHECK_LT(named_node.second.gdef_index, num_nodes);
node_names[named_node.second.gdef_index] = named_node.first;
}
absl::flat_hash_set<int> unvisited;
for (int i = 0; i < num_nodes; i++) {
unvisited.insert(i);
}
while (!unvisited.empty()) {
int cur_node = *unvisited.begin();
std::vector<int> cur_branch;
std::vector<bool> is_on_cur_branch(num_nodes, false);
DFS(cur_node, &cur_branch, &is_on_cur_branch, &unvisited, node_names);
}
}
FunctionDefLibraryStackTraces
GraphConstructor::CreateStackTracesForFunctionDefLibrary(
const FunctionDefLibrary& library) const {
if (debug_info() == nullptr) {
FunctionDefLibraryStackTraces library_traces;
return library_traces;
} else {
return FunctionLibraryDefinition::CreateStackTracesForFunctionDefLibrary(
library, *debug_info());
}
}
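// Main driver: processes NodeDefs in topological order, applying the import
// options (input remapping, control dependencies, prefixing, name
// uniquification), creating nodes and edges, and reporting any cycles among
// the NodeDefs that could not be processed.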
Status GraphConstructor::Convert() {
if (debug_info() != nullptr) {
traces_ = LoadTracesFromDebugInfo(*debug_info());
}
if (auto library = consume_library(); library.has_value()) {
FunctionDefLibraryStackTraces library_traces;
for (const FunctionDef& fdef : library->function()) {
const std::string& function_name = fdef.signature().name();
StackTracesMap& function_traces = library_traces[function_name];
std::string key_suffix = absl::StrCat("@", function_name);
for (const auto& [traces_key, stack_trace] : traces_) {
if (!absl::EndsWith(traces_key, key_suffix)) continue;
std::string node_key =
std::string(absl::StripSuffix(traces_key, key_suffix));
function_traces[node_key] = stack_trace;
}
}
TF_RETURN_IF_ERROR(
g_->AddFunctionLibrary(*std::move(library), library_traces));
}
std::vector<InputInfo> inputs;
int processed = 0;
std::vector<bool> input_already_exists;
while (!ready_.empty()) {
int o = *ready_.begin();
ready_.erase(ready_.begin());
++processed;
inputs.clear();
bool has_data_back_edge = false;
NodeDef node_def = consume_node_def(o);
input_already_exists.clear();
input_already_exists.resize(node_def.input_size(), false);
std::string node_name = node_def.name();
if (opts_.importing) {
if (opts_.skip_mapped_nodes) {
bool is_node_mapped = false;
TF_RETURN_IF_ERROR(IsNodeFullyMapped(node_def, &is_node_mapped));
if (is_node_mapped) {
UpdatePendingCountAndReady(o, IsNextIteration(node_def));
continue;
}
}
if (!opts_.input_map.empty()) {
RemapNodeDefInputs(&node_def, &input_already_exists);
}
if (!opts_.control_dependencies.empty()) {
AddControlDependencies(&node_def, &input_already_exists);
}
if (!opts_.default_device.empty() && node_def.device().empty()) {
node_def.set_device(opts_.default_device);
}
}
DCHECK_EQ(node_def.input_size(), input_already_exists.size());
TF_RETURN_IF_ERROR(ValidateColocationConstraints(node_def));
for (int i = 0; i < node_def.input_size(); ++i) {
TensorId tensor_id = ParseTensorName(node_def.input(i));
Node* src_node;
int src_index;
if (!input_already_exists[i]) {
auto iter = gdef_nodes_.find(tensor_id.node());
DCHECK(iter != gdef_nodes_.end()) << tensor_id.node();
src_node = iter->second.node;
src_index = tensor_id.index();
if (src_node == nullptr) has_data_back_edge = true;
} else {
auto iter = existing_nodes_.find(tensor_id.node());
DCHECK(iter != existing_nodes_.end()) << tensor_id.node();
src_node = iter->second;
src_index = tensor_id.index();
}
if (src_node != nullptr && src_index >= src_node->num_outputs()) {
std::ostringstream out;
out << "Node '" << node_def.name() << "': Connecting to invalid output "
<< tensor_id.index() << " of source node " << tensor_id.node()
<< " which has " << src_node->num_outputs() << " outputs.";
if (src_node->type_string() == "If" ||
src_node->type_string() == "StatelessIf" ||
src_node->type_string() == "While" ||
src_node->type_string() == "StatelessWhile") {
out << " Try using "
<< "tf.compat.v1.experimental.output_all_intermediates(True).";
}
return errors::InvalidArgument(out.str());
}
inputs.emplace_back(string(tensor_id.node()), src_node, src_index);
}
if (has_data_back_edge && !IsMerge(node_def)) {
return errors::InvalidArgument(
"Node '", node_def.name(),
"' had a back edge, but only Merge nodes can have back edges.");
}
Node* node;
if (opts_.importing) {
if (!prefix_.empty()) {
AddPrefixToNodeDef(input_already_exists, &node_def);
}
if (opts_.uniquify_names && (prefix_.empty() || !opts_.uniquify_prefix)) {
UniquifyNames(input_already_exists, &node_def);
}
}
if (opts_.importing) {
TF_RETURN_IF_ERROR(ModifyNodeDefForImport(&node_def));
} else {
const OpDef* op_def;
TF_RETURN_IF_ERROR(
g_->op_registry()->LookUpOpDef(node_def.op(), &op_def));
if (opts_.add_default_attributes) {
AddDefaultsToNodeDef(*op_def, &node_def);
}
if (opts_.validate_nodes) {
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, *op_def));
}
}
TF_RETURN_IF_ERROR(MakeNode(std::move(node_def), &node));
if (node != nullptr) {
if (traces_.contains(node_name)) {
node->SetStackTrace(traces_[node_name]);
}
}
gdef_nodes_[node_name].node = node;
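    // Control inputs are validated to come after all data inputs. Sort the
    // control inputs by name and drop duplicates so that the same control
    // dependency is not added twice.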
auto first_control = absl::c_find_if(inputs, &InputInfo::IsControlInput);
auto first_control_copy = first_control;
std::sort(first_control, inputs.end(), &InputInfo::CompareName);
inputs.erase(
std::unique(first_control_copy, inputs.end(), &InputInfo::IsSameName),
inputs.end());
for (size_t i = 0; i < inputs.size(); ++i) {
if (inputs[i].node == nullptr) {
back_edges_.emplace_back(inputs[i].name, inputs[i].index, node, i);
} else if (inputs[i].index == Graph::kControlSlot) {
g_->AddControlEdge(inputs[i].node, node, kDoNotCheckDuplicates);
} else {
TF_RETURN_IF_ERROR(MakeEdge(inputs[i].node, inputs[i].index, node, i));
}
}
TF_RETURN_IF_ERROR(ValidateShape(node));
UpdatePendingCountAndReady(o, node->IsNextIteration());
}
if (processed < node_def_count()) {
LOG(WARNING) << "IN " << __func__ << " " << (node_def_count() - processed)
<< " NODES IN A CYCLE";
for (int64_t i = 0; i < node_def_count(); i++) {
if (pending_count_[i] != 0) {
LOG(WARNING) << "PENDING: " << SummarizeNodeDef(get_node_def(i))
<< " WITH PENDING COUNT = " << pending_count_[i];
}
}
PrintCycles();
return errors::InvalidArgument(node_def_count() - processed,
" nodes in a cycle");
}
return absl::OkStatus();
}
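// Adds the Merge back edges that were deferred during Convert() because
// their source nodes had not been created yet.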
Status GraphConstructor::AddBackEdges() {
for (const auto& e : back_edges_) {
Node* src_node = gdef_nodes_[e.src_name].node;
if (e.src_index == Graph::kControlSlot) {
g_->AddControlEdge(src_node, e.dst_node, kDoNotCheckDuplicates);
} else {
TF_RETURN_IF_ERROR(
MakeEdge(src_node, e.src_index, e.dst_node, e.dst_index));
}
VLOG(2) << "Add back edge: " << src_node->name() << " -> "
<< e.dst_node->name();
}
return absl::OkStatus();
}
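// Merges the imported VersionDef into the graph's versions: the minimum
// producer, the maximum min_consumer, and the union of bad_consumers.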
Status GraphConstructor::UpdateVersionDef() {
if (versions() == nullptr) return absl::OkStatus();
if (!opts_.importing) {
g_->set_versions(*versions());
return absl::OkStatus();
}
VersionDef g_versions = g_->versions();
g_versions.set_producer(
std::min(g_versions.producer(), versions()->producer()));
g_versions.set_min_consumer(
std::max(g_versions.min_consumer(), versions()->min_consumer()));
if (versions()->bad_consumers_size() > 0) {
std::set<int> bad(g_versions.bad_consumers().begin(),
g_versions.bad_consumers().end());
bad.insert(versions()->bad_consumers().begin(),
versions()->bad_consumers().end());
g_versions.clear_bad_consumers();
for (int v : bad) {
g_versions.add_bad_consumers(v);
}
}
g_->set_versions(g_versions);
return absl::OkStatus();
}
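// Resolves opts_.return_tensors against the imported nodes (or their
// input_map targets) and validates the requested output indices.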
Status GraphConstructor::PopulateReturnTensors() {
if (opts_.return_tensors.empty()) return absl::OkStatus();
for (const TensorId& id : opts_.return_tensors) {
auto iter = opts_.input_map.find(id);
if (iter == opts_.input_map.end()) {
auto iter = gdef_nodes_.find(id.first);
if (iter == gdef_nodes_.end()) {
return errors::InvalidArgument("Requested return tensor '",
id.ToString(),
"' not found in graph def");
}
int num_outputs = iter->second.node->num_outputs();
if ((id.second < 0 || id.second >= num_outputs) &&
id.second != Graph::kControlSlot) {
return errors::InvalidArgument("Invalid return output ", id.second,
" of node '", id.first, "', which has ",
num_outputs, " output(s)");
}
return_tensors_->push_back({iter->second.node, id.second});
} else {
TensorId remapped_id = iter->second;
DCHECK_GT(existing_nodes_.count(remapped_id.first), 0);
Node* node = existing_nodes_[remapped_id.first];
return_tensors_->push_back({node, remapped_id.second});
}
}
return absl::OkStatus();
}
Status GraphConstructor::PopulateReturnNodes() {
if (opts_.return_nodes.empty()) return absl::OkStatus();
for (StringPiece name : opts_.return_nodes) {
auto iter = gdef_nodes_.find(name);
if (iter == gdef_nodes_.end()) {
return errors::InvalidArgument("Requested return node '", name,
"' not found in graph def");
}
return_nodes_->push_back(iter->second.node);
}
return absl::OkStatus();
}
Status GraphConstructor::PopulateMissingUnusedInputMapKeys() {
if (missing_unused_input_map_keys_ == nullptr) return absl::OkStatus();
for (const auto& input_map_pair : opts_.input_map) {
TensorId key = input_map_pair.first;
if (used_input_map_keys_.count(key) > 0) continue;
auto pair = gdef_nodes_.find(key.first);
if (pair == gdef_nodes_.end()) {
missing_unused_input_map_keys_->push_back(key);
continue;
}
const NodeDef& node_def = get_node_def(pair->second.gdef_index);
const OpDef* op_def;
TF_RETURN_IF_ERROR(g_->op_registry()->LookUpOpDef(node_def.op(), &op_def));
int num_outputs;
TF_RETURN_IF_ERROR(NumOutputsForNode(node_def, *op_def, &num_outputs));
if (key.second >= num_outputs) {
missing_unused_input_map_keys_->push_back(key);
}
}
return absl::OkStatus();
}
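// Rolls back a failed import: removes every node added by this
// GraphConstructor and restores the graph's original versions.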
void GraphConstructor::Undo() {
for (const auto& iter : gdef_nodes_) {
if (iter.second.node != nullptr) {
g_->RemoveNode(iter.second.node);
}
}
g_->set_versions(original_versions_);
}
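// Validates the output/input indices and dtype compatibility before adding
// a data edge from `src:output_index` to `dst:input_index`.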
Status GraphConstructor::MakeEdge(Node* src, int output_index, Node* dst,
int input_index) {
if (output_index >= src->num_outputs()) {
return errors::InvalidArgument(
"Output ", output_index, " of node ", src->name(),
" does not exist. Node only has ", src->num_outputs(), " outputs.");
}
if (input_index >= dst->num_inputs()) {
return errors::InvalidArgument(
"Input ", input_index, " of node ", dst->name(),
" does not exist. Node only has ", dst->num_inputs(), " inputs.");
}
DataType src_out = src->output_type(output_index);
DataType dst_in = dst->input_type(input_index);
if (!TypesCompatible(dst_in, src_out)) {
return errors::InvalidArgument(
"Input ", input_index, " of node ", dst->name(), " was passed ",
DataTypeString(src_out), " from ", src->name(), ":", output_index,
" incompatible with expected ", DataTypeString(dst_in), ".");
}
g_->AddEdge(src, output_index, dst, input_index);
return absl::OkStatus();
}
}  // namespace
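// Minimal usage sketch (assumes `gdef` is an already-populated GraphDef):
//   Graph graph(OpRegistry::Global());
//   GraphConstructorOptions opts;
//   TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef, &graph));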
Status ConvertGraphDefToGraph(const GraphConstructorOptions& opts,
const GraphDef& gdef, Graph* g) {
ShapeRefiner refiner(gdef.versions().producer(), g->op_registry());
return GraphConstructor::Construct(
opts, gdef.node(), &gdef.versions(), &gdef.library(), &gdef.debug_info(),
g, &refiner, nullptr, nullptr,
nullptr);
}
Status ConvertGraphDefToGraph(const GraphConstructorOptions& opts,
GraphDef&& gdef, Graph* g) {
ShapeRefiner refiner(gdef.versions().producer(), g->op_registry());
return GraphConstructor::Construct(opts, std::move(gdef), g, &refiner,
nullptr,
nullptr,
nullptr);
}
Status ConvertNodeDefsToGraph(const GraphConstructorOptions& opts,
absl::Span<const NodeDef> nodes, Graph* g,
const GraphDebugInfo* debug_info) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, g->op_registry());
std::vector<const NodeDef*> node_defs;
node_defs.reserve(nodes.size());
for (const auto& n : nodes) {
node_defs.push_back(&n);
}
return GraphConstructor::Construct(opts, node_defs, nullptr, nullptr,
debug_info, g, &refiner,
nullptr,
nullptr,
nullptr);
}
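// Validates the opts/results combination, picks (or seeds) a ShapeRefiner
// consistent with the GraphDef's producer version, and delegates to
// GraphConstructor::Construct().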
Status ImportGraphDef(const ImportGraphDefOptions& opts, const GraphDef& gdef,
Graph* g, ShapeRefiner* refiner,
ImportGraphDefResults* results) {
if (!opts.return_tensors.empty()) {
if (results == nullptr) {
return errors::InvalidArgument(
"results argument to ImportGraphDef() must be non-null if "
"opts.return_tensors is non-empty");
}
}
if (!opts.return_nodes.empty()) {
if (opts.skip_mapped_nodes) {
return errors::InvalidArgument(
"Requesting return_nodes with skip_mapped_nodes set is not currently "
"supported");
}
if (results == nullptr) {
return errors::InvalidArgument(
"results argument to ImportGraphDef() must be non-null if "
"opts.return_nodes is non-empty");
}
}
if (results != nullptr) {
if (!results->return_tensors.empty() || !results->return_nodes.empty() ||
!results->missing_unused_input_map_keys.empty()) {
return errors::InvalidArgument(
"All fields in results argument to ImportGraphDef() must be empty.");
}
}
ShapeRefiner default_refiner(gdef.versions().producer(), g->op_registry());
if (refiner == nullptr) {
refiner = &default_refiner;
} else {
if (gdef.versions().producer() > 0 &&
gdef.versions().producer() < refiner->graph_def_version() &&
g->num_nodes() > 2) {
LOG(WARNING) << "Importing a graph with a lower producer version "
<< gdef.versions().producer()
<< " into an existing graph with producer version "
<< refiner->graph_def_version() << ". Shape inference will "
<< "have run different parts of the graph with different "
<< "producer versions.";
}
}
refiner->set_graph_def_version(
std::min(refiner->graph_def_version(), gdef.versions().producer()));
if (results == nullptr) {
return GraphConstructor::Construct(opts, gdef.node(), &gdef.versions(),
&gdef.library(), &gdef.debug_info(), g,
refiner, nullptr, nullptr, nullptr);
} else {
return GraphConstructor::Construct(
opts, gdef.node(), &gdef.versions(), &gdef.library(),
&gdef.debug_info(), g, refiner, &results->return_tensors,
&results->return_nodes, &results->missing_unused_input_map_keys);
}
}
void CopyGraph(const Graph& src, Graph* dest) { dest->Copy(src); }
}  // namespace tensorflow
|
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
class GraphConstructorTest : public ::testing::Test {
protected:
GraphConstructorTest() : graph_(OpRegistry::Global()) {}
void Convert(const string& gdef_ascii) {
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef_));
}
void ExpectError(const string& gdef_ascii,
const std::vector<string>& expected_error_strs,
string not_expected_error_str = "") {
const string original_graph_description = GraphDebugString();
Convert(gdef_ascii);
GraphConstructorOptions opts;
Status status = ConvertGraphDefToGraph(opts, gdef_, &graph_);
EXPECT_FALSE(status.ok());
for (const string& error : expected_error_strs) {
EXPECT_TRUE(absl::StrContains(status.message(), error))
<< "Expected to find '" << error << "' in " << status;
}
if (!not_expected_error_str.empty()) {
EXPECT_TRUE(!absl::StrContains(status.message(), not_expected_error_str))
<< "Expected not to find '" << not_expected_error_str << "' in "
<< status;
}
EXPECT_EQ(original_graph_description, GraphDebugString());
}
void ExpectError(const string& gdef_ascii, const ImportGraphDefOptions& opts,
const std::vector<string>& expected_error_strs,
ShapeRefiner* refiner = nullptr,
ImportGraphDefResults* results = nullptr) {
const string original_graph_description = GraphDebugString();
Convert(gdef_ascii);
Status status = ImportGraphDef(opts, gdef_, &graph_, refiner, results);
EXPECT_FALSE(status.ok());
for (const string& error : expected_error_strs) {
EXPECT_TRUE(absl::StrContains(status.message(), error))
<< "Expected to find '" << error << "' in " << status;
}
EXPECT_EQ(original_graph_description, GraphDebugString());
}
void ExpectOK(const string& gdef_ascii) {
Convert(gdef_ascii);
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef_, &graph_));
}
void ExpectOK(const string& gdef_ascii, const ImportGraphDefOptions& opts,
ShapeRefiner* refiner = nullptr,
ImportGraphDefResults* results = nullptr) {
Convert(gdef_ascii);
Status s = ImportGraphDef(opts, gdef_, &graph_, refiner, results);
EXPECT_EQ(absl::OkStatus(), s) << s;
}
void ExpectVersions(int min_consumer, int producer) {
EXPECT_EQ(min_consumer, graph_.versions().min_consumer())
<< "Expected min consumer " << min_consumer << ", got "
<< graph_.versions().min_consumer();
EXPECT_EQ(producer, graph_.versions().producer())
<< "Expected producer " << producer << ", got "
<< graph_.versions().producer();
}
Node* FindNode(const string& name) {
for (Node* n : graph_.nodes()) {
if (n->name() == name) return n;
}
return nullptr;
}
bool HasNode(const string& name) { return FindNode(name) != nullptr; }
bool HasEdge(const string& src, int src_out, const string& dst, int dst_in) {
for (const Edge* e : graph_.edges()) {
if (e->src()->name() == src && e->src_output() == src_out &&
e->dst()->name() == dst && e->dst_input() == dst_in) {
return true;
}
}
return false;
}
bool HasControlEdge(const string& src, const string& dst) {
return HasEdge(src, Graph::kControlSlot, dst, Graph::kControlSlot);
}
string ColocationGroup(const string& node) {
Node* n = nullptr;
for (Node* ni : graph_.nodes()) {
if (ni->name() == node) {
n = ni;
break;
}
}
if (n == nullptr) {
return "";
}
std::vector<string> value;
Status s = GetNodeAttr(n->attrs(), kColocationAttrName, &value);
if (!s.ok()) {
return "";
}
if (value.size() != 1) {
ADD_FAILURE()
<< "ColocationGroup was written with the assumption of at most 1 "
"value for the _class attribute. Update it and its callers";
return "";
}
StringPiece loc(value[0]);
return absl::ConsumePrefix(&loc, kColocationGroupPrefix) ? string(loc) : "";
}
string GraphDebugString() const {
return graph_.ToGraphDefDebug().DebugString();
}
Graph graph_;
private:
GraphDef gdef_;
};
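// Shape function shared by the test ops below: sets every output to a
// scalar shape.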
Status Scalars(shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
REGISTER_OP("ABC");
REGISTER_OP("TestParams").Output("o: float").SetShapeFn(Scalars);
REGISTER_OP("TestInput")
.Output("a: float")
.Output("b: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestMul")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestInt").Input("a: int32");
REGISTER_OP("TestOneInputTwoOutputs")
.Input("x: float")
.Output("y: float")
.Output("z: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestOneInputOneOutput")
.Input("x: T")
.Output("y: T")
.Attr("T: {float, int64}")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("TestVariadicOutput")
.Output("outputs: N * int32")
.Attr("N: int >= 0")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("TestDefaultAttr")
.Attr("default_int: int=31415")
.SetShapeFn(shape_inference::NoOutputs);
REGISTER_OP("RequiresCurrentGraphVersion")
.Output("version: int32")
.SetIsStateful()
.SetShapeFn([](shape_inference::InferenceContext* c) {
if (c->graph_def_version() != TF_GRAPH_DEF_VERSION) {
return errors::InvalidArgument("Wrong graph version for shape");
}
return shape_inference::ScalarShape(c);
});
TEST_F(GraphConstructorTest, InvalidNodeName) {
auto expect_invalid_name = [this](const char* name) {
ExpectError(strings::StrCat("node { name: '", name, "' op: 'ABC' }"),
{"Node name contains invalid characters"});
};
expect_invalid_name("a:b");
expect_invalid_name("_abc");
expect_invalid_name(R"(a\\b)");
expect_invalid_name("/a");
expect_invalid_name("-a");
ExpectOK("node { name: 'a-bc_' op: 'ABC' }");
ExpectOK("node { name: 'a-B.0/.c_' op: 'ABC' }");
ExpectOK("node { name: '0123' op: 'ABC' }");
ExpectOK("node { name: '.0123' op: 'ABC' }");
}
TEST_F(GraphConstructorTest, InvalidSourceNodeName) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: 'W999' input: 'input' }",
{"Unknown input node", "W999"});
}
TEST_F(GraphConstructorTest, InvalidSourceNodeIndex) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1:1', 'input:1' ] }",
{"Connecting to invalid output 1 of source node W1"});
}
TEST_F(GraphConstructorTest, GraphWithCycle) {
ExpectError(
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }",
{"cycle"});
}
TEST_F(GraphConstructorTest, GraphWithOKCycle) {
ExpectOK(R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/Add"
op: "Add"
input: "while/Identity"
input: "while/Add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 11
}
)EOF");
}
TEST_F(GraphConstructorTest, ImportGraphThatUsesConstantValueFromInsideLoop) {
const string pb_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "Const_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 1
}
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Enter_1"
op: "Enter"
input: "Const_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Merge_1"
op: "Merge"
input: "while/Enter_1"
input: "while/NextIteration_1"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Switch_1"
op: "Switch"
input: "while/Merge_1"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge_1"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Identity_1"
op: "Identity"
input: "while/Switch_1:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/transpose"
op: "Transpose"
input: "while/Identity_1"
input: "while/Identity_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "Tperm"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Identity"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration_1"
op: "NextIteration"
input: "while/transpose"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit_1"
op: "Exit"
input: "while/Switch_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 21
}
)EOF";
GraphDef def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(pb_ascii, &def));
ImportGraphDefOptions opts;
auto s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
}
TEST_F(GraphConstructorTest, TypeMismatch) {
ExpectError(
"node { name: 'input' op: 'TestInput' }"
"node { name: 'int' op: 'TestInt' input: [ 'input' ] }",
{"Input 0 of node int was passed float from input:0 incompatible with "
"expected int32."});
}
TEST_F(GraphConstructorTest, EmptyGraph) {
ExpectOK("");
ExpectVersions(0, 0);
}
TEST_F(GraphConstructorTest, VersionGraph) {
ExpectOK(strings::StrCat("versions { producer: ", TF_GRAPH_DEF_VERSION,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER,
"}"));
ExpectVersions(TF_GRAPH_DEF_VERSION_MIN_CONSUMER, TF_GRAPH_DEF_VERSION);
}
TEST_F(GraphConstructorTest, ForwardCompatError) {
ExpectError(
strings::StrCat(
"node { name: 'a:b' op: 'ABC' }\n"
"versions { producer: ",
TF_GRAPH_DEF_VERSION + 22,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER, "}"),
{"forward compatibility guarantee"});
}
TEST_F(GraphConstructorTest, NoForwardCompatError) {
ExpectError(
strings::StrCat(
"node { name: 'a:b' op: 'ABC' }\n"
"versions { producer: ",
TF_GRAPH_DEF_VERSION + 21,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER, "}"),
{"Node name contains invalid characters"},
"forward compatibility guarantee");
}
TEST_F(GraphConstructorTest, LowVersion) {
ExpectError(strings::StrCat("versions { producer: ", -1, " }"),
{strings::StrCat("GraphDef producer version -1 below min "
"producer ",
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
" supported by TensorFlow ", TF_VERSION_STRING,
". Please regenerate your graph.")});
}
TEST_F(GraphConstructorTest, HighVersion) {
const int version = TF_GRAPH_DEF_VERSION + 1;
ExpectError(strings::StrCat("versions { min_consumer: ", version, " }"),
{strings::StrCat("GraphDef min consumer version ", version,
" above current version ", TF_GRAPH_DEF_VERSION,
" for TensorFlow ", TF_VERSION_STRING,
". Please upgrade TensorFlow.")});
}
TEST_F(GraphConstructorTest, BadVersion) {
const int version = TF_GRAPH_DEF_VERSION + 1;
const int bad = TF_GRAPH_DEF_VERSION;
ExpectError(
strings::StrCat("versions { producer: ", version, " bad_consumers: ", bad,
" }"),
{strings::StrCat(
"GraphDef disallows consumer version ", bad,
". Please upgrade TensorFlow: this version is likely buggy.")});
}
TEST_F(GraphConstructorTest, SimpleModel) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }");
EXPECT_TRUE(HasNode("W1"));
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("W1", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input", 1, "t1", 1));
}
TEST_F(GraphConstructorTest, SimpleModelWithControlEdges) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' input: [ '^W1' ] }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W1', 'input:1', '^t1' ] }");
EXPECT_TRUE(HasNode("W1"));
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasEdge("W1", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input", 1, "t1", 1));
EXPECT_TRUE(HasEdge("W1", 0, "t2", 0));
EXPECT_TRUE(HasEdge("input", 1, "t2", 1));
EXPECT_TRUE(HasControlEdge("W1", "input"));
EXPECT_TRUE(HasControlEdge("t1", "t2"));
}
TEST_F(GraphConstructorTest, Error_ControlEdgeBeforeRealInput) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' input: [ '^W1' ] }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W1', '^t1', 'input:1' ] }",
{"Node 't2': Control dependencies must come after regular dependencies"});
}
TEST_F(GraphConstructorTest, ImportGraphDef) {
GraphDef def;
ImportGraphDefOptions opts;
const string& source = graph_.FindNodeId(Graph::kSourceId)->name();
const string& sink = graph_.FindNodeId(Graph::kSinkId)->name();
Status s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(2, graph_.num_nodes());
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_EQ(1, graph_.num_edges());
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node { name: "A" op: "TestParams" }
node { name: "X" op: "TestParams" }
node {
name: "B"
op: "TestOneInputTwoOutputs"
input: "A"
attr {
key: "_class"
value { list { s: "loc:@A" } }
}
}
node {
name: "C"
op: "TestOneInputTwoOutputs"
input: "B:1"
input: "^X"
}
node {
name: "D"
op: "TestMul"
input: "B:0"
input: "C:0"
})EOF",
&def);
ASSERT_TRUE(parsed);
s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(5 + 2, graph_.num_nodes());
EXPECT_EQ("A", ColocationGroup("B"));
EXPECT_TRUE(HasEdge("A", 0, "B", 0));
EXPECT_TRUE(HasEdge("B", 1, "C", 0));
EXPECT_TRUE(HasEdge("B", 0, "D", 0));
EXPECT_TRUE(HasEdge("C", 0, "D", 1));
EXPECT_TRUE(HasControlEdge("X", "C"));
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_TRUE(HasControlEdge(source, "A"));
EXPECT_TRUE(HasControlEdge(source, "X"));
EXPECT_TRUE(HasControlEdge("D", sink));
EXPECT_EQ(9, graph_.num_edges());
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
opts.prefix = "import";
s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(
10 + 2,
graph_.num_nodes());
EXPECT_EQ("A", ColocationGroup("B"));
EXPECT_EQ("import/A", ColocationGroup("import/B"));
EXPECT_TRUE(HasEdge("A", 0, "B", 0));
EXPECT_TRUE(HasEdge("B", 1, "C", 0));
EXPECT_TRUE(HasEdge("B", 0, "D", 0));
EXPECT_TRUE(HasEdge("C", 0, "D", 1));
EXPECT_TRUE(HasControlEdge("X", "C"));
EXPECT_TRUE(HasEdge("import/A", 0, "import/B", 0));
EXPECT_TRUE(HasEdge("import/B", 1, "import/C", 0));
EXPECT_TRUE(HasEdge("import/B", 0, "import/D", 0));
EXPECT_TRUE(HasEdge("import/C", 0, "import/D", 1));
EXPECT_TRUE(HasControlEdge("import/X", "import/C"));
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_TRUE(HasControlEdge(source, "A"));
EXPECT_TRUE(HasControlEdge(source, "X"));
EXPECT_TRUE(HasControlEdge("D", sink));
EXPECT_TRUE(HasControlEdge(source, "import/A"));
EXPECT_TRUE(HasControlEdge(source, "import/X"));
EXPECT_TRUE(HasControlEdge("import/D", sink));
EXPECT_EQ(17, graph_.num_edges());
}
TEST_F(GraphConstructorTest, ImportGraphDef_DefaultAttrs) {
GraphDef def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{ name:'A' op:'TestDefaultAttr'}", &def));
Status s = ImportGraphDef(ImportGraphDefOptions(), def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
Node* a = nullptr;
for (Node* n : graph_.nodes()) {
if (n->name() == "A") {
a = n;
break;
}
}
ASSERT_TRUE(a != nullptr);
int value = 0;
s = GetNodeAttr(a->attrs(), "default_int", &value);
ASSERT_EQ(absl::OkStatus(), s) << s << " -- " << a->def().DebugString();
EXPECT_EQ(31415, value);
}
TEST_F(GraphConstructorTest, ImportGraphDef_Versioning) {
GraphDef def;
const ImportGraphDefOptions opts;
def.mutable_versions()->set_producer(TF_GRAPH_DEF_VERSION_MIN_PRODUCER - 1);
Status s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
def.mutable_versions()->set_min_consumer(TF_GRAPH_DEF_VERSION + 1);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
def.mutable_versions()->add_bad_consumers(TF_GRAPH_DEF_VERSION);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
graph_.ToGraphDef(&def);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
def.Clear();
const int original_min_consumer = graph_.versions().min_consumer();
def.mutable_versions()->set_min_consumer(original_min_consumer + 2);
def.mutable_versions()->add_bad_consumers(TF_GRAPH_DEF_VERSION - 1);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(original_min_consumer + 2, graph_.versions().min_consumer());
ASSERT_EQ(1, graph_.versions().bad_consumers_size());
EXPECT_EQ(TF_GRAPH_DEF_VERSION - 1, graph_.versions().bad_consumers(0));
}
TEST_F(GraphConstructorTest, ImportGraphDef_DeprecatedOps) {
GraphDef def;
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node {
name: "zeros"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
dim {
size: 149
}
dim {
size: 149
}
dim {
size: 32
}
}
float_val: 0.0
}
}
}
}
node {
name: "m_v_beta_gamma"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 32
}
}
tensor_content: "\265\374\010=S\250\t\276\206\371>;Z\306y>\217]@\276\347\206\202\275\3747\241\275+1\227=J1\352\275\353?H;`\253\000>\023Y\014\276\341\310L;\301\030\314;\032Kw\275\273fQ;\036\252\200=\257o/\273\377\241\247\275\307,\332\274L\255\247\274\023\331R=r\271\225<\016/\204<\364\340\375\272t\030J=\220\306}\276\276x\003\275\231\013}\276\212\034\224\276\257\020\216>A\223\217\276"
}
}
}
}
node {
name: "batchnorm"
op: "BatchNormWithGlobalNormalization"
input: "zeros"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "scale_after_normalization"
value {
b: false
}
}
attr {
key: "variance_epsilon"
value {
f: 0.0010000000475
}
}
}
)EOF",
&def);
ASSERT_TRUE(parsed);
Status s = ImportGraphDef(ImportGraphDefOptions(), def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
Graph g2(OpRegistry::Global());
def.mutable_versions()->set_producer(10);
s = ImportGraphDef(ImportGraphDefOptions(), def, &g2, nullptr);
EXPECT_EQ(error::UNIMPLEMENTED, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"BatchNormWithGlobalNormalization is not "
"available in GraphDef version 10"))
<< s;
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMap) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasNode("new_input"));
EXPECT_TRUE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_FALSE(HasEdge("new_input", 0, "t1", 0));
EXPECT_FALSE(HasEdge("new_input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "input:1");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithPrefix) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'input' op: 'TestInput' } "
"node { name: 'unmapped_input' op: 'TestInput'}",
ImportGraphDefOptions(), &refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("input", 0)] = TensorId("input", 0);
opts.input_map[TensorId("input", 1)] = TensorId("input", 0);
opts.prefix = "import";
ExpectOK(
R"EOF(
node { name: 'input' op: 'TestInput' }
node { name: 'unmapped_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'input:0', 'input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
node { name: 't3' op: 'TestMul' input: [ 'unmapped_input:0',
'unmapped_input:1' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("unmapped_input"));
EXPECT_TRUE(HasNode("import/unmapped_input"));
EXPECT_TRUE(HasNode("import/t1"));
EXPECT_TRUE(HasNode("import/t2"));
EXPECT_TRUE(HasNode("import/input"));
EXPECT_TRUE(HasEdge("input", 0, "import/t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "import/t1", 1));
EXPECT_FALSE(HasEdge("import/input", 0, "import/t1", 0));
EXPECT_FALSE(HasEdge("import/input", 0, "import/t1", 1));
EXPECT_TRUE(HasEdge("import/t1", 0, "import/t2", 0));
EXPECT_TRUE(HasEdge("import/unmapped_input", 0, "import/t3", 0));
EXPECT_TRUE(HasEdge("import/unmapped_input", 1, "import/t3", 1));
Node* t1 = FindNode("import/t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
EXPECT_EQ(t1->requested_inputs()[0], "input:0");
EXPECT_EQ(t1->requested_inputs()[1], "input:0");
Node* t2 = FindNode("import/t2");
ASSERT_EQ(t2->requested_inputs().size(), 2);
EXPECT_EQ(t2->requested_inputs()[0], "import/t1:0");
EXPECT_EQ(t2->requested_inputs()[1], "import/t1:0");
Node* t3 = FindNode("import/t3");
ASSERT_EQ(t3->requested_inputs().size(), 2);
EXPECT_EQ(t3->requested_inputs()[0], "import/unmapped_input:0");
EXPECT_EQ(t3->requested_inputs()[1], "import/unmapped_input:1");
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithControlEdges) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'W1' op: 'TestParams' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
const int kControlSlot = Graph::kControlSlot;
opts.input_map[TensorId("W2", kControlSlot)] = TensorId("W1", kControlSlot);
opts.input_map[TensorId("W3", kControlSlot)] = TensorId("W1", kControlSlot);
ExpectOK(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'W3' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W2' ] }
node { name: 't1' op: 'TestOneInputTwoOutputs' input: [ 'W2' ] }
node { name: 't2' op: 'TestOneInputTwoOutputs'
input: [ 'input', '^W2', '^W3' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("W1"));
EXPECT_TRUE(HasNode("W2"));
EXPECT_TRUE(HasNode("W3"));
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasControlEdge("W1", "input"));
EXPECT_FALSE(HasControlEdge("W2", "input"));
EXPECT_TRUE(HasEdge("W2", 0, "t1", 0));
EXPECT_TRUE(HasControlEdge("W1", "t2"));
EXPECT_FALSE(HasControlEdge("W2", "t2"));
EXPECT_TRUE(HasEdge("input", 0, "t2", 0));
Node* t2 = FindNode("t2");
EXPECT_EQ(t2->in_edges().size(), 2);
opts.prefix = "import";
opts.input_map.clear();
opts.input_map[TensorId("W1", kControlSlot)] = TensorId("W1", kControlSlot);
ExpectOK(
R"EOF(
node { name: 'W1' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W1' ] }
node { name: 't1' op: 'TestOneInputTwoOutputs' input: [ 'W1' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("import/W1"));
EXPECT_TRUE(HasNode("import/input"));
EXPECT_TRUE(HasNode("import/t1"));
EXPECT_TRUE(HasControlEdge("W1", "import/input"));
EXPECT_FALSE(HasControlEdge("import/W1", "import/input"));
EXPECT_TRUE(HasEdge("import/W1", 0, "import/t1", 0));
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithBadControlEdge) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'W1' op: 'TestParams' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("W2", Graph::kControlSlot)] = TensorId("W1", 0);
ExpectError(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W2' ] }
)EOF",
opts,
{"input_map entry ^W2->W1:0 between control edge and non-control edge"},
&refiner);
opts.input_map.clear();
opts.input_map[TensorId("W2", 0)] = TensorId("W1", Graph::kControlSlot);
ExpectError(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W2' ] }
)EOF",
opts,
{"input_map entry W2:0->^W1 between control edge and non-control edge"},
&refiner);
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithInvalidNodeIndex) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input1' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("input2", 0)] = TensorId("input1", 3);
ExpectError(
R"EOF(
node { name: 'input2' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'input2:0', 'input2:1' ] }
)EOF",
opts,
{"Node 't1': Connecting to invalid output 3 of source node input1 which "
"has 2 outputs"},
&refiner);
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithMissingEntries) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'W1' op: 'TestParams' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
const int kControlSlot = Graph::kControlSlot;
opts.input_map[TensorId("W2", kControlSlot)] = TensorId("DNE", kControlSlot);
ExpectError(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W2' ] }
)EOF",
opts,
{"node 'DNE' in input_map does not exist in graph (input_map entry: "
"^W2->^DNE)"},
&refiner);
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapDuplicateNodeNames) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
Node* node;
TF_CHECK_OK(NodeBuilder("dup", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Finalize(&graph_, &node));
TF_CHECK_OK(NodeBuilder("dup", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Finalize(&graph_, &node));
ImportGraphDefOptions opts;
opts.input_map[TensorId("new_input", 0)] = TensorId("dup", 0);
ExpectError(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
)EOF",
opts,
{"cannot resolve input_map because multiple nodes exist with name 'dup'"},
&refiner);
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapMissingUnusedKeys) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ImportGraphDefOptions opts;
ImportGraphDefResults results;
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }",
opts, &refiner, &results);
EXPECT_TRUE(results.missing_unused_input_map_keys.empty());
results.missing_unused_input_map_keys.push_back(TensorId());
ExpectError(
"node { name: 'W2' op: 'TestParams' }", opts,
{"All fields in results argument to ImportGraphDef() must be empty."},
&refiner, &results);
const int kControlSlot = Graph::kControlSlot;
results.missing_unused_input_map_keys.clear();
opts.input_map[TensorId("W2", kControlSlot)] = TensorId("W1", kControlSlot);
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 0);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
opts.input_map[TensorId("new_input", 3)] = TensorId("input", 0);
opts.input_map[TensorId("DNE", 0)] = TensorId("input", 0);
opts.input_map[TensorId("t1", 0)] = TensorId("W1", 0);
opts.input_map[TensorId("variadic", 4)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'new_input' op: 'TestInput' input: [ '^W2' ] }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 'variadic' op: 'TestVariadicOutput'
attr { key: "N" value { i: 5 } } }
)EOF",
opts, &refiner, &results);
std::set<TensorId> expected_unused_keys = {TensorId("new_input", 3),
TensorId("DNE", 0)};
ASSERT_EQ(results.missing_unused_input_map_keys.size(),
expected_unused_keys.size());
std::set<TensorId> actual_unused_keys(
results.missing_unused_input_map_keys.begin(),
results.missing_unused_input_map_keys.end());
EXPECT_EQ(actual_unused_keys, expected_unused_keys);
opts = ImportGraphDefOptions();
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 0);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 2)] = TensorId("input", 1);
opts.skip_mapped_nodes = true;
opts.prefix = "import";
results = ImportGraphDefResults();
ExpectOK(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'new_input' op: 'TestInput' input: [ '^W2' ] }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
)EOF",
opts, &refiner, &results);
ASSERT_EQ(results.missing_unused_input_map_keys.size(), 1);
EXPECT_EQ(results.missing_unused_input_map_keys[0],
SafeTensorId("new_input", 2));
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithUnboundInput) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_FALSE(HasNode("new_input"));
EXPECT_TRUE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "input:1");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_SkipMappedNodes_FullyMapped) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.skip_mapped_nodes = true;
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_FALSE(HasNode("new_input"));
EXPECT_TRUE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "input:1");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_SkipMappedNodes_NotFullyMapped) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.skip_mapped_nodes = true;
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasNode("new_input"));
EXPECT_FALSE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("new_input", 0, "t1", 0));
EXPECT_FALSE(HasEdge("new_input", 1, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "new_input:0");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_ReturnTensors) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ImportGraphDefOptions opts;
opts.return_tensors.push_back({"input", 1});
opts.return_tensors.push_back({"t1", 0});
opts.return_tensors.push_back({"input", 0});
ImportGraphDefResults results;
ExpectOK(
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: ['input:0', 'input:1'] }",
opts, &refiner, &results);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("input", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input", 1, "t1", 1));
ASSERT_EQ(results.return_tensors.size(), 3);
EXPECT_EQ(results.return_tensors[0].first->name(), "input");
EXPECT_EQ(results.return_tensors[0].second, 1);
EXPECT_EQ(results.return_tensors[1].first->name(), "t1");
EXPECT_EQ(results.return_tensors[1].second, 0);
EXPECT_EQ(results.return_tensors[2].first->name(), "input");
EXPECT_EQ(results.return_tensors[2].second, 0);
opts.return_tensors.clear();
results = ImportGraphDefResults();
opts.prefix = "import";
opts.input_map[{"new_input", 1}] = {"input", 0};
opts.return_tensors.push_back({"new_input", 0});
opts.return_tensors.push_back({"new_input", 1});
ExpectOK("node { name: 'new_input' op: 'TestInput' }", opts, &refiner,
&results);
EXPECT_TRUE(HasNode("import/new_input"));
ASSERT_EQ(results.return_tensors.size(), 2);
EXPECT_EQ(results.return_tensors[0].first->name(), "import/new_input");
EXPECT_EQ(results.return_tensors[0].second, 0);
EXPECT_EQ(results.return_tensors[1].first->name(), "input");
EXPECT_EQ(results.return_tensors[1].second, 0);
opts.prefix.clear();
opts.input_map.clear();
opts.return_tensors.clear();
results = ImportGraphDefResults();
opts.input_map[{"new_input", 0}] = {"_SOURCE", 0};
opts.return_tensors.push_back({"new_input", 0});
ExpectOK("node { name: 'new_input' op: 'TestInput' }", opts, &refiner,
&results);
EXPECT_TRUE(HasNode("new_input"));
ASSERT_EQ(results.return_tensors.size(), 1);
EXPECT_EQ(results.return_tensors[0].first->name(), "_SOURCE");
EXPECT_EQ(results.return_tensors[0].second, 0);
}
TEST_F(GraphConstructorTest, ImportGraphDef_ReturnTensorsErrors) {
ImportGraphDefOptions opts;
opts.return_tensors.push_back({"new_input", 0});
ExpectError("node { name: 'new_input' op: 'TestInput' }", opts,
{"results argument to ImportGraphDef() must be non-null if "
"opts.return_tensors is non-empty"});
ImportGraphDefResults results;
results.return_tensors.push_back({nullptr, 0});
ExpectError(
"node { name: 'new_input' op: 'TestInput' }", opts,
{"All fields in results argument to ImportGraphDef() must be empty."},
nullptr, &results);
results.return_tensors.clear();
ExpectError("node { name: 'W1' op: 'TestParams' }", opts,
{"Requested return tensor 'new_input:0' not found in graph def"},
nullptr, &results);
opts.return_tensors.clear();
opts.return_tensors.push_back({"new_input", 2});
ExpectError("node { name: 'new_input' op: 'TestInput' }", opts,
{"Invalid return output 2 of node 'new_input', which has 2 "
"output(s)"},
nullptr, &results);
}
TEST_F(GraphConstructorTest, ImportGraphDef_ReturnNodes) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ImportGraphDefOptions opts;
opts.return_nodes.push_back("input");
opts.return_nodes.push_back("t1");
ImportGraphDefResults results;
ExpectOK(
"node { name: 'input' op: 'TestInput' }"
"node { name: 'input2' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: ['input:0', 'input2:1'] }",
opts, &refiner, &results);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("input2"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("input", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input2", 1, "t1", 1));
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_tensors.size(), 0);
EXPECT_EQ(results.missing_unused_input_map_keys.size(), 0);
EXPECT_EQ(results.return_nodes[0]->name(), "input");
EXPECT_EQ(results.return_nodes[1]->name(), "t1");
opts = ImportGraphDefOptions();
results = ImportGraphDefResults();
opts.prefix = "import";
opts.return_nodes.push_back("input");
ExpectOK("node { name: 'input' op: 'TestInput' }", opts, &refiner, &results);
EXPECT_TRUE(HasNode("import/input"));
ASSERT_EQ(results.return_nodes.size(), 1);
EXPECT_EQ(results.return_nodes[0]->name(), "import/input");
opts = ImportGraphDefOptions();
results = ImportGraphDefResults();
opts.input_map[{"new_input", 0}] = {"input", 0};
opts.return_nodes.push_back("new_input");
ExpectOK("node { name: 'new_input' op: 'TestInput' }", opts, &refiner,
&results);
EXPECT_TRUE(HasNode("new_input"));
ASSERT_EQ(results.return_nodes.size(), 1);
EXPECT_EQ(results.return_nodes[0]->name(), "new_input");
}
TEST_F(GraphConstructorTest, ImportGraphDef_ReturnNodesErrors) {
ImportGraphDefOptions opts;
opts.return_nodes.push_back("new_input");
ExpectError("node { name: 'new_input' op: 'TestInput' }", opts,
{"results argument to ImportGraphDef() must be non-null if "
"opts.return_nodes is non-empty"});
ImportGraphDefResults results;
results.return_nodes.push_back(nullptr);
ExpectError(
"node { name: 'new_input' op: 'TestInput' }", opts,
{"All fields in results argument to ImportGraphDef() must be empty."},
nullptr, &results);
results.return_nodes.clear();
ExpectError("node { name: 'W1' op: 'TestParams' }", opts,
{"Requested return node 'new_input' not found in graph def"},
nullptr, &results);
opts.skip_mapped_nodes = true;
ExpectError("node { name: 'new_input' op: 'TestInput' }", opts,
{"Requesting return_nodes with skip_mapped_nodes set is not "
"currently supported"});
}
TEST_F(GraphConstructorTest, ImportGraphDef_UniquifyNames) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
const char* graph_def_str =
"node { name: 'A' op: 'TestInput' }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A'] }";
ImportGraphDefOptions opts;
opts.uniquify_names = true;
opts.return_nodes.push_back("A");
opts.return_nodes.push_back("B");
ImportGraphDefResults results;
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A");
EXPECT_EQ(results.return_nodes[1]->name(), "B");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A");
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_1");
EXPECT_EQ(results.return_nodes[1]->name(), "B_1");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_1:0");
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_2");
EXPECT_EQ(results.return_nodes[1]->name(), "B_2");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_2:0");
opts.prefix = "A";
opts.uniquify_prefix = true;
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_3/A");
EXPECT_EQ(results.return_nodes[1]->name(), "A_3/B");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_3/A");
ExpectOK("node { name: 'B_3' op: 'TestInput' }");
opts.uniquify_prefix = false;
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A/A");
EXPECT_EQ(results.return_nodes[1]->name(), "A/B");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A/A");
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A/A_1");
EXPECT_EQ(results.return_nodes[1]->name(), "A/B_1");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A/A_1:0");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.return_nodes.push_back("A_1");
opts.return_nodes.push_back("B_1");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'A_1' op: 'TestInput' }"
"node { name: 'B_1' op: 'TestOneInputTwoOutputs' input: ['A_1:0'] }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_1_1");
EXPECT_EQ(results.return_nodes[1]->name(), "B_1_1");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_1_1:0");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.return_nodes.push_back("A");
opts.return_nodes.push_back("A_4");
opts.return_nodes.push_back("B");
opts.return_nodes.push_back("B_4/B");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'A' op: 'TestInput' }"
"node { name: 'A_4' op: 'TestInput' }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A'] }"
"node { name: 'B_4/B' op: 'TestOneInputTwoOutputs' input: ['A_4'] }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 4);
EXPECT_EQ(results.return_nodes[0]->name(), "A_5");
EXPECT_EQ(results.return_nodes[1]->name(), "A_4");
EXPECT_EQ(results.return_nodes[2]->name(), "B_5");
EXPECT_EQ(results.return_nodes[2]->def().input(0), "A_5:0");
EXPECT_EQ(results.return_nodes[3]->name(), "B_4/B");
EXPECT_EQ(results.return_nodes[3]->def().input(0), "A_4");
ExpectOK("node { name: 'foo/abc' op: 'ABC' }");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.return_nodes.push_back("foo");
results = ImportGraphDefResults();
ExpectOK("node { name: 'foo' op: 'TestInput' }", opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 1);
EXPECT_EQ(results.return_nodes[0]->name(), "foo_1");
ExpectOK("node { name: 'outer/inner/abc' op: 'ABC' }");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.return_nodes.push_back("outer");
opts.return_nodes.push_back("inner");
opts.return_nodes.push_back("abc");
opts.return_nodes.push_back("outer/inner");
opts.return_nodes.push_back("outer/inner/abc");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'outer' op: 'TestInput' }"
"node { name: 'inner' op: 'TestInput' }"
"node { name: 'abc' op: 'TestInput' }"
"node { name: 'outer/inner' op: 'TestInput' }"
"node { name: 'outer/inner/abc' op: 'TestInput' }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 5);
EXPECT_EQ(results.return_nodes[0]->name(), "outer_1");
EXPECT_EQ(results.return_nodes[1]->name(), "inner");
EXPECT_EQ(results.return_nodes[2]->name(), "abc");
EXPECT_EQ(results.return_nodes[3]->name(), "outer/inner_1");
EXPECT_EQ(results.return_nodes[4]->name(), "outer/inner/abc_1");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.input_map[TensorId("A", 0)] = TensorId("A", 0);
opts.input_map[TensorId("B", 0)] = TensorId("B", 0);
opts.return_nodes.push_back("A");
opts.return_nodes.push_back("B");
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_6");
EXPECT_EQ(results.return_nodes[1]->name(), "B_6");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_UniquifyNames_ColocationGroups) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'A' op: 'TestInput' }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A'] }");
ImportGraphDefOptions opts;
opts.uniquify_names = true;
opts.return_nodes.push_back("A");
opts.return_nodes.push_back("B");
ImportGraphDefResults results;
ExpectOK(
"node { name: 'A' op: 'TestInput' }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A:0'] "
" attr { key: '_class' value { list { s:'loc:@A' } } } }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_1");
EXPECT_EQ(results.return_nodes[1]->name(), "B_1");
const AttrValue* class_attr =
results.return_nodes[1]->attrs().Find(kColocationAttrName);
ASSERT_TRUE(class_attr != nullptr);
ASSERT_EQ(class_attr->list().s_size(), 1);
EXPECT_EQ(class_attr->list().s(0), "loc:@A_1");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'A' op: 'TestInput' "
" attr { key: '_class' value { list { s:'loc:@B' } } } }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A:0'] }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_2");
EXPECT_EQ(results.return_nodes[1]->name(), "B_2");
class_attr = results.return_nodes[0]->attrs().Find(kColocationAttrName);
ASSERT_TRUE(class_attr != nullptr);
ASSERT_EQ(class_attr->list().s_size(), 1);
EXPECT_EQ(class_attr->list().s(0), "loc:@B_2");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'A' op: 'TestInput' "
" attr { key: '_class' value { list { s:'loc:@B' } } } }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A:0'] "
" attr { key: '_class' value { list { s:'loc:@B' } } } }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_3");
EXPECT_EQ(results.return_nodes[1]->name(), "B_3");
class_attr = results.return_nodes[0]->attrs().Find(kColocationAttrName);
ASSERT_TRUE(class_attr != nullptr);
ASSERT_EQ(class_attr->list().s_size(), 1);
EXPECT_EQ(class_attr->list().s(0), "loc:@B_3");
class_attr = results.return_nodes[1]->attrs().Find(kColocationAttrName);
ASSERT_TRUE(class_attr != nullptr);
ASSERT_EQ(class_attr->list().s_size(), 1);
EXPECT_EQ(class_attr->list().s(0), "loc:@B_3");
}
TEST_F(GraphConstructorTest, ImportGraphDef_WithCycle) {
GraphDef def;
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/Add"
op: "Add"
input: "while/Identity"
input: "while/Add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 11
}
)EOF",
&def);
ASSERT_TRUE(parsed);
Status s = ImportGraphDef(ImportGraphDefOptions(), def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
}
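// Control dependencies requested in the options are attached to imported
// nodes that have no inputs from elsewhere in the imported graph; nodes
// downstream of those inherit the ordering through the graph itself.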
TEST_F(GraphConstructorTest, ImportGraphDef_ControlDeps) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }",
ImportGraphDefOptions(), &refiner);
ImportGraphDefOptions opts;
opts.control_dependencies = {"W1", "W2"};
opts.prefix = "import";
opts.input_map[TensorId("W2", -1)] = TensorId("W2", -1);
opts.input_map[TensorId("W3", -1)] = TensorId("W2", -1);
ExpectOK(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'W3' op: 'TestParams' }
node { name: 'input' op: 'TestInput' }
node { name: 'input2' op: 'TestInput' input: [ '^W2' ] }
node { name: 'input3' op: 'TestInput' input: [ '^W2', '^W3' ] }
node { name: 't1' op: 'TestMul' input: [ 'input:0', 'input:1' ] }
node { name: 't2' op: 'TestMul'
input: [ 'input:0', 'input:1', '^W2', '^W3' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("import/W2"));
EXPECT_TRUE(HasNode("import/W3"));
EXPECT_TRUE(HasNode("import/input"));
EXPECT_TRUE(HasNode("import/input2"));
EXPECT_TRUE(HasNode("import/input3"));
EXPECT_TRUE(HasNode("import/t1"));
EXPECT_TRUE(HasNode("import/t2"));
EXPECT_TRUE(HasControlEdge("W1", "import/W2"));
EXPECT_TRUE(HasControlEdge("W2", "import/W2"));
EXPECT_TRUE(HasControlEdge("W1", "import/W3"));
EXPECT_TRUE(HasControlEdge("W2", "import/W3"));
EXPECT_TRUE(HasControlEdge("W1", "import/input"));
EXPECT_TRUE(HasControlEdge("W2", "import/input"));
EXPECT_FALSE(HasControlEdge("W1", "import/t1"));
EXPECT_FALSE(HasControlEdge("W2", "import/t1"));
EXPECT_TRUE(HasEdge("import/input", 0, "import/t1", 0));
EXPECT_TRUE(HasEdge("import/input", 1, "import/t1", 1));
EXPECT_TRUE(HasControlEdge("W2", "import/t2"));
EXPECT_FALSE(HasControlEdge("W1", "import/t2"));
EXPECT_TRUE(HasEdge("import/input", 0, "import/t1", 0));
EXPECT_TRUE(HasEdge("import/input", 1, "import/t1", 1));
EXPECT_TRUE(HasControlEdge("W1", "import/input2"));
EXPECT_TRUE(HasControlEdge("W2", "import/input2"));
EXPECT_FALSE(HasControlEdge("import/W2", "import/input2"));
EXPECT_TRUE(HasControlEdge("W1", "import/input3"));
EXPECT_TRUE(HasControlEdge("W2", "import/input3"));
Node* w2 = FindNode("import/W2");
ASSERT_EQ(w2->requested_inputs().size(), 2);
EXPECT_EQ(w2->requested_inputs()[0], "^W1");
EXPECT_EQ(w2->requested_inputs()[1], "^W2");
Node* w3 = FindNode("import/W3");
ASSERT_EQ(w3->requested_inputs().size(), 2);
EXPECT_EQ(w3->requested_inputs()[0], "^W1");
EXPECT_EQ(w3->requested_inputs()[1], "^W2");
Node* input = FindNode("import/input");
ASSERT_EQ(input->requested_inputs().size(), 2);
EXPECT_EQ(input->requested_inputs()[0], "^W1");
EXPECT_EQ(input->requested_inputs()[1], "^W2");
Node* input2 = FindNode("import/input2");
ASSERT_EQ(input2->requested_inputs().size(), 2);
EXPECT_EQ(input2->requested_inputs()[0], "^W2");
EXPECT_EQ(input2->requested_inputs()[1], "^W1");
Node* input3 = FindNode("import/input3");
ASSERT_EQ(input3->requested_inputs().size(), 2);
EXPECT_EQ(input3->requested_inputs()[0], "^W2");
EXPECT_EQ(input3->requested_inputs()[1], "^W1");
Node* t1 = FindNode("import/t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
EXPECT_EQ(t1->requested_inputs()[0], "import/input:0");
EXPECT_EQ(t1->requested_inputs()[1], "import/input:1");
Node* t2 = FindNode("import/t2");
ASSERT_EQ(t2->requested_inputs().size(), 3);
EXPECT_EQ(t2->requested_inputs()[0], "import/input:0");
EXPECT_EQ(t2->requested_inputs()[1], "import/input:1");
EXPECT_EQ(t2->requested_inputs()[2], "^W2");
}
TEST_F(GraphConstructorTest, ImportGraphDef_ControlDepsWithCycle) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }",
ImportGraphDefOptions(), &refiner);
ImportGraphDefOptions opts;
opts.control_dependencies.push_back("W1");
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 'merge' op: 'Merge' input: [ 'new_input:0', 'next:0' ]
attr { key: "N" value: { i: 2 } }
attr { key: "T" value: { type: DT_FLOAT } } }
node { name: 't1' op: 'TestMul' input: [ 'merge:0', 'merge:0' ] }
node { name: 'next' op: 'NextIteration' input: ['t1:0']
attr { key: "T" value: { type: DT_FLOAT } } }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("new_input"));
EXPECT_TRUE(HasNode("merge"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("next"));
EXPECT_TRUE(HasEdge("merge", 0, "t1", 0));
EXPECT_TRUE(HasEdge("t1", 0, "next", 0));
EXPECT_TRUE(HasEdge("next", 0, "merge", 1));
EXPECT_TRUE(HasControlEdge("W1", "merge"));
EXPECT_FALSE(HasControlEdge("W1", "t1"));
Node* merge = FindNode("merge");
ASSERT_EQ(merge->requested_inputs().size(), 3);
EXPECT_EQ(merge->requested_inputs()[0], "input:0");
EXPECT_EQ(merge->requested_inputs()[1], "next:0");
EXPECT_EQ(merge->requested_inputs()[2], "^W1");
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
EXPECT_EQ(t1->requested_inputs()[0], "merge:0");
EXPECT_EQ(t1->requested_inputs()[1], "merge:0");
Node* next = FindNode("next");
ASSERT_EQ(next->requested_inputs().size(), 1);
EXPECT_EQ(next->requested_inputs()[0], "t1:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_ControlDepsErrors) {
ImportGraphDefOptions opts;
opts.control_dependencies.push_back("W1");
ExpectError("node { name: 'W1' op: 'TestParams' }", opts,
{"node 'W1' in control_dependencies does not exist in graph"});
}
TEST_F(GraphConstructorTest, ImportGraphDef_ErrorsDoNotChangeTheGraph) {
GraphDef def;
TF_EXPECT_OK(
NodeDefBuilder("scope/A", "TestParams").Finalize(def.add_node()));
ImportGraphDefOptions opts;
const string& source = graph_.FindNodeId(Graph::kSourceId)->name();
const string& sink = graph_.FindNodeId(Graph::kSinkId)->name();
Status s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(3, graph_.num_nodes());
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_TRUE(HasControlEdge(source, "scope/A"));
EXPECT_TRUE(HasControlEdge("scope/A", sink));
EXPECT_EQ(3, graph_.num_edges());
const string original_graph_description = GraphDebugString();
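  // Attempts an import that is expected to fail, then verifies the failure
  // left the graph untouched: same debug string, node count, and edges.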
#define EXPECT_IMPORT_FAILURE(graph_def, options, expected_err) \
do { \
Status s = ImportGraphDef(options, graph_def, &graph_, nullptr); \
EXPECT_NE(OkStatus(), s) << s; \
EXPECT_TRUE(s.message().find(expected_err) != string::npos) << s; \
const string graph_description = GraphDebugString(); \
EXPECT_EQ(original_graph_description, graph_description); \
EXPECT_EQ(3, graph_.num_nodes()); \
EXPECT_TRUE(HasControlEdge(source, sink)); \
EXPECT_TRUE(HasControlEdge(source, "scope/A")); \
EXPECT_TRUE(HasControlEdge("scope/A", sink)); \
EXPECT_EQ(3, graph_.num_edges()); \
} while (0)
EXPECT_IMPORT_FAILURE(def, opts,
"Node name 'scope/A' already exists in the Graph");
GraphDef bad_def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'!B' op:'TestParams'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Node '!B': Node name contains invalid characters");
opts.prefix = "!bad_prefix";
EXPECT_IMPORT_FAILURE(def, opts,
"Imported node name prefix '!bad_prefix/' would lead "
"to invalid node names");
opts.prefix = "import";
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'B' op:'SomeUnknownOp'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Op type not registered 'SomeUnknownOp'");
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'B' op:'TestOneInputTwoOutputs' input:'C'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts, "Node 'B': Unknown input node 'C'");
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node{ name:"Root" op:"TestParams" } # TestParams produces a float
node{
name:"Integer"
op:"TestOneInputOneOutput"
attr{ key:"T" value{ type:DT_INT64 } }
input: "Root"
}
)EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Input 0 of node import/Integer was passed float from "
"import/Root:0 incompatible with expected int64");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node{ name:"A" op:"TestParams" }
node{ name:"B" op:"TestOneInputTwoOutputs" input:"A:1" }
)EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Node 'B': Connecting to invalid output 1 of source "
"node A which has 1 outputs");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node{ name:"A" op:"TestParams" }
node{ name:"B" op:"TestParams" }
node{ name:"C" op:"TestOneInputTwoOutputs" input:"A" input:"B" }
)EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(bad_def, opts, "do not match 2 inputs specified");
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{ name:'A' op:'TestOneInputTwoOutputs' }", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts, "do not match 0 inputs specified");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node{
name:"A"
op:"TestParams"
attr{
key:"_class"
value{ list{ s:"loc:@B" } }
}
})EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(
bad_def, opts, "Node 'A' expects to be colocated with unknown node 'B'");
opts.prefix = "";
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'scope/A' op:'TestParams'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Node name 'scope/A' already exists in the Graph");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node { name: "A" op: "TestParams" }
node { name: "B" op: "L2Loss"
input: "A:0"
attr { key: "T" value { type: DT_FLOAT } }
attr { key: "_output_shapes"
value { list { shape { dim { size: 43 } } } } } }
)EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Node 'B' has an _output_shapes attribute inconsistent "
"with the GraphDef for output #0");
#undef EXPECT_IMPORT_FAILURE
}
TEST_F(GraphConstructorTest, ImportGraphDef_FunctionDefs) {
ImportGraphDefOptions opts;
ExpectOK(
R"EOF(
node {
name: "Placeholder" op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { } } }
}
node {
name: "Placeholder_1" op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { } } }
}
node {
name: "Foo_d03c39a3" op: "Foo_d03c39a3"
input: "Placeholder" input: "Placeholder_1"
}
library {
function {
signature {
name: "Foo_d03c39a3"
input_arg { name: "x" type: DT_FLOAT }
input_arg { name: "y" type: DT_FLOAT }
output_arg { name: "add" type: DT_FLOAT }
}
node_def {
name: "add" op: "Add" input: "x" input: "y"
attr { key: "T" value { type: DT_FLOAT } }
}
ret { key: "add" value: "add:z:0" }
}
function {
signature {
name: "FooGrad_dc60abc8"
input_arg { name: "x" type: DT_FLOAT }
input_arg { name: "y" type: DT_FLOAT }
input_arg { name: "dz" type: DT_FLOAT }
output_arg { name: "dz" type: DT_FLOAT }
output_arg { name: "dz_U0" type: DT_FLOAT }
}
ret { key: "dz" value: "dz:0" }
ret { key: "dz_U0" value: "dz:0" }
}
gradient {
function_name: "Foo_d03c39a3" gradient_func: "FooGrad_dc60abc8"
}
}
versions { producer: 21 min_consumer: 12 }
)EOF",
opts);
EXPECT_TRUE(HasNode("Placeholder"));
EXPECT_TRUE(HasNode("Placeholder_1"));
EXPECT_TRUE(HasNode("Foo_d03c39a3"));
const OpDef* op_def;
TF_ASSERT_OK(graph_.op_registry()->LookUpOpDef("Foo_d03c39a3", &op_def));
TF_ASSERT_OK(graph_.op_registry()->LookUpOpDef("FooGrad_dc60abc8", &op_def));
GraphDef gdef;
graph_.ToGraphDef(&gdef);
EXPECT_EQ(gdef.library().function_size(), 2);
EXPECT_EQ(gdef.library().gradient_size(), 1);
EXPECT_EQ(gdef.library().gradient()[0].function_name(), "Foo_d03c39a3");
EXPECT_EQ(gdef.library().gradient()[0].gradient_func(), "FooGrad_dc60abc8");
std::unique_ptr<Session> sess(NewSession(SessionOptions()));
TF_ASSERT_OK(sess->Create(gdef));
Tensor p1(DT_FLOAT, TensorShape({1}));
p1.scalar<float>()() = 1.0;
Tensor p2(DT_FLOAT, TensorShape({1}));
p2.scalar<float>()() = 2.0;
std::vector<std::pair<string, Tensor>> inputs = {{"Placeholder", p1},
{"Placeholder_1", p2}};
std::vector<string> output_names = {"Foo_d03c39a3"};
std::vector<string> target_names;
std::vector<Tensor> outputs;
TF_ASSERT_OK(sess->Run(inputs, output_names, target_names, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs[0].scalar<float>()(), 3.0);
}
TEST_F(GraphConstructorTest, ImportGraphDef_NestedFunctionDefs) {
ImportGraphDefOptions opts;
ExpectOK(
R"EOF(
node {
name: "Placeholder" op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { } } }
}
node {
name: "Placeholder_1" op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { } } }
}
node {
name: "Outer_966fa13d" op: "Outer_966fa13d"
input: "Placeholder" input: "Placeholder_1"
}
library {
function {
signature {
name: "Outer_966fa13d"
input_arg { name: "x" type: DT_FLOAT }
input_arg { name: "y" type: DT_FLOAT }
output_arg { name: "Inner_d03c39a3" type: DT_FLOAT }
}
node_def {
name: "Inner_d03c39a3" op: "Inner_d03c39a3" input: "x" input: "y"
}
ret { key: "Inner_d03c39a3" value: "Inner_d03c39a3:add:0" }
}
function {
signature {
name: "Inner_d03c39a3"
input_arg { name: "x" type: DT_FLOAT }
input_arg { name: "y" type: DT_FLOAT }
output_arg { name: "add" type: DT_FLOAT }
}
node_def {
name: "add" op: "Add" input: "x" input: "y"
attr { key: "T" value { type: DT_FLOAT } }
}
ret { key: "add" value: "add:z:0" }
}
}
versions { producer: 21 min_consumer: 12 }
)EOF",
opts);
EXPECT_TRUE(HasNode("Placeholder"));
EXPECT_TRUE(HasNode("Placeholder_1"));
EXPECT_TRUE(HasNode("Outer_966fa13d"));
const OpDef* op_def;
Status s = graph_.op_registry()->LookUpOpDef("Inner_d03c39a3", &op_def);
ASSERT_TRUE(s.ok()) << s.message();
s = graph_.op_registry()->LookUpOpDef("Outer_966fa13d", &op_def);
ASSERT_TRUE(s.ok()) << s.message();
GraphDef gdef;
graph_.ToGraphDef(&gdef);
std::unique_ptr<Session> sess(NewSession(SessionOptions()));
s = sess->Create(gdef);
ASSERT_TRUE(s.ok()) << s.message();
Tensor p1(DT_FLOAT, TensorShape({1}));
p1.scalar<float>()() = 1.0;
Tensor p2(DT_FLOAT, TensorShape({1}));
p2.scalar<float>()() = 2.0;
std::vector<std::pair<string, Tensor>> inputs = {{"Placeholder", p1},
{"Placeholder_1", p2}};
std::vector<string> output_names = {"Outer_966fa13d"};
std::vector<string> target_names;
std::vector<Tensor> outputs;
s = sess->Run(inputs, output_names, target_names, &outputs);
ASSERT_TRUE(s.ok()) << s.message();
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs[0].scalar<float>()(), 3.0);
}
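// ImportGraphDefOptions must own copies of the names it is given: the test
// builds the options from char buffers and scribbles over them before
// importing.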
TEST_F(GraphConstructorTest, ImportGraphDef_OptionsMemMgmt) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
char buf1[100];
char buf2[100];
char buf3[100];
snprintf(buf1, sizeof(buf1), "input");
snprintf(buf2, sizeof(buf2), "new_input");
snprintf(buf3, sizeof(buf3), "t1");
ImportGraphDefOptions opts;
opts.input_map[TensorId(buf2, 0)] = TensorId(buf1, 0);
opts.return_tensors.push_back(TensorId(buf3, 0));
snprintf(buf1, sizeof(buf1), "xxxxxxxxxxxxxxxxxxxx");
snprintf(buf2, sizeof(buf2), "xxxxxxxxxxxxxxxxxxxx");
snprintf(buf3, sizeof(buf3), "xxxxxxxxxxxxxxxxxxxx");
ImportGraphDefResults results;
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
)EOF",
opts, &refiner, &results);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("new_input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("input", 0, "t1", 0));
EXPECT_TRUE(HasEdge("new_input", 1, "t1", 1));
ASSERT_EQ(results.return_tensors.size(), 1);
EXPECT_EQ(results.return_tensors[0].first->name(), "t1");
}
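// CopyGraph must carry over the full VersionDef, including bad_consumers.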
TEST_F(GraphConstructorTest, CopyGraph) {
const int v = TF_GRAPH_DEF_VERSION;
const int bad = v + 17;
VersionDef versions;
versions.set_producer(v - 1);
versions.set_min_consumer(v - 2);
versions.add_bad_consumers(bad);
Graph src(OpRegistry::Global());
src.set_versions(versions);
Graph dst(OpRegistry::Global());
CopyGraph(src, &dst);
EXPECT_EQ(dst.versions().producer(), versions.producer());
EXPECT_EQ(dst.versions().min_consumer(), versions.min_consumer());
EXPECT_EQ(dst.versions().bad_consumers_size(), 1);
EXPECT_EQ(dst.versions().bad_consumers(0), bad);
}
TEST_F(GraphConstructorTest, GraphDefVersionUsedForShapeInference) {
string gdef_ascii = strings::StrCat(R"EOF(
node{ name:"A" op:"RequiresCurrentGraphVersion" }
versions { producer: )EOF",
TF_GRAPH_DEF_VERSION - 1, "}");
ImportGraphDefOptions opts;
ExpectError(gdef_ascii, opts, {"Wrong graph version for shape"});
gdef_ascii = strings::StrCat(R"EOF(
node{ name:"A" op:"RequiresCurrentGraphVersion" }
versions { producer: )EOF",
TF_GRAPH_DEF_VERSION, "}");
ExpectOK(gdef_ascii, opts);
}
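// Version merging keeps the minimum producer, the maximum min_consumer, and
// the union of the bad_consumers lists.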
TEST_F(GraphConstructorTest, GraphDefVersionMergingDuringImport) {
ImportGraphDefOptions opts;
  ExpectOK(
      "versions { producer: 15 min_consumer: 5 bad_consumers: 2 "
      "bad_consumers: 3 }",
      opts);
EXPECT_EQ(15, graph_.versions().producer());
EXPECT_EQ(5, graph_.versions().min_consumer());
ASSERT_EQ(2, graph_.versions().bad_consumers_size());
EXPECT_EQ(2, graph_.versions().bad_consumers(0));
EXPECT_EQ(3, graph_.versions().bad_consumers(1));
  ExpectOK(
      "versions { producer: 10 min_consumer: 8 bad_consumers: 1 "
      "bad_consumers: 3 }",
      opts);
EXPECT_EQ(10, graph_.versions().producer());
EXPECT_EQ(8, graph_.versions().min_consumer());
ASSERT_EQ(3, graph_.versions().bad_consumers_size());
EXPECT_EQ(1, graph_.versions().bad_consumers(0));
EXPECT_EQ(2, graph_.versions().bad_consumers(1));
EXPECT_EQ(3, graph_.versions().bad_consumers(2));
ExpectOK("versions { producer: 20 min_consumer: 7 }", opts);
EXPECT_EQ(10, graph_.versions().producer());
EXPECT_EQ(8, graph_.versions().min_consumer());
ASSERT_EQ(3, graph_.versions().bad_consumers_size());
EXPECT_EQ(1, graph_.versions().bad_consumers(0));
EXPECT_EQ(2, graph_.versions().bad_consumers(1));
EXPECT_EQ(3, graph_.versions().bad_consumers(2));
}
TEST_F(GraphConstructorTest, ImportGraphDefProvidedShapeRefinerVersions) {
ImportGraphDefOptions opts;
string gdef_ascii;
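  // The Const nodes' tensor_content fields hold raw host-endian bytes, so the
  // textual graphs are duplicated for big- and little-endian hosts.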
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "Sum/input"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\001\000\000\000\002"
}
}
}
}
node {
name: "Sum/reduction_indices"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\000\000\000\000\001"
}
}
}
}
node {
name: "Sum"
op: "Sum"
input: "Sum/input"
input: "Sum/reduction_indices"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "Tidx"
value {
type: DT_INT32
}
}
attr {
key: "keep_dims"
value {
b: false
}
}
}
versions {
producer: 20
})EOF");
#else
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "Sum/input"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\001\000\000\000\002\000\000\000"
}
}
}
}
node {
name: "Sum/reduction_indices"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\000\001\000\000\000"
}
}
}
}
node {
name: "Sum"
op: "Sum"
input: "Sum/input"
input: "Sum/reduction_indices"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "Tidx"
value {
type: DT_INT32
}
}
attr {
key: "keep_dims"
value {
b: false
}
}
}
versions {
producer: 20
})EOF");
#endif
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(gdef_ascii, opts, &refiner);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "RandomConst"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\001\000\000\000\002"
}
}
}
}
versions {
producer: 21
})EOF");
#else
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "RandomConst"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\001\000\000\000\002\000\000\000"
}
}
}
}
versions {
producer: 21
})EOF");
#endif
ExpectOK(gdef_ascii, opts, &refiner);
EXPECT_EQ(20, refiner.graph_def_version());
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "RandomConst2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\001\000\000\000\002"
}
}
}
}
versions {
producer: 17
})EOF");
#else
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "RandomConst2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\001\000\000\000\002\000\000\000"
}
}
}
}
versions {
producer: 17
})EOF");
#endif
ExpectOK(gdef_ascii, opts, &refiner);
EXPECT_EQ(17, refiner.graph_def_version());
}
TEST_F(GraphConstructorTest, ImportGraphDef_ValidateColocationConstraints) {
GraphDef def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node { name: 'A' op: 'TestInput' attr { key: '_class' value { list { "
"s:'loc:@missing' } } } }",
&def));
ImportGraphDefOptions options;
Status s = ImportGraphDef(options, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
options.validate_colocation_constraints = false;
TF_EXPECT_OK(ImportGraphDef(options, def, &graph_, nullptr));
}
TEST_F(GraphConstructorTest, ImportGraphDef_ValidateDefaultDevice) {
std::string gdef_ascii(
R"EOF(
node { name: 'test_input' op: 'TestInput' }
node { name: 'test_input_with_dev' op: 'TestInput' device: 'some dev'}
node { name: 'test_op' op: 'TestMul' input: [ 'test_input:0', 'test_input:1' ] }
node { name: 'test_op_with_dev' op: 'TestMul' input: [ 'test_input:0', 'test_input:1' ] device: 'some dev'}
)EOF");
GraphDef gdef;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef));
ImportGraphDefOptions options;
options.default_device = "/gpu:13";
ImportGraphDefResults res;
TF_ASSERT_OK(ImportGraphDef(options, gdef, &graph_, nullptr, &res));
std::map<string, string> node2dev;
for (Node* n : graph_.nodes()) {
node2dev[n->name()] = n->requested_device();
}
EXPECT_EQ(node2dev["test_input"], "/gpu:13");
EXPECT_EQ(node2dev["test_op"], "/gpu:13");
EXPECT_EQ(node2dev["test_input_with_dev"], "some dev");
EXPECT_EQ(node2dev["test_op_with_dev"], "some dev");
}
TEST_F(GraphConstructorTest, ImportGraphDef_UnknownOps) {
const string pb_ascii = "node { name: 'op_from_contrib' op: 'OpFromContrib'}";
ExpectError(pb_ascii, {"Op type not registered 'OpFromContrib'"});
ExpectError(
pb_ascii,
{"Make sure the Op and Kernel are registered in the "
"binary running in this process. Note that if you "
"are loading a saved graph which used ops from "
"tf.contrib (e.g. `tf.contrib.resampler`), accessing should be done "
"before importing the graph, as contrib ops are lazily registered "
"when the module is first accessed."});
}
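// GraphDebugInfo traces keyed by plain node names become per-node stack
// traces; each file_line_cols entry indexes into the files table.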
TEST_F(GraphConstructorTest, GraphDebugInfo_Node_StackTrace_Deserialize) {
ExpectOK(R"(
node {
name: "w1"
op: "TestParams"
}
node {
name: "input"
op: "TestInput"
}
node {
name: "t1"
op: "TestMul"
input: "w1"
input: "input:1"
}
debug_info {
files: "alpha.cc"
files: "beta.cc"
files: "gamma.cc"
traces {
key: "w1"
value {
file_line_cols {
file_index: 0
line: 20
func: "foo"
}
file_line_cols {
file_index: 1
line: 30
func: "bar"
}
}
}
traces {
key: "input"
value {
file_line_cols {
file_index: 0
line: 20
func: "foo"
}
file_line_cols {
file_index: 2
line: 35
func: "tree"
}
}
}
traces {
key: "a1@foo"
value {
file_line_cols {
file_index: 0
line: 20
func: "foo"
}
file_line_cols {
file_index: 1
line: 30
func: "bar"
}
}
}
})");
Node* w1 = FindNode("w1");
EXPECT_NE(w1, nullptr);
const std::shared_ptr<AbstractStackTrace>& w1_stack = w1->GetStackTrace();
EXPECT_NE(w1_stack, nullptr);
EXPECT_EQ(w1_stack->ToString({}),
"File \"alpha.cc\", line 20, in foo\n"
"File \"beta.cc\", line 30, in bar");
Node* input = FindNode("input");
EXPECT_NE(input, nullptr);
const std::shared_ptr<AbstractStackTrace>& input_stack =
input->GetStackTrace();
EXPECT_NE(input_stack, nullptr);
EXPECT_EQ(input_stack->ToString({}),
"File \"alpha.cc\", line 20, in foo\n"
"File \"gamma.cc\", line 35, in tree");
}
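// File indices outside [0, files_size) must be tolerated: such frames render
// with an <UNKNOWN_FILE_NAME> placeholder instead of failing the import.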
TEST_F(GraphConstructorTest,
GraphDebugInfo_Node_StackTrace_Deserialize_InvalidFileIndex) {
ExpectOK(R"(
node {
name: "w1"
op: "TestParams"
}
node {
name: "input"
op: "TestInput"
}
node {
name: "t1"
op: "TestMul"
input: "w1"
input: "input:1"
}
debug_info {
files: "alpha.cc"
files: "beta.cc"
files: "gamma.cc"
traces {
key: "w1"
value {
file_line_cols {
file_index: 2
line: 20
func: "foo"
}
file_line_cols {
file_index: -1
line: 30
func: "negative_index"
}
file_line_cols {
file_index: 3
line: 40
func: "index_ge_length"
}
}
}
})");
Node* w1 = FindNode("w1");
EXPECT_NE(w1, nullptr);
const std::shared_ptr<AbstractStackTrace>& w1_stack = w1->GetStackTrace();
EXPECT_NE(w1_stack, nullptr);
EXPECT_EQ(w1_stack->ToString({}),
"File \"gamma.cc\", line 20, in foo\n"
"File \"<UNKNOWN_FILE_NAME>\", line 30, in negative_index\n"
"File \"<UNKNOWN_FILE_NAME>\", line 40, in index_ge_length");
}
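// Traces keyed as "<node>@<function>" belong to nodes inside library
// functions and surface through each FunctionRecord's stack_traces() map.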
TEST_F(GraphConstructorTest,
GraphDebugInfo_FunctionLibrary_StackTrace_Deserialize) {
ExpectOK(R"(
node {
name: "a"
op: "TestParams"
}
node {
name: "b"
op: "TestInput"
}
node {
name: "t1"
op: "TestMul"
input: "a"
input: "b:1"
}
library {
function {
signature { name: "foo" }
node_def { name: "a1" }
node_def { name: "a2" }
}
function {
signature { name: "bar" }
node_def { name: "b1" }
node_def { name: "b2" }
}
}
debug_info {
files: "alpha.cc"
files: "beta.cc"
files: "gamma.cc"
files: "delta.cc"
traces {
key: "input"
value {
file_line_cols { file_index: 0 line: 20 func: "foo" }
file_line_cols { file_index: 2 line: 35 func: "tree" }
}
}
traces {
key: "a1@foo"
value {
file_line_cols { file_index: 0 line: 20 func: "jazz" }
file_line_cols { file_index: 1 line: 30 func: "buzz" }
}
}
traces {
key: "a2@foo"
value {
file_line_cols { file_index: 1 line: 25 func: "fuzz" }
file_line_cols { file_index: 2 line: 35 func: "fizz" }
}
}
traces {
key: "b1@bar"
value {
file_line_cols { file_index: 0 line: 23 func: "chez" }
file_line_cols { file_index: 3 line: 33 func: "whiz" }
}
}
traces {
key: "b2@bar"
value {
file_line_cols { file_index: 1 line: 24 func: "quip" }
file_line_cols { file_index: 3 line: 34 func: "jape" }
}
}
})");
const FunctionLibraryDefinition& flib_def = graph_.flib_def();
core::RefCountPtr<FunctionRecord> foo_function_record =
flib_def.FindRecord("foo");
EXPECT_NE(foo_function_record.get(), nullptr);
const StackTracesMap& foo_stack_traces = foo_function_record->stack_traces();
auto a1_iter = foo_stack_traces.find("a1");
EXPECT_NE(a1_iter, foo_stack_traces.end());
std::shared_ptr<AbstractStackTrace> a1_stack_trace = a1_iter->second;
EXPECT_NE(a1_stack_trace.get(), nullptr);
EXPECT_EQ(a1_stack_trace->ToString({}),
"File \"alpha.cc\", line 20, in jazz\n"
"File \"beta.cc\", line 30, in buzz");
auto a2_iter = foo_stack_traces.find("a2");
EXPECT_NE(a2_iter, foo_stack_traces.end());
std::shared_ptr<AbstractStackTrace> a2_stack_trace = a2_iter->second;
EXPECT_NE(a2_stack_trace.get(), nullptr);
EXPECT_EQ(a2_stack_trace->ToString({}),
"File \"beta.cc\", line 25, in fuzz\n"
"File \"gamma.cc\", line 35, in fizz");
core::RefCountPtr<FunctionRecord> bar_function_record =
flib_def.FindRecord("bar");
EXPECT_NE(bar_function_record.get(), nullptr);
const StackTracesMap& bar_stack_traces = bar_function_record->stack_traces();
auto b1_iter = bar_stack_traces.find("b1");
EXPECT_NE(b1_iter, bar_stack_traces.end());
std::shared_ptr<AbstractStackTrace> b1_stack_trace = b1_iter->second;
EXPECT_NE(b1_stack_trace.get(), nullptr);
EXPECT_EQ(b1_stack_trace->ToString({}),
"File \"alpha.cc\", line 23, in chez\n"
"File \"delta.cc\", line 33, in whiz");
auto b2_iter = bar_stack_traces.find("b2");
EXPECT_NE(b2_iter, bar_stack_traces.end());
std::shared_ptr<AbstractStackTrace> b2_stack_trace = b2_iter->second;
EXPECT_NE(b2_stack_trace.get(), nullptr);
EXPECT_EQ(b2_stack_trace->ToString({}),
"File \"beta.cc\", line 24, in quip\n"
"File \"delta.cc\", line 34, in jape");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/graph_constructor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/graph_constructor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
572bf3bf-5817-49f1-84e8-2c61e8d34dfe | cpp | tensorflow/tensorflow | cudnn_custom_call_converter | third_party/xla/xla/service/gpu/transforms/cudnn_custom_call_converter.cc | third_party/xla/xla/service/gpu/transforms/cudnn_custom_call_converter_test.cc | #include "xla/service/gpu/transforms/cudnn_custom_call_converter.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
namespace {
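// Rewrites custom calls targeting the cuDNN fusion kind into kCustom fusion
// instructions, recording the original target in the fusion backend config.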
class CustomCallVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleCustomCall(HloInstruction *hlo) override {
if (hlo->custom_call_target() != kCuDnnFusionKind) {
return absl::OkStatus();
}
HloComputation *computation = hlo->GetModule()->AddEmbeddedComputation(
hlo->called_computations()[0]->Clone());
HloInstruction *fusion =
hlo->parent()->AddInstruction(HloInstruction::CreateFusion(
hlo->shape(), HloInstruction::FusionKind::kCustom, hlo->operands(),
computation));
GpuBackendConfig gpu_config;
FusionBackendConfig &backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind(hlo->custom_call_target());
TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, fusion));
return absl::OkStatus();
}
};
}
absl::StatusOr<bool> CuDnnCustomCallConverter::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
return CustomCallVisitor().RunOnModule(module, execution_threads);
}
}
} | #include "xla/service/gpu/transforms/cudnn_custom_call_converter.h"
#include <gtest/gtest.h>
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using ConverterTest = HloTestBase;
TEST_F(ConverterTest, CustomCallGetsConvertedToCustomFusion) {
RunAndFilecheckHloRewrite(R"(
f {
a = s8[] parameter(0)
ROOT r = s8[] add(a, a)
}
ENTRY e {
b = s8[] parameter(0)
ROOT c = s8[] custom-call(b),
custom_call_target="__cudnn$fusion", called_computations={f}
})",
CuDnnCustomCallConverter(), R"(
; CHECK: ROOT %fusion = s8[] fusion(%b), kind=kCustom, calls=%f
; CHECK-SAME: "fusion_backend_config":{"kind":"__cudnn$fusion"}
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_custom_call_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_custom_call_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7606cb75-ccae-4f79-b2f0-741f5d1073cb | cpp | tensorflow/tensorflow | ir_emitter | third_party/xla/xla/service/cpu/ir_emitter.cc | third_party/xla/xla/service/cpu/ir_emitter_test.cc | #include "xla/service/cpu/ir_emitter.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Value.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/cpu_options.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/elemental_math_emitter.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/service/cpu/ir_function.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/parallel_loop_emitter.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/llvm_ir/buffer_assignment_util.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_loop.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/service/llvm_ir/tuple_ops.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/math/math_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_memory_util.h"
#endif
namespace xla {
namespace {
using llvm_ir::IrName;
using llvm_ir::SetToFirstInsertPoint;
}
namespace cpu {
class IrEmitter::CpuElementalIrEmitter : public ElementalIrEmitter {
public:
CpuElementalIrEmitter(const HloModuleConfig& module_config,
IrEmitter* ir_emitter, llvm::Module* module)
: ElementalIrEmitter(
module, ir_emitter->b(),
Options{true}),
hlo_module_config_(module_config),
ir_emitter_(ir_emitter) {}
protected:
absl::StatusOr<llvm::Value*> EmitAtan2(PrimitiveType prim_type,
llvm::Value* lhs, llvm::Value* rhs,
absl::string_view) override {
return xla::cpu::EmitAtan2(module(), *b(), prim_type, lhs, rhs);
}
absl::StatusOr<llvm::Value*> EmitTanh(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitTanh(module(), *b(), prim_type, value);
}
absl::StatusOr<llvm::Value*> EmitErf(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitErf(module(), *b(), prim_type, value);
}
absl::StatusOr<std::vector<llvm::Value*>> EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name, bool is_reducer) override {
return ir_emitter_->EmitThreadLocalCall(callee, parameters, name,
is_reducer);
}
bool fast_min_max() override {
return hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max();
}
const HloModuleConfig& hlo_module_config_;
IrEmitter* ir_emitter_;
};
IrEmitter::IrEmitter(mlir::MLIRContext* mlir_context,
const HloModule& hlo_module,
const BufferAssignment& assignment,
llvm::Module* llvm_module,
absl::flat_hash_map<const HloInstruction*, int64_t>
instruction_to_profile_idx,
absl::flat_hash_map<const HloComputation*, int64_t>
computation_to_profile_idx,
absl::flat_hash_map<const HloComputation*, bool>
computation_transitively_contains_custom_call,
const TargetMachineFeatures* target_machine_features,
bool emit_code_for_msan)
: assignment_(assignment),
module_(llvm_module),
arch_type_(llvm::Triple(llvm_module->getTargetTriple()).getArch()),
main_builder_(llvm_module->getContext()),
current_builder_(&main_builder_),
mlir_context_(mlir_context),
instruction_to_profile_idx_(std::move(instruction_to_profile_idx)),
computation_to_profile_idx_(std::move(computation_to_profile_idx)),
computation_transitively_contains_custom_call_(
std::move(computation_transitively_contains_custom_call)),
alias_analysis_(hlo_module, assignment, &llvm_module->getContext()),
hlo_module_config_(hlo_module.config()),
is_top_level_computation_(false),
target_machine_features_(*target_machine_features),
emit_code_for_msan_(emit_code_for_msan) {
b()->setFastMathFlags(llvm_ir::GetCpuFastMathFlags(hlo_module_config_));
absl::Status s = GatherComputationsByAllocationType(
&hlo_module, &thread_local_computations_, &global_computations_);
absl::c_sort(thread_local_computations_);
absl::c_sort(global_computations_);
TF_CHECK_OK(s) << "Should have failed buffer assignment.";
}
IrEmitter::~IrEmitter() {
if (!compute_function_.empty()) {
LOG(WARNING) << "Compute function stack is not empty: "
<< compute_function_.size();
}
}
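// Copies the computation's root value into the function's result argument:
// scalars are stored directly, tuples element by element.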
void IrEmitter::EmitThreadLocalFunctionEpilogue(HloComputation* computation) {
llvm::Argument* out_parameter = compute_function()->result_arg();
llvm_ir::IrArray root_value = GetIrArrayFor(computation->root_instruction());
const Shape& return_shape = computation->root_instruction()->shape();
if (ShapeUtil::IsScalar(return_shape)) {
llvm::Value* ret_value =
Load(root_value.GetBasePointeeType(), root_value.GetBasePointer(),
"load_ret_value");
Store(ret_value, out_parameter);
} else {
CHECK(return_shape.IsTuple());
llvm::Type* tuple_type = llvm_ir::ShapeToIrType(return_shape, module_);
for (int i = 0; i < return_shape.tuple_shapes_size(); i++) {
const Shape& element_shape = return_shape.tuple_shapes(i);
llvm::Value* destination = llvm_ir::EmitGetTupleElement(
element_shape,
i,
MinimumAlignmentForShape(element_shape), out_parameter,
tuple_type, b());
llvm::Value* source = llvm_ir::EmitGetTupleElement(
element_shape,
i,
MinimumAlignmentForShape(element_shape),
root_value.GetBasePointer(), root_value.GetBasePointeeType(), b());
Store(Load(IrShapeType(element_shape), source), destination);
}
}
}
absl::StatusOr<llvm::Function*> IrEmitter::EmitComputation(
HloComputation* computation, absl::string_view function_name_prefix,
bool is_top_level_computation,
absl::Span<HloInstruction* const> instruction_order,
bool allow_reassociation,
absl::Span<const llvm::Attribute::AttrKind> function_attributes) {
std::string function_name = name_uniquer_.GetUniqueName(function_name_prefix);
VLOG(2) << "Emitting IR for CPU function [" << function_name_prefix << "]";
is_top_level_computation_ = is_top_level_computation;
allow_reassociation_ = allow_reassociation;
num_dynamic_loop_bounds_ = 0;
auto backend_config_or =
computation->root_instruction()->backend_config<BackendConfig>();
if (backend_config_or.ok() &&
!backend_config_or->outer_dimension_partitions().empty()) {
num_dynamic_loop_bounds_ =
backend_config_or->outer_dimension_partitions().size();
}
if (computation->root_instruction()->opcode() != HloOpcode::kOutfeed) {
TF_ASSIGN_OR_RETURN(
computation_root_allocation_,
assignment_.GetUniqueTopLevelSlice(computation->root_instruction()));
}
bool has_thread_local_param = false;
for (const HloInstruction* param : computation->parameter_instructions()) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice param_slice,
assignment_.GetUniqueTopLevelSlice(param));
has_thread_local_param |= param_slice.allocation()->is_thread_local();
computation_parameter_allocations_[param_slice.allocation()->index()] =
param->parameter_number();
}
InitializeIrFunction(function_name);
bool use_rdtscp = arch_type_ == llvm::Triple::ArchType::x86 ||
arch_type_ == llvm::Triple::ArchType::x86_64;
profiling_state_ = ProfilingState(use_rdtscp);
tracing_state_.set_enabled(
computation->parent()->config().cpu_traceme_enabled());
llvm::IRBuilderBase::FastMathFlagGuard guard(*b());
llvm::FastMathFlags flags = b()->getFastMathFlags();
flags.setAllowReassoc(flags.allowReassoc() || allow_reassociation);
b()->setFastMathFlags(flags);
TF_RETURN_IF_ERROR(computation->AcceptOrdered(this, instruction_order));
llvm::Function* ir_function = compute_function()->function();
for (llvm::Attribute::AttrKind attr : function_attributes) {
ir_function->addFnAttr(attr);
}
InsertOrDie(&emitted_functions_,
ComputationToEmit{computation, allow_reassociation}, ir_function);
const BufferAllocation* root_allocation =
computation_root_allocation_.allocation();
if (root_allocation &&
(root_allocation->is_thread_local() ||
(root_allocation->is_constant() && has_thread_local_param))) {
EmitThreadLocalFunctionEpilogue(computation);
}
PopComputeFunction();
computation_root_allocation_ = BufferAllocation::Slice();
computation_parameter_allocations_.clear();
return ir_function;
}
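// Entry computations get external linkage so they can be resolved by name at
// run time; nested computations stay internal to the module.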
void IrEmitter::InitializeIrFunction(const std::string& function_name) {
llvm::Function::LinkageTypes linkage =
is_top_level_computation_ ? llvm::GlobalValue::ExternalLinkage
: llvm::GlobalValue::InternalLinkage;
compute_function_.emplace(function_name, linkage, hlo_module_config_, module_,
b(), num_dynamic_loop_bounds_);
}
absl::Status IrEmitter::HandleBitcast(HloInstruction* bitcast) {
VLOG(2) << "HandleBitcast: " << bitcast->ToString();
emitted_value_[bitcast] = GetEmittedValueFor(bitcast->operand(0));
return absl::OkStatus();
}
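// Emits a literal as a private, constant, unnamed-addr global with the
// minimum alignment required by its shape.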
llvm::Constant* IrEmitter::EmitGlobalForLiteral(const Literal& literal) {
llvm::Constant* initializer =
llvm_ir::ConvertLiteralToIrConstant(literal, module_);
llvm::GlobalVariable* result_global = new llvm::GlobalVariable(
*module_,
initializer->getType(),
true,
llvm::GlobalValue::PrivateLinkage,
initializer,
"");
result_global->setAlignment(
llvm::Align(MinimumAlignmentForShape(literal.shape())));
result_global->setUnnamedAddr(llvm::GlobalVariable::UnnamedAddr::Global);
return result_global;
}
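// One global is emitted per distinct (layout-sensitive) literal; constant
// allocations holding equal literals share it.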
absl::Status IrEmitter::EmitConstantGlobals() {
for (const BufferAllocation& allocation : assignment_.Allocations()) {
if (!allocation.is_constant()) {
continue;
}
const Literal& literal = llvm_ir::LiteralForConstantAllocation(allocation);
llvm::Constant* global_for_const;
auto it = emitted_literals_.find(LayoutSensitiveLiteralWrapper{literal});
if (it != emitted_literals_.end()) {
global_for_const = it->second;
} else {
global_for_const = EmitGlobalForLiteral(literal);
InsertOrDie(&emitted_literals_, LayoutSensitiveLiteralWrapper{literal},
global_for_const);
}
InsertOrDie(&constant_buffer_to_global_, allocation.index(),
global_for_const);
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleConstant(HloInstruction* constant) {
VLOG(2) << "HandleConstant: " << constant->ToString();
return EmitTargetAddressForOp(constant);
}
absl::Status IrEmitter::HandleCopy(HloInstruction* copy) {
if (copy->shape().IsTuple() ||
(copy->shape().IsArray() &&
LayoutUtil::Equal(copy->operand(0)->shape().layout(),
copy->shape().layout()))) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(copy));
return EmitMemcpy(*(copy->operand(0)), *copy);
} else if (copy->shape().IsArray()) {
return DefaultAction(copy);
}
return Unimplemented("unsupported operand type %s for copy instruction",
PrimitiveType_Name(copy->shape().element_type()));
}
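// Minimum byte alignment for a single element of `primitive_type`; capped at
// 8 bytes even for 16-byte types such as complex128.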
int MinimumAlignmentForPrimitiveType(PrimitiveType primitive_type) {
int64_t byte_size = ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
DCHECK_GE(byte_size, 0);
DCHECK_LE(byte_size, 16);
return std::min(int64_t{8}, byte_size);
}
int IrEmitter::MinimumAlignmentForPrimitiveType(PrimitiveType primitive_type) {
return ::xla::cpu::MinimumAlignmentForPrimitiveType(primitive_type);
}
int64_t IrEmitter::ByteSizeOf(const Shape& shape) const {
return llvm_ir::ByteSizeOf(shape, module_->getDataLayout());
}
int IrEmitter::MinimumAlignmentForShape(const Shape& shape) {
if (ShapeUtil::IsScalar(shape)) {
return MinimumAlignmentForPrimitiveType(shape.element_type());
}
int64_t buffer_size = ByteSizeOf(shape);
DCHECK_GE(buffer_size, 0);
DCHECK_LE(buffer_size, SIZE_MAX);
return target_machine_features_.minimum_alignment_for_allocation(buffer_size);
}
void IrEmitter::AttachAlignmentMetadataForLoad(llvm::LoadInst* load,
const Shape& shape) {
int alignment = MinimumAlignmentForShape(shape);
if (alignment > 1) {
llvm_ir::SetAlignmentMetadataForLoad(load, alignment);
}
}
void IrEmitter::AttachAlignmentMetadataForLoad(llvm::LoadInst* load,
int64_t buffer_size) {
int alignment =
target_machine_features_.minimum_alignment_for_allocation(buffer_size);
if (alignment > 1) {
llvm_ir::SetAlignmentMetadataForLoad(load, alignment);
}
}
void IrEmitter::AttachDereferenceableMetadataForLoad(llvm::LoadInst* load,
const Shape& shape) {
AttachDereferenceableMetadataForLoad(load, ByteSizeOf(shape));
}
void IrEmitter::AttachDereferenceableMetadataForLoad(llvm::LoadInst* load,
int64_t buffer_size) {
if (buffer_size > 0) {
llvm_ir::SetDereferenceableMetadataForLoad(load, buffer_size);
}
}
void IrEmitter::AttachInvariantLoadMetadataForLoad(llvm::LoadInst* load) const {
AttachInvariantLoadMetadataForLoad(load, hlo_module_config_);
}
void IrEmitter::AttachInvariantLoadMetadataForLoad(
llvm::LoadInst* load, const HloModuleConfig& config) {
if (config.debug_options().xla_llvm_enable_invariant_load_metadata()) {
load->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(load->getContext(), {}));
}
}
absl::Status IrEmitter::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
const HloInstruction* operand = get_tuple_element->operand(0);
const Shape& shape = get_tuple_element->shape();
emitted_value_[get_tuple_element] = llvm_ir::EmitGetTupleElement(
shape, get_tuple_element->tuple_index(), MinimumAlignmentForShape(shape),
GetEmittedValueFor(operand), IrShapeType(operand->shape()), b());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleSelect(HloInstruction* select) {
auto pred = select->operand(0);
TF_RET_CHECK(pred->shape().element_type() == PRED);
return DefaultAction(select);
}
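// Infeed produces a (data, token) tuple. The data payload is pulled from the
// runtime's infeed queue via EmitXfeedTransfer; nested tuples are rejected,
// and a flat tuple payload is transferred one leaf buffer at a time.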
absl::Status IrEmitter::HandleInfeed(HloInstruction* instruction) {
HloInfeedInstruction* infeed = Cast<HloInfeedInstruction>(instruction);
VLOG(2) << "HandleInfeed: " << infeed->ToString();
const Shape& data_shape = infeed->infeed_shape();
DCHECK(ShapeUtil::Equal(data_shape,
ShapeUtil::GetTupleElementShape(infeed->shape(), 0)));
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(infeed));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice,
assignment_.GetUniqueSlice(infeed, {0}));
llvm::Value* data_address = EmitBufferPointer(data_slice, data_shape);
llvm::Type* data_type = IrShapeType(data_shape);
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice token_slice,
assignment_.GetUniqueSlice(infeed, {1}));
llvm::Value* token_address = EmitBufferPointer(
token_slice, ShapeUtil::GetTupleElementShape(infeed->shape(), 1));
llvm_ir::EmitTuple(GetIrArrayFor(infeed), {data_address, token_address}, b());
if (data_shape.IsTuple()) {
TF_RET_CHECK(!ShapeUtil::IsNestedTuple(data_shape));
std::vector<llvm::Value*> tuple_element_addresses;
for (int i = 0; i < data_shape.tuple_shapes_size(); ++i) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice buffer,
assignment_.GetUniqueSlice(infeed, {0, i}));
const Shape& tuple_element_shape =
ShapeUtil::GetTupleElementShape(data_shape, i);
llvm::Value* tuple_element_address =
EmitBufferPointer(buffer, tuple_element_shape);
TF_RETURN_IF_ERROR(EmitXfeedTransfer(
XfeedKind::kInfeed, tuple_element_shape, tuple_element_address));
tuple_element_addresses.push_back(tuple_element_address);
}
llvm_ir::EmitTuple(llvm_ir::IrArray(data_address, data_type, data_shape),
tuple_element_addresses, b());
} else {
TF_RETURN_IF_ERROR(
EmitXfeedTransfer(XfeedKind::kInfeed, data_shape, data_address));
}
return absl::OkStatus();
}
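// Moves `shape` between the program buffer and the runtime's xfeed queue:
// acquire a runtime buffer, memcpy in the direction implied by `kind`, then
// release the buffer. Lengths must fit in an int32 because that is what the
// runtime entry points take.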
absl::Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape,
llvm::Value* program_buffer_address) {
int64_t length = ByteSizeOf(shape);
if (length < 0 || length > std::numeric_limits<int32_t>::max()) {
return InvalidArgument(
"xfeed (infeed or outfeed) buffer length %d is outside the valid "
"size range",
length);
}
int32_t length_32 = static_cast<int32_t>(length);
int32_t shape_length;
TF_ASSIGN_OR_RETURN(
llvm::Value * shape_ptr,
llvm_ir::EncodeSelfDescribingShapeConstant(shape, &shape_length, b()));
const char* acquire_func_name =
kind == XfeedKind::kInfeed
? runtime::kAcquireInfeedBufferForDequeueSymbolName
: runtime::kAcquireOutfeedBufferForPopulationSymbolName;
llvm::Value* acquired_pointer = EmitCallToFunc(
acquire_func_name,
{GetExecutableRunOptionsArgument(), b()->getInt32(length_32), shape_ptr,
b()->getInt32(shape_length)},
b()->getPtrTy());
if (kind == XfeedKind::kInfeed) {
    MemCpy(program_buffer_address, /*DstAlign=*/llvm::Align(1),
           acquired_pointer, /*SrcAlign=*/llvm::Align(1), length_32);
  } else {
    MemCpy(acquired_pointer, /*DstAlign=*/llvm::Align(1),
           program_buffer_address, /*SrcAlign=*/llvm::Align(1), length_32);
if (emit_code_for_msan_) {
const llvm::DataLayout& dl = module_->getDataLayout();
llvm::Type* intptr_type = b()->getIntPtrTy(dl);
EmitCallToFunc(
"__msan_unpoison",
{acquired_pointer, llvm::ConstantInt::get(intptr_type, length)},
b()->getVoidTy());
}
}
const char* release_func_name =
kind == XfeedKind::kInfeed
? runtime::kReleaseInfeedBufferAfterDequeueSymbolName
: runtime::kReleaseOutfeedBufferAfterPopulationSymbolName;
EmitCallToFunc(release_func_name,
{GetExecutableRunOptionsArgument(), b()->getInt32(length_32),
acquired_pointer, shape_ptr, b()->getInt32(shape_length)},
b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleOutfeed(HloInstruction* outfeed) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(outfeed));
HloInstruction* operand = outfeed->operands()[0];
const Shape& operand_shape = operand->shape();
llvm::Value* value = GetEmittedValueFor(operand);
if (!operand_shape.IsTuple()) {
return EmitXfeedTransfer(XfeedKind::kOutfeed, operand_shape, value);
}
TF_RET_CHECK(!ShapeUtil::IsNestedTuple(operand_shape));
for (int i = 0; i < operand_shape.tuple_shapes_size(); ++i) {
const Shape& tuple_element_shape =
ShapeUtil::GetTupleElementShape(operand_shape, i);
llvm::Value* tuple_element = llvm_ir::EmitGetTupleElement(
tuple_element_shape, i, MinimumAlignmentForShape(tuple_element_shape),
value, IrShapeType(operand_shape), b());
TF_RETURN_IF_ERROR(EmitXfeedTransfer(XfeedKind::kOutfeed,
tuple_element_shape, tuple_element));
}
return absl::OkStatus();
}
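// Sort is lowered to a call into the runtime's key-value sort. Operands are
// first copied into the destination buffers (the sort works in place), and
// the comparator is passed as an already-emitted nested computation.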
absl::Status IrEmitter::HandleSort(HloInstruction* hlo) {
const HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(sort));
Shape keys_shape = sort->keys()->shape();
PrimitiveType keys_type = keys_shape.element_type();
if (!primitive_util::IsArrayType(keys_type)) {
return Unimplemented("Element type %s not supported in the Sort op on CPU.",
PrimitiveType_Name(keys_type));
}
std::vector<llvm::Value*> destination_addresses(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
ShapeIndex shape_index =
sort->values_count() > 0 ? ShapeIndex({i}) : ShapeIndex({});
const HloInstruction* operand = sort->operand(i);
TF_RET_CHECK(
LayoutUtil::LayoutsInShapesEqual(keys_shape, operand->shape()));
TF_RET_CHECK(LayoutUtil::LayoutsInShapesEqual(
keys_shape, ShapeUtil::GetSubshape(sort->shape(), shape_index)));
auto destination_buffer = GetAllocationSlice(*sort, shape_index);
destination_addresses[i] =
EmitBufferPointer(destination_buffer, operand->shape());
auto source_address = GetAllocationSlice(*operand);
if (destination_buffer != source_address) {
int64_t primitive_type_size =
ShapeUtil::ByteSizeOfPrimitiveType(operand->shape().element_type());
auto source_buffer = GetEmittedValueFor(operand);
int64_t size = ByteSizeOf(operand->shape());
      MemCpy(destination_addresses[i],
             /*DstAlign=*/llvm::Align(primitive_type_size), source_buffer,
             /*SrcAlign=*/llvm::Align(primitive_type_size), size);
}
}
Shape normalized_keys_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(keys_shape);
auto logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(keys_shape.layout());
TF_RET_CHECK(sort->sort_dimension() < logical_to_physical.size());
int64_t physical_dimension_to_sort =
logical_to_physical[sort->sort_dimension()];
int64_t sort_dimension_elements =
normalized_keys_shape.dimensions(physical_dimension_to_sort);
int64_t higher_dimensions = 1;
for (int64_t i = 0; i < physical_dimension_to_sort; ++i) {
higher_dimensions *= normalized_keys_shape.dimensions(i);
}
int64_t lower_dimensions = 1;
for (int64_t i = normalized_keys_shape.rank() - 1;
i > physical_dimension_to_sort; --i) {
lower_dimensions *= normalized_keys_shape.dimensions(i);
}
CHECK(absl::c_binary_search(thread_local_computations_, sort->to_apply()));
llvm::Value* values = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getPtrTy(), b()->getInt32(sort->operand_count()), "cc_values_alloca",
b());
llvm::Value* sizes = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getInt32Ty(), b()->getInt32(sort->operand_count()),
"cc_sizes_alloca", b());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
llvm::Value* slot_in_values_alloca =
ConstInBoundsGEP1_32(b()->getPtrTy(), values, i);
Store(destination_addresses[i], slot_in_values_alloca);
llvm::Value* slot_in_sizes_alloca =
ConstInBoundsGEP1_32(b()->getInt32Ty(), sizes, i);
llvm::Value* size = b()->getInt32(ShapeUtil::ByteSizeOfPrimitiveType(
sort->operand(i)->shape().element_type()));
Store(size, slot_in_sizes_alloca);
}
auto less_than_function =
FindOrDie(emitted_functions_,
ComputationToEmit{sort->to_apply(), allow_reassociation_});
EmitCallToFunc(
runtime::kKeyValueSortSymbolName,
{b()->getInt64(higher_dimensions), b()->getInt64(sort_dimension_elements),
b()->getInt64(lower_dimensions), values,
b()->getInt32(sort->operand_count()), sizes,
b()->getInt1(sort->is_stable()), GetExecutableRunOptionsArgument(),
GetProfileCountersArgument(), less_than_function},
b()->getVoidTy());
if (sort->values_count() > 0) {
llvm_ir::EmitTuple(GetIrArrayFor(sort), destination_addresses, b());
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleTuple(HloInstruction* tuple) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(tuple));
llvm::SmallVector<llvm::Value*> base_ptrs;
for (auto operand : tuple->operands()) {
base_ptrs.push_back(GetEmittedValueFor(operand));
}
llvm_ir::EmitTuple(GetIrArrayFor(tuple), base_ptrs, b());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleReduceWindow(HloInstruction* reduce_window) {
bool saved_allow_reassociation = allow_reassociation_;
allow_reassociation_ = true;
absl::Status status = DefaultAction(reduce_window);
allow_reassociation_ = saved_allow_reassociation;
return status;
}
absl::Status IrEmitter::HandleSelectAndScatter(
HloInstruction* select_and_scatter) {
CHECK_EQ(select_and_scatter->operand_count(), 3);
const auto operand = select_and_scatter->operand(0);
const auto source = select_and_scatter->operand(1);
return HandleSelectAndScatter(select_and_scatter, GetIrArrayFor(operand),
GetIrArrayFor(source),
GetIrArrayFor(select_and_scatter));
}
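// Shared lowering for select-and-scatter: initialize the output with the init
// value, then for every source element scan its window, pick a winning
// operand element with the `select` computation, and fold the source value
// into the output at the selected position with the `scatter` computation.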
absl::Status IrEmitter::HandleSelectAndScatter(
HloInstruction* select_and_scatter, const llvm_ir::IrArray& operand_array,
const llvm_ir::IrArray& source_array,
const llvm_ir::IrArray& output_array) {
CHECK_EQ(select_and_scatter->operand_count(), 3);
const auto operand = select_and_scatter->operand(0);
const auto source = select_and_scatter->operand(1);
const auto init_value = select_and_scatter->operand(2);
const Window& window = select_and_scatter->window();
PrimitiveType operand_element_type = operand->shape().element_type();
const int64_t rank = operand->shape().rank();
CHECK_EQ(rank, source->shape().rank());
CHECK_EQ(rank, window.dimensions_size());
if (window_util::HasDilation(window)) {
    return Unimplemented(
        "Dilation for SelectAndScatter is not implemented on CPU.");
}
TF_RETURN_IF_ERROR(EmitTargetElementLoop(
select_and_scatter, IrName(select_and_scatter, "init"),
[this, init_value](const llvm_ir::IrArray::Index& target_index) {
llvm::Value* init_value_addr = GetEmittedValueFor(init_value);
return Load(IrShapeType(init_value->shape()), init_value_addr);
},
std::optional<llvm_ir::IrArray>(output_array)));
llvm_ir::ForLoopNest source_loops(IrName(select_and_scatter), b());
const llvm_ir::IrArray::Index source_index =
source_loops.AddLoopsForShape(source->shape(), "source");
SetToFirstInsertPoint(source_loops.GetInnerLoopBodyBasicBlock(), b());
llvm::AllocaInst* selected_value_address = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),
"selected_value_address", b(),
MinimumAlignmentForPrimitiveType(operand_element_type));
llvm::AllocaInst* selected_index_address =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getInt64Ty(), b()->getInt32(rank), "selected_index_address",
b());
llvm::AllocaInst* initialized_flag_address =
llvm_ir::EmitAllocaAtFunctionEntry(b()->getInt1Ty(),
"initialized_flag_address", b());
Store(b()->getInt1(false), initialized_flag_address);
llvm_ir::ForLoopNest window_loops(IrName(select_and_scatter, "window"), b());
llvm::SmallVector<int64_t> window_size;
for (const auto& dim : window.dimensions()) {
window_size.push_back(dim.size());
}
const llvm_ir::IrArray::Index window_index = window_loops.AddLoopsForShape(
ShapeUtil::MakeShape(operand_element_type, window_size), "window");
SetToFirstInsertPoint(window_loops.GetInnerLoopBodyBasicBlock(), b());
llvm::SmallVector<llvm::Value*> operand_multi_index(source_index.size());
llvm::Value* in_bounds_condition = b()->getTrue();
for (int64_t i = 0; i < rank; ++i) {
llvm::Value* strided_index =
NSWMul(source_index[i], b()->getInt64(window.dimensions(i).stride()));
operand_multi_index[i] =
NSWSub(NSWAdd(strided_index, window_index[i]),
b()->getInt64(window.dimensions(i).padding_low()));
llvm::Value* index_condition =
ICmpULT(operand_multi_index[i],
b()->getInt64(ShapeUtil::GetDimension(operand->shape(), i)));
in_bounds_condition = And(in_bounds_condition, index_condition);
}
CHECK(in_bounds_condition != nullptr);
llvm_ir::LlvmIfData if_in_bounds =
llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", b());
SetToFirstInsertPoint(if_in_bounds.true_block, b());
llvm_ir::LlvmIfData if_initialized =
llvm_ir::EmitIfThenElse(Load(initialized_flag_address->getAllocatedType(),
initialized_flag_address),
"initialized", b());
SetToFirstInsertPoint(if_initialized.false_block, b());
const auto save_operand_index =
[&](const llvm_ir::IrArray::Index& operand_index) {
for (int64_t i = 0; i < rank; ++i) {
llvm::Value* selected_index_address_slot =
InBoundsGEP(selected_index_address->getAllocatedType(),
selected_index_address, {b()->getInt32(i)});
Store(operand_index[i], selected_index_address_slot);
}
};
llvm_ir::IrArray::Index operand_index(
operand_multi_index, operand_array.GetShape(), b()->getInt64Ty());
llvm::Value* operand_data =
operand_array.EmitReadArrayElement(operand_index, b());
Store(operand_data, selected_value_address);
save_operand_index(operand_index);
Store(b()->getInt1(true), initialized_flag_address);
SetToFirstInsertPoint(if_initialized.true_block, b());
llvm::Value* operand_address =
operand_array.EmitArrayElementAddress(operand_index, b());
llvm::Value* operand_element =
Load(operand_array.GetElementLlvmType(), operand_address);
llvm::Value* result = EmitScalarReturningThreadLocalCall(
*select_and_scatter->select(),
{Load(selected_value_address->getAllocatedType(), selected_value_address),
operand_element},
"select_function");
llvm::Value* cond = ICmpNE(
result,
llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),
"boolean_predicate");
llvm_ir::LlvmIfData if_select_lhs =
llvm_ir::EmitIfThenElse(cond, "if-select-lhs", b());
SetToFirstInsertPoint(if_select_lhs.false_block, b());
Store(Load(operand_array.GetElementLlvmType(), operand_address),
selected_value_address);
save_operand_index(operand_index);
SetToFirstInsertPoint(window_loops.GetOuterLoopExitBasicBlock(), b());
llvm::SmallVector<llvm::Value*> selected_multi_index;
for (int64_t i = 0; i < rank; ++i) {
const std::vector<llvm::Value*> gep_index = {b()->getInt32(i)};
llvm::Value* selected_index_address_slot =
InBoundsGEP(selected_index_address->getAllocatedType(),
selected_index_address, gep_index);
llvm::Type* type = llvm::GetElementPtrInst::getIndexedType(
selected_index_address->getAllocatedType(), gep_index);
selected_multi_index.push_back(Load(type, selected_index_address_slot));
}
llvm::Value* source_value =
source_array.EmitReadArrayElement(source_index, b());
llvm_ir::IrArray::Index selected_index(
selected_multi_index, output_array.GetShape(), source_index.GetType());
llvm::Value* output_value =
output_array.EmitReadArrayElement(selected_index, b());
llvm::Value* scatter_value = EmitScalarReturningThreadLocalCall(
*select_and_scatter->scatter(), {output_value, source_value},
"scatter_function");
output_array.EmitWriteArrayElement(selected_index, scatter_value, b());
SetToFirstInsertPoint(source_loops.GetOuterLoopExitBasicBlock(), b());
return absl::OkStatus();
}
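// Dots with a single contracting dimension are delegated to EmitDotOperation,
// which chooses a concrete lowering (such as an Eigen runtime call or an
// emitted loop nest) for the target.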
absl::Status IrEmitter::HandleDot(HloInstruction* dot) {
auto lhs = dot->operand(0);
auto rhs = dot->operand(1);
TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
*dot, {lhs, rhs},
{PRED, S8, U8, S16, U16, S32, U32, S64, U64, F16, F32, F64, C64, C128}));
const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();
if (dnums.lhs_contracting_dimensions_size() != 1) {
return Unimplemented(
"Dot with multiple contracting dimensions not implemented.");
}
llvm_ir::IrArray lhs_array(GetIrArrayFor(lhs));
llvm_ir::IrArray rhs_array(GetIrArrayFor(rhs));
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dot));
llvm_ir::IrArray target_array = GetIrArrayFor(dot);
VLOG(2) << "HandleDot: ";
VLOG(2) << " lhs operand: "
<< llvm_ir::DumpToString(lhs_array.GetBasePointer());
VLOG(2) << " rhs operand: "
<< llvm_ir::DumpToString(rhs_array.GetBasePointer());
VLOG(2) << " target: "
<< llvm_ir::DumpToString(target_array.GetBasePointer());
  return EmitDotOperation(*dot, target_array, lhs_array, rhs_array,
                          /*addend_array=*/nullptr,
                          GetExecutableRunOptionsArgument(), b(),
                          hlo_module_config_, target_machine_features_);
}
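// Convolutions matching the layout/type constraints below become a call into
// an Eigen kernel (or MKL-DNN/ACL when enabled); anything else falls back to
// the elemental DefaultAction lowering.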
absl::Status IrEmitter::HandleConvolution(HloInstruction* convolution) {
auto lhs = convolution->operand(0);
auto rhs = convolution->operand(1);
TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
*convolution, {lhs, rhs},
{PRED, S8, U8, S16, U16, S32, U32, S64, U64, F16, F32, F64, C64, C128}));
if (PotentiallyImplementedAsEigenConvolution(*convolution,
target_machine_features_)) {
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
const Shape& convolution_shape = convolution->shape();
if (LayoutUtil::IsMonotonicWithDim0Major(lhs_shape.layout()) &&
LayoutUtil::IsMonotonicWithDim0Major(rhs_shape.layout()) &&
LayoutUtil::IsMonotonicWithDim0Major(convolution_shape.layout())) {
bool one_dim_convolution = lhs_shape.dimensions_size() == 3;
llvm::Value* lhs_address = GetEmittedValueFor(lhs);
llvm::Value* rhs_address = GetEmittedValueFor(rhs);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(convolution));
const ConvolutionDimensionNumbers& dnums =
convolution->convolution_dimension_numbers();
absl::InlinedVector<int64_t, 2> input_dims;
absl::InlinedVector<int64_t, 2> kernel_dims;
absl::InlinedVector<int64_t, 2> output_dims;
if (one_dim_convolution) {
input_dims.push_back(1);
kernel_dims.push_back(1);
output_dims.push_back(1);
}
const Shape& input_shape = convolution->operand(0)->shape();
int64_t input_batch =
input_shape.dimensions(dnums.input_batch_dimension());
for (int d : dnums.input_spatial_dimensions()) {
input_dims.push_back(input_shape.dimensions(d));
}
int64_t input_channels =
input_shape.dimensions(dnums.input_feature_dimension());
const Shape& kernel_shape = convolution->operand(1)->shape();
for (int d : dnums.kernel_spatial_dimensions()) {
kernel_dims.push_back(kernel_shape.dimensions(d));
}
int64_t kernel_channels =
kernel_shape.dimensions(dnums.kernel_input_feature_dimension());
int64_t kernel_filters =
kernel_shape.dimensions(dnums.kernel_output_feature_dimension());
const Shape& convolution_shape = convolution->shape();
for (int d : dnums.output_spatial_dimensions()) {
output_dims.push_back(convolution_shape.dimensions(d));
}
const Window& window = convolution->window();
absl::InlinedVector<int64_t, 2> strides;
absl::InlinedVector<std::pair<int64_t, int64_t>, 2> padding;
absl::InlinedVector<int64_t, 2> base_dilation;
absl::InlinedVector<int64_t, 2> window_dilation;
if (one_dim_convolution) {
strides.push_back(1);
padding.push_back({0, 0});
base_dilation.push_back(1);
window_dilation.push_back(1);
}
for (const auto& d : window.dimensions()) {
strides.push_back(d.stride());
padding.push_back({d.padding_low(), d.padding_high()});
base_dilation.push_back(d.base_dilation());
window_dilation.push_back(d.window_dilation());
}
PrimitiveType primitive_type = lhs->shape().element_type();
bool multi_threaded =
hlo_module_config_.debug_options().xla_cpu_multi_thread_eigen();
bool use_mkl_dnn =
hlo_module_config_.debug_options().xla_cpu_use_mkl_dnn() &&
convolution->feature_group_count() == 1;
bool use_acl = hlo_module_config_.debug_options().xla_cpu_use_acl();
auto valid_num_dims = [](absl::Span<const int64_t> xs) {
return xs.size() >= 2 && xs.size() <= 3;
};
TF_RET_CHECK(valid_num_dims(input_dims)) << input_dims.size();
TF_RET_CHECK(valid_num_dims(kernel_dims));
TF_RET_CHECK(valid_num_dims(output_dims));
TF_RET_CHECK(valid_num_dims(strides));
TF_RET_CHECK(padding.size() >= 2 && padding.size() <= 3);
TF_RET_CHECK(valid_num_dims(base_dilation));
TF_RET_CHECK(valid_num_dims(window_dilation));
const char* fn_name;
if (input_dims.size() == 2) {
fn_name =
primitive_type == F16
? (multi_threaded
? runtime::kEigenConv2DF16SymbolName
: runtime::kEigenSingleThreadedConv2DF16SymbolName)
: (multi_threaded
? (use_mkl_dnn
? runtime::kMKLConv2DF32SymbolName
: (use_acl ? runtime::kACLConv2DF32SymbolName
: runtime::kEigenConv2DF32SymbolName))
: runtime::kEigenSingleThreadedConv2DF32SymbolName);
} else if (input_dims.size() == 3) {
fn_name =
primitive_type == F16
? (multi_threaded
? runtime::kEigenConv3DF16SymbolName
: runtime::kEigenSingleThreadedConv3DF16SymbolName)
: (multi_threaded
? runtime::kEigenConv3DF32SymbolName
: runtime::kEigenSingleThreadedConv3DF32SymbolName);
} else {
LOG(FATAL) << "Invalid number of dimensions " << input_dims.size();
}
if (!multi_threaded && use_mkl_dnn) {
LOG(WARNING) << "Using Eigen instead of MKL-DNN for single-threaded "
"convolution.";
}
std::vector<llvm::Value*> args = {
GetExecutableRunOptionsArgument(),
GetEmittedValueFor(convolution),
lhs_address,
rhs_address,
b()->getInt64(input_batch),
};
for (int64_t d : input_dims) {
args.push_back(b()->getInt64(d));
}
args.push_back(b()->getInt64(input_channels));
for (int64_t d : kernel_dims) {
args.push_back(b()->getInt64(d));
}
args.push_back(b()->getInt64(kernel_channels));
args.push_back(b()->getInt64(kernel_filters));
for (int64_t d : output_dims) {
args.push_back(b()->getInt64(d));
}
for (int64_t d : strides) {
args.push_back(b()->getInt64(d));
}
for (const auto& p : padding) {
args.push_back(b()->getInt64(p.first));
args.push_back(b()->getInt64(p.second));
}
for (int64_t d : base_dilation) {
args.push_back(b()->getInt64(d));
}
for (int64_t d : window_dilation) {
args.push_back(b()->getInt64(d));
}
args.push_back(b()->getInt64(convolution->feature_group_count()));
VLOG(1) << "Ir emitter emitted Convolution to runtime:" << fn_name;
      EmitCallToFunc(fn_name, args, b()->getVoidTy(),
                     /*does_not_throw=*/true,
                     /*only_accesses_arg_memory=*/true);
return absl::OkStatus();
}
}
return DefaultAction(convolution);
}
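// FFT is lowered to a DUCC runtime call, passing the flattened batch and FFT
// dimensions plus a flag for double-precision element types.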
absl::Status IrEmitter::HandleFft(HloInstruction* fft) {
auto operand = fft->operand(0);
  TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
      *fft, {operand}, {F32, F64, C64, C128}));
TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(operand->shape().layout()));
TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(fft->shape().layout()));
VLOG(3) << "operand=" << ShapeUtil::HumanStringWithLayout(operand->shape());
VLOG(3) << "fft=" << ShapeUtil::HumanStringWithLayout(fft->shape());
llvm::Value* operand_address = GetEmittedValueFor(operand);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fft));
const std::vector<int64_t>& fft_length = fft->fft_length();
const int fft_rank = fft_length.size();
absl::InlinedVector<int64_t, 4> operand_shape_flat(fft_rank + 1);
int64_t input_batch = 1;
int64_t input_batch_length = fft->shape().dimensions_size() - fft_rank;
for (int i = 0; i < input_batch_length; i++) {
input_batch *= operand->shape().dimensions(i);
}
operand_shape_flat[0] = input_batch;
for (int i = 0; i < fft_rank; ++i) {
operand_shape_flat[i + 1] =
operand->shape().dimensions(i + input_batch_length);
}
bool multi_threaded_eigen =
hlo_module_config_.debug_options().xla_cpu_multi_thread_eigen();
const char* fn_name = multi_threaded_eigen
? runtime::kDuccFftSymbolName
: runtime::kDuccSingleThreadedFftSymbolName;
auto* fft_lengths =
EmitGlobalForLiteral(LiteralUtil::CreateR1<int64_t>(fft_length));
auto* input_shape =
EmitGlobalForLiteral(LiteralUtil::CreateR1<int64_t>(operand_shape_flat));
EmitCallToFunc(fn_name,
{GetExecutableRunOptionsArgument(), GetEmittedValueFor(fft),
operand_address, b()->getInt32(fft->fft_type()),
b()->getInt32(operand->shape().element_type() == F64 ||
operand->shape().element_type() == C128),
b()->getInt32(fft_rank), input_shape, fft_lengths},
                 b()->getVoidTy(), /*does_not_throw=*/true,
                 /*only_accesses_arg_memory=*/false,
                 /*only_accesses_inaccessible_mem_or_arg_mem=*/true);
return absl::OkStatus();
}
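// With a single replica, all-reduce is the identity: copy each operand into
// its output slice and, for the variadic form, wrap the copies in a tuple.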
absl::Status IrEmitter::HandleAllReduceSingleReplica(HloInstruction* crs) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(crs));
if (crs->operand_count() == 1) {
return EmitMemcpy(*crs->operand(0), *crs);
}
std::vector<llvm::Value*> operand_ptrs;
for (int64_t i = 0; i < crs->operand_count(); ++i) {
llvm::Value* in_ptr = GetEmittedValueFor(crs->operand(i));
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
assignment_.GetUniqueSlice(crs, {i}));
const Shape& operand_shape = crs->operand(i)->shape();
CHECK(operand_shape.IsArray())
<< "Operands to all-reduce must be arrays: " << crs->ToString();
operand_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
    MemCpy(operand_ptrs.back(), /*DstAlign=*/llvm::Align(1), in_ptr,
           /*SrcAlign=*/llvm::Align(1), ShapeUtil::ByteSizeOf(operand_shape));
}
llvm_ir::EmitTuple(GetIrArrayFor(crs), operand_ptrs, b());
return absl::OkStatus();
}
static bool DataTypeIsSupportedByReduceScatter(PrimitiveType datatype) {
switch (datatype) {
case PRED:
case S8:
case U8:
case S16:
case U16:
case S32:
case U32:
case S64:
case U64:
case F16:
case F32:
case F64:
case C64:
case C128:
return true;
default:
return false;
}
}
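// The multi-replica path calls the runtime's all-reduce, which needs the
// replica groups, channel id, reduction kind, and a self-describing shape.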
absl::Status IrEmitter::HandleAllReduceMultipleReplica(HloInstruction* crs) {
CHECK_GE(crs->operand_count(), 1);
PrimitiveType datatype = crs->operand(0)->shape().element_type();
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(crs));
if (!DataTypeIsSupportedByReduceScatter(datatype)) {
return Unimplemented("AllReduce for datatype '%s' is not supported",
primitive_util::LowercasePrimitiveTypeName(datatype));
}
if (!MatchReductionComputation(crs->to_apply()).has_value()) {
return Unimplemented("AllReduce for computation '%s' is not supported",
crs->to_apply()->ToString());
}
std::string replica_groups = ReplicaGroupsToString(crs->replica_groups());
int32_t replica_groups_size = replica_groups.size();
llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
bool is_tuple = crs->operand_count() > 1;
std::vector<llvm::Value*> input_buffer_ptrs;
std::vector<llvm::Value*> output_buffer_ptrs;
if (is_tuple) {
CHECK(crs->shape().IsTuple());
for (int64_t i = 0; i < crs->operand_count(); i++) {
const HloInstruction* op = crs->operand(i);
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
assignment_.GetUniqueSlice(crs, {i}));
const Shape& operand_shape = crs->operand(i)->shape();
CHECK(operand_shape.IsArray())
<< "Operands to all-reduce must be arrays: " << crs->ToString();
output_buffer_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
input_buffer_ptrs.push_back(GetEmittedValueFor(op));
}
} else {
Shape shape = crs->operand(0)->shape();
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
assignment_.GetUniqueSlice(crs->operand(0), {}));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(crs, {}));
input_buffer_ptrs.push_back(EmitBufferPointer(input_slice, shape));
output_buffer_ptrs.push_back(EmitBufferPointer(output_slice, shape));
}
llvm::Value* input_buffers =
EncodeArrayFunctionArguments(input_buffer_ptrs, "input_buffers", b());
llvm::Value* output_buffers =
EncodeArrayFunctionArguments(output_buffer_ptrs, "output_buffers", b());
int32_t shape_length;
TF_ASSIGN_OR_RETURN(llvm::Value * shape_ptr,
llvm_ir::EncodeSelfDescribingShapeConstant(
crs->shape(), &shape_length, b()));
bool use_global_device_ids =
Cast<HloAllReduceInstruction>(crs)->use_global_device_ids();
EmitCallToFunc(
runtime::kAllReduceSymbolName,
{GetExecutableRunOptionsArgument(),
replica_groups_v,
b()->getInt32(replica_groups_size),
b()->getInt32(static_cast<int32_t>(crs->channel_id().has_value())),
b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
b()->getInt64(crs->channel_id().has_value()
? *crs->channel_id()
: crs->GetModule()->unique_id()),
b()->getInt32(
static_cast<int32_t>(*MatchReductionComputation(crs->to_apply()))),
shape_ptr,
b()->getInt32(shape_length),
b()->getInt32(crs->operand_count()),
input_buffers,
output_buffers},
b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleAllReduce(HloInstruction* crs) {
if (hlo_module_config_.replica_count() == 1 &&
hlo_module_config_.num_partitions() == 1) {
return HandleAllReduceSingleReplica(crs);
}
return HandleAllReduceMultipleReplica(crs);
}
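// Reduce-scatter mirrors the multi-replica all-reduce lowering, but with a
// single array operand and an element count instead of an encoded shape.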
absl::Status IrEmitter::HandleReduceScatter(HloInstruction* rs) {
CHECK_EQ(rs->operand_count(), 1);
PrimitiveType datatype = rs->operand(0)->shape().element_type();
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(rs));
if (!DataTypeIsSupportedByReduceScatter(datatype)) {
return Unimplemented("ReduceScatter for datatype '%s' is not supported",
primitive_util::LowercasePrimitiveTypeName(datatype));
}
if (!MatchReductionComputation(rs->to_apply()).has_value()) {
return Unimplemented("ReduceScatter for computation '%s' is not supported",
rs->to_apply()->ToString());
}
std::string replica_groups = ReplicaGroupsToString(rs->replica_groups());
int32_t replica_groups_size = replica_groups.size();
llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
Shape shape = rs->operand(0)->shape();
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
assignment_.GetUniqueSlice(rs->operand(0), {}));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(rs, {}));
llvm::Value* input_buffer = EmitBufferPointer(input_slice, shape);
llvm::Value* output_buffer = EmitBufferPointer(output_slice, shape);
bool use_global_device_ids =
Cast<HloReduceScatterInstruction>(rs)->use_global_device_ids();
EmitCallToFunc(
runtime::kReduceScatterSymbolName,
{GetExecutableRunOptionsArgument(),
replica_groups_v,
b()->getInt32(replica_groups_size),
b()->getInt32(static_cast<int32_t>(rs->channel_id().has_value())),
b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
b()->getInt64(rs->channel_id().has_value()
? *rs->channel_id()
: rs->GetModule()->unique_id()),
b()->getInt32(
static_cast<int32_t>(*MatchReductionComputation(rs->to_apply()))),
b()->getInt32(static_cast<int32_t>(datatype)),
b()->getInt64(ShapeUtil::ElementsIn(rs->shape())),
input_buffer,
output_buffer},
b()->getVoidTy());
return absl::OkStatus();
}
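// Only the tuple form of all-to-all is supported; every operand contributes
// one equally sized buffer to the runtime call.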
absl::Status IrEmitter::HandleAllToAll(HloInstruction* instruction) {
auto* instr = Cast<HloAllToAllInstruction>(instruction);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instruction));
CHECK(!instr->split_dimension() && instr->shape().IsTuple())
<< "Only tuple AllToAll is supported";
std::string replica_groups =
ReplicaGroupsToString(instruction->replica_groups());
int32_t replica_groups_size = replica_groups.size();
llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
int64_t buffer_size = -1;
std::vector<llvm::Value*> input_buffer_ptrs;
std::vector<llvm::Value*> output_buffer_ptrs;
for (int64_t i = 0; i < instruction->operand_count(); i++) {
const HloInstruction* op = instruction->operand(i);
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
assignment_.GetUniqueSlice(instruction, {i}));
const Shape& operand_shape = instruction->operand(i)->shape();
CHECK(operand_shape.IsArray())
<< "Operands to all-to-all must be arrays: " << instruction->ToString();
output_buffer_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
input_buffer_ptrs.push_back(GetEmittedValueFor(op));
CHECK(buffer_size == -1 || buffer_size == out_slice.size());
buffer_size = out_slice.size();
}
llvm::Value* input_buffers =
EncodeArrayFunctionArguments(input_buffer_ptrs, "input_buffers", b());
llvm::Value* output_buffers =
EncodeArrayFunctionArguments(output_buffer_ptrs, "output_buffers", b());
EmitCallToFunc(
runtime::kAllToAllSymbolName,
{
GetExecutableRunOptionsArgument(),
b()->getInt32(
static_cast<int32_t>(instruction->channel_id().has_value())),
b()->getInt64(instruction->channel_id().has_value()
? *instruction->channel_id()
: instruction->GetModule()->unique_id()),
replica_groups_v,
b()->getInt32(replica_groups_size),
b()->getInt32(instruction->operand_count()),
b()->getInt64(buffer_size),
input_buffers,
output_buffers,
},
b()->getVoidTy());
llvm_ir::EmitTuple(GetIrArrayFor(instruction), output_buffer_ptrs, b());
return absl::OkStatus();
}
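// All-gather takes a single array operand; the gathered result is written by
// the runtime call into the instruction's output buffer.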
absl::Status IrEmitter::HandleAllGather(HloInstruction* instruction) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instruction));
std::string replica_groups =
ReplicaGroupsToString(instruction->replica_groups());
int32_t replica_groups_size = replica_groups.size();
llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
std::vector<llvm::Value*> input_buffer_ptrs;
std::vector<llvm::Value*> output_buffer_ptrs;
const HloInstruction* op = instruction->operand(0);
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice in_slice,
assignment_.GetUniqueSlice(op, {}));
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
assignment_.GetUniqueSlice(instruction, {}));
const Shape& operand_shape = op->shape();
  CHECK(op->shape().IsArray())
      << "Operand to all-gather must be an array: " << instruction->ToString();
llvm::Value* output_buffer = EmitBufferPointer(out_slice, operand_shape);
llvm::Value* input_buffer = GetEmittedValueFor(op);
int64_t buffer_size = in_slice.size();
bool use_global_device_ids =
Cast<HloAllGatherInstruction>(instruction)->use_global_device_ids();
EmitCallToFunc(
runtime::kAllGatherSymbolName,
{
GetExecutableRunOptionsArgument(),
b()->getInt32(
static_cast<int32_t>(instruction->channel_id().has_value())),
b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
b()->getInt64(instruction->channel_id().has_value()
? *instruction->channel_id()
: instruction->GetModule()->unique_id()),
replica_groups_v,
b()->getInt32(replica_groups_size),
b()->getInt64(buffer_size),
input_buffer,
output_buffer,
},
b()->getVoidTy());
llvm_ir::EmitTuple(GetIrArrayFor(instruction), output_buffer_ptrs, b());
return absl::OkStatus();
}
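// Collective-permute serializes its source-target pairs into a string that
// the runtime parses on the other side of the call.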
absl::Status IrEmitter::HandleCollectivePermute(HloInstruction* crs) {
auto* instr = Cast<HloCollectivePermuteInstruction>(crs);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instr));
std::string source_target_pairs = absl::StrJoin(
instr->source_target_pairs(), ",", absl::PairFormatter("="));
llvm::Value* source_target_pairs_v =
b()->CreateGlobalStringPtr(source_target_pairs);
Shape shape = crs->operand(0)->shape();
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
assignment_.GetUniqueSlice(crs->operand(0), {}));
llvm::Value* input_buffer = EmitBufferPointer(input_slice, shape);
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(crs, {}));
llvm::Value* output_buffer = EmitBufferPointer(output_slice, shape);
EmitCallToFunc(
runtime::kCollectivePermuteSymbolName,
{GetExecutableRunOptionsArgument(),
b()->getInt32(static_cast<int32_t>(crs->channel_id().has_value())),
b()->getInt64(crs->channel_id().has_value()
? *crs->channel_id()
: crs->GetModule()->unique_id()),
b()->getInt32(ShapeUtil::ByteSizeOf(shape)),
input_buffer,
output_buffer,
source_target_pairs_v,
b()->getInt32(source_target_pairs.size())},
b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandlePartitionId(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(hlo, {}));
llvm::Value* output_buffer = EmitBufferPointer(output_slice, hlo->shape());
  EmitCallToFunc(runtime::kPartitionIdSymbolName,
                 {GetExecutableRunOptionsArgument(), output_buffer},
                 b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleReplicaId(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(hlo, {}));
llvm::Value* output_buffer = EmitBufferPointer(output_slice, hlo->shape());
  EmitCallToFunc(runtime::kReplicaIdSymbolName,
                 {GetExecutableRunOptionsArgument(), output_buffer},
                 b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleParameter(HloInstruction* parameter) {
VLOG(2) << "HandleParameter: " << parameter->ToString();
return EmitTargetAddressForOp(parameter);
}
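// True iff deleting the reduced dimensions from the operand layout yields
// exactly the result layout, i.e. the reduction does not permute the relative
// physical order of the surviving dimensions.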
static bool ReductionPreservesLayout(const HloInstruction& reduce) {
DCHECK_EQ(reduce.opcode(), HloOpcode::kReduce);
absl::flat_hash_map<int64_t, int64_t> unreduced_dim_map;
absl::flat_hash_set<int64_t> reduced_dims(reduce.dimensions().begin(),
reduce.dimensions().end());
const Shape& operand_shape = reduce.operand(0)->shape();
const Shape& result_shape = reduce.shape();
int64_t delta = 0;
for (int64_t i = 0; i < operand_shape.dimensions_size(); i++) {
if (reduced_dims.contains(i)) {
delta++;
} else {
InsertOrDie(&unreduced_dim_map, i, i - delta);
}
}
int64_t result_dim_idx = 0;
for (int64_t operand_dim_idx = 0;
operand_dim_idx < operand_shape.dimensions_size(); operand_dim_idx++) {
int64_t operand_dim =
operand_shape.layout().minor_to_major(operand_dim_idx);
if (!reduced_dims.contains(operand_dim)) {
if (FindOrDie(unreduced_dim_map, operand_dim) !=
result_shape.layout().minor_to_major(result_dim_idx++)) {
return false;
}
}
}
CHECK_EQ(result_dim_idx, result_shape.dimensions_size());
return true;
}
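// Tries to recognize the reduction computation as a single binary op over its
// two parameters (add, multiply, and/or/xor, min, max) and returns a
// generator emitting the matching LLVM instruction; otherwise returns null
// and records a failure reason.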
IrEmitter::ReductionGenerator IrEmitter::MatchReductionGenerator(
HloComputation* function, std::string* failure_reason) const {
CHECK_EQ(function->num_parameters(), 2);
auto root_instruction = function->root_instruction();
CHECK(ShapeUtil::IsScalar(root_instruction->shape()));
if (root_instruction->operand_count() != 2) {
*failure_reason = "root instruction is not a binary operation";
return nullptr;
}
const Shape& root_shape = root_instruction->shape();
if (ShapeUtil::ElementIsComplex(root_shape)) {
*failure_reason = "complex values not supported";
return nullptr;
}
bool root_is_floating_point = ShapeUtil::ElementIsFloating(root_shape);
bool root_is_integral = ShapeUtil::ElementIsIntegral(root_shape);
bool root_is_signed = ShapeUtil::ElementIsSigned(root_shape);
auto lhs = root_instruction->operand(0);
auto rhs = root_instruction->operand(1);
auto param_0 = function->parameter_instruction(0);
auto param_1 = function->parameter_instruction(1);
if (!(lhs == param_0 && rhs == param_1) &&
!(rhs == param_0 && lhs == param_1)) {
*failure_reason =
"root instruction is not a binary operation on the incoming arguments";
return nullptr;
}
CHECK(ShapeUtil::IsScalar(lhs->shape()) && ShapeUtil::IsScalar(rhs->shape()));
switch (root_instruction->opcode()) {
default:
*failure_reason = "did not recognize root instruction opcode";
return nullptr;
case HloOpcode::kAdd:
return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) {
return root_is_integral ? b->CreateAdd(lhs, rhs)
: b->CreateFAdd(lhs, rhs);
};
case HloOpcode::kMultiply:
return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) {
return root_is_integral ? b->CreateMul(lhs, rhs)
: b->CreateFMul(lhs, rhs);
};
case HloOpcode::kAnd:
return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
return b->CreateAnd(lhs, rhs);
};
case HloOpcode::kOr:
return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
return b->CreateOr(lhs, rhs);
};
case HloOpcode::kXor:
return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
return b->CreateXor(lhs, rhs);
};
case HloOpcode::kMaximum:
return [root_is_floating_point, root_is_signed, this](
llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) -> llvm::Value* {
if (root_is_floating_point) {
return llvm_ir::EmitFloatMax(
lhs, rhs, b,
hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max());
}
return b->CreateSelect(
b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SGE
: llvm::ICmpInst::ICMP_UGE,
lhs, rhs),
lhs, rhs);
};
case HloOpcode::kMinimum:
return [root_is_floating_point, root_is_signed, this](
llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) -> llvm::Value* {
if (root_is_floating_point) {
return llvm_ir::EmitFloatMin(
lhs, rhs, b,
hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max());
}
return b->CreateSelect(
b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SLE
: llvm::ICmpInst::ICMP_ULE,
lhs, rhs),
lhs, rhs);
};
}
}
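// Decomposes `element_count` into a list of IR types following its binary
// decomposition: whole vector registers where possible, then progressively
// smaller vectors, and finally a scalar for the low-order bit.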
IrEmitter::ShardedVectorType IrEmitter::CreateShardedVectorType(
PrimitiveType element_type, unsigned element_count) {
int vector_register_size_in_elements =
target_machine_features_.vector_register_byte_size(
*compute_function()->function()) /
ShapeUtil::ByteSizeOfPrimitiveType(element_type);
ShardedVectorType sharded_vector_type;
llvm::Type* element_ir_type =
llvm_ir::PrimitiveTypeToIrType(element_type, module_);
for (int i = 0, e = 1 + Log2Ceiling(element_count); i < e; i++) {
const unsigned current_size_fragment = 1u << i;
if (!(element_count & current_size_fragment)) {
continue;
}
if (current_size_fragment == 1) {
sharded_vector_type.push_back(element_ir_type);
continue;
}
if (current_size_fragment >= vector_register_size_in_elements) {
auto vector_type = llvm::VectorType::get(
element_ir_type, vector_register_size_in_elements, false);
sharded_vector_type.insert(
sharded_vector_type.end(),
current_size_fragment / vector_register_size_in_elements,
vector_type);
CHECK_EQ(current_size_fragment % vector_register_size_in_elements, 0);
continue;
}
sharded_vector_type.push_back(
llvm::VectorType::get(element_ir_type, current_size_fragment, false));
}
return sharded_vector_type;
}
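// Emits the reduction loop for one vectorized output position. The
// accumulator lives in stack slots matching the sharded vector type; each
// iteration loads the matching input elements shard by shard and folds them
// in with the reduction generator.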
absl::StatusOr<IrEmitter::ShardedVector>
IrEmitter::EmitInnerLoopForVectorizedReduction(
const ReductionGenerator& reduction_generator,
const llvm_ir::IrArray::Index& output_index,
const ShardedVectorType& accumulator_type, HloInstruction* init_value,
HloInstruction* arg, absl::Span<const int64_t> dimensions,
llvm::Align element_alignment) {
ShardedVector accumulator;
accumulator.reserve(accumulator_type.size());
for (auto accumulator_shard_type : accumulator_type) {
accumulator.push_back(llvm_ir::EmitAllocaAtFunctionEntry(
accumulator_shard_type, "accumulator", b(), 0));
}
llvm::Value* init_value_ssa =
Load(IrShapeType(init_value->shape()), GetEmittedValueFor(init_value));
for (llvm::Value* accumulator_shard : accumulator) {
llvm::Value* initial_value;
auto shard_type =
llvm::cast<llvm::AllocaInst>(accumulator_shard)->getAllocatedType();
if (auto vector_type = llvm::dyn_cast<llvm::VectorType>(shard_type)) {
initial_value =
VectorSplat(vector_type->getElementCount(), init_value_ssa);
} else {
initial_value = init_value_ssa;
}
AlignedStore(initial_value, accumulator_shard, element_alignment);
}
llvm_ir::ForLoopNest reduction_loop_nest(IrName(arg, "vectorized_inner"),
b());
std::vector<llvm::Value*> input_multi_index =
reduction_loop_nest.AddLoopsForShapeOnDimensions(arg->shape(), dimensions,
"reduction_dim");
SetToFirstInsertPoint(reduction_loop_nest.GetInnerLoopBodyBasicBlock(), b());
llvm_ir::IrArray arg_array(GetIrArrayFor(arg));
llvm_ir::IrArray::Index::const_iterator it = output_index.begin();
for (auto& i : input_multi_index) {
if (i == nullptr) {
i = *it++;
}
}
CHECK(output_index.end() == it);
llvm_ir::IrArray::Index input_index(input_multi_index, arg->shape(),
b()->getInt64Ty());
llvm::Value* input_address =
arg_array.EmitArrayElementAddress(input_index, b());
for (int i = 0; i < accumulator.size(); i++) {
auto alloca = llvm::cast<llvm::AllocaInst>(accumulator[i]);
auto current_accumulator_value = AlignedLoad(
alloca->getAllocatedType(), accumulator[i], element_alignment);
auto addend = AlignedLoad(alloca->getAllocatedType(), input_address,
element_alignment);
arg_array.AnnotateLoadStoreInstructionWithMetadata(addend);
auto reduced_result =
reduction_generator(b(), current_accumulator_value, addend);
AlignedStore(reduced_result, accumulator[i], element_alignment);
if (i != (accumulator.size() - 1)) {
input_address =
ConstInBoundsGEP1_32(reduced_result->getType(), input_address, 1);
}
}
SetToFirstInsertPoint(reduction_loop_nest.GetOuterLoopExitBasicBlock(), b());
ShardedVector result_ssa;
result_ssa.reserve(accumulator.size());
for (auto accumulator_shard : accumulator) {
auto alloca = llvm::cast<llvm::AllocaInst>(accumulator_shard);
result_ssa.push_back(AlignedLoad(alloca->getAllocatedType(),
accumulator_shard, element_alignment));
}
return result_ssa;
}
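// Stores the shards of a sharded vector value contiguously starting at
// `store_address`, tagging each store with the containing array's metadata.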
void IrEmitter::EmitShardedVectorStore(
llvm::Value* store_address, const std::vector<llvm::Value*>& value_to_store,
llvm::Align alignment, const llvm_ir::IrArray& containing_array) {
for (int i = 0; i < value_to_store.size(); i++) {
auto store_instruction =
AlignedStore(value_to_store[i], store_address, alignment);
containing_array.AnnotateLoadStoreInstructionWithMetadata(
store_instruction);
if (i != (value_to_store.size() - 1)) {
store_address =
ConstInBoundsGEP1_32(value_to_store[i]->getType(), store_address, 1);
}
}
}
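// Vectorized reduce: the most minor output dimension is processed in chunks
// of `vectorization_factor` elements with a narrower epilogue for the
// remainder. Returns false (with a reason) when the layout or reduction
// computation is not handled.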
absl::StatusOr<bool> IrEmitter::EmitVectorizedReduce(
HloInstruction* reduce, HloInstruction* arg, HloInstruction* init_value,
absl::Span<const int64_t> dimensions, HloComputation* function,
std::string* failure_reason) {
if (!reduce->shape().IsArray()) {
*failure_reason = "vectorization of variadic reduce not implemented";
return false;
}
if (!ReductionPreservesLayout(*reduce)) {
return false;
}
ReductionGenerator reduction_generator =
MatchReductionGenerator(function, failure_reason);
if (!reduction_generator) {
return false;
}
int vector_register_size_in_elements =
target_machine_features_.vector_register_byte_size(
*compute_function()->function()) /
ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type());
if (vector_register_size_in_elements == 0) {
return false;
}
int vectorization_factor_in_bytes =
target_machine_features_.vectorization_factor_in_bytes();
const int vectorization_factor =
vectorization_factor_in_bytes /
ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type());
bool is_reduction_over_minor_dimension = absl::c_linear_search(
dimensions, LayoutUtil::Minor(arg->shape().layout(), 0));
llvm::Align element_alignment(tsl::MathUtil::GCD<unsigned>(
ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type()),
MinimumAlignmentForPrimitiveType(reduce->shape().element_type())));
if (is_reduction_over_minor_dimension) {
*failure_reason = "reduction over minor dimension not implemented";
return false;
}
CHECK(!reduce->shape().IsTuple());
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(reduce));
llvm_ir::ForLoopNest loop_nest(IrName(reduce), b());
std::vector<llvm::Value*> array_multi_index(
reduce->shape().dimensions_size());
for (int i = LayoutUtil::MinorToMajor(reduce->shape()).size() - 1; i > 0;
--i) {
int64_t dimension = LayoutUtil::Minor(reduce->shape().layout(), i);
int64_t start_index = 0;
int64_t end_index = reduce->shape().dimensions(dimension);
std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
start_index, end_index, absl::StrFormat("dim.%d", dimension));
array_multi_index[dimension] = loop->GetIndVarValue();
}
int64_t innermost_dimension = LayoutUtil::Minor(reduce->shape().layout(), 0);
int64_t innermost_dimension_size =
reduce->shape().dimensions(innermost_dimension);
if (llvm::BasicBlock* innermost_body_bb =
loop_nest.GetInnerLoopBodyBasicBlock()) {
SetToFirstInsertPoint(innermost_body_bb, b());
}
auto outermost_loop_exit_block = loop_nest.GetOuterLoopExitBasicBlock();
if (innermost_dimension_size >= vectorization_factor) {
int64_t start_index = 0;
int64_t end_index = (innermost_dimension_size / vectorization_factor) *
vectorization_factor;
std::unique_ptr<llvm_ir::ForLoop> loop =
loop_nest.AddLoop(start_index, end_index, vectorization_factor,
absl::StrFormat("dim.%d", innermost_dimension));
array_multi_index[innermost_dimension] = loop->GetIndVarValue();
SetToFirstInsertPoint(loop->GetBodyBasicBlock(), b());
ShardedVectorType vector_type = CreateShardedVectorType(
reduce->shape().element_type(), vectorization_factor);
llvm_ir::IrArray::Index array_index(array_multi_index, reduce->shape(),
b()->getInt64Ty());
TF_ASSIGN_OR_RETURN(std::vector<llvm::Value*> accumulator,
EmitInnerLoopForVectorizedReduction(
reduction_generator, array_index, vector_type,
init_value, arg, dimensions, element_alignment));
llvm_ir::IrArray target_array = GetIrArrayFor(reduce);
llvm::Value* output_address =
target_array.EmitArrayElementAddress(array_index, b());
EmitShardedVectorStore(output_address, accumulator, element_alignment,
target_array);
if (auto exit_terminator = loop->GetExitBasicBlock()->getTerminator()) {
CHECK_GT(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);
b()->SetInsertPoint(exit_terminator);
} else {
CHECK_EQ(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);
b()->SetInsertPoint(loop->GetExitBasicBlock());
}
}
if (innermost_dimension_size % vectorization_factor) {
array_multi_index[innermost_dimension] =
b()->getInt64(innermost_dimension_size -
(innermost_dimension_size % vectorization_factor));
ShardedVectorType vector_type = CreateShardedVectorType(
reduce->shape().element_type(),
innermost_dimension_size % vectorization_factor);
llvm_ir::IrArray::Index array_index(array_multi_index, reduce->shape(),
b()->getInt64Ty());
llvm::IRBuilderBase::FastMathFlagGuard guard(*b());
llvm::FastMathFlags flags = b()->getFastMathFlags();
flags.setAllowReassoc(true);
b()->setFastMathFlags(flags);
TF_ASSIGN_OR_RETURN(std::vector<llvm::Value*> accumulator,
EmitInnerLoopForVectorizedReduction(
reduction_generator, array_index, vector_type,
init_value, arg, dimensions, element_alignment));
llvm_ir::IrArray target_array = GetIrArrayFor(reduce);
llvm::Value* output_address =
target_array.EmitArrayElementAddress(array_index, b());
EmitShardedVectorStore(output_address, accumulator, element_alignment,
target_array);
}
if (outermost_loop_exit_block) {
b()->SetInsertPoint(outermost_loop_exit_block);
}
return true;
}
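// Reduce first tries the vectorized lowering above and falls back to the
// elemental DefaultAction if it bails out. Reassociation is temporarily
// allowed so the vectorizer's reordering of the reduction is legal.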
absl::Status IrEmitter::HandleReduce(HloInstruction* reduce) {
auto arg = reduce->mutable_operand(0);
auto init_value = reduce->mutable_operand(1);
absl::Span<const int64_t> dimensions(reduce->dimensions());
HloComputation* function = reduce->to_apply();
bool saved_allow_reassociation = allow_reassociation_;
allow_reassociation_ = true;
auto cleanup = absl::MakeCleanup([saved_allow_reassociation, this]() {
allow_reassociation_ = saved_allow_reassociation;
});
if (!options::VectorizedReduceDisabled(hlo_module_config_)) {
std::string vectorization_failure_reason;
TF_ASSIGN_OR_RETURN(
bool vectorization_successful,
EmitVectorizedReduce(reduce, arg, init_value, dimensions, function,
&vectorization_failure_reason));
if (vectorization_successful) {
VLOG(1) << "Successfully vectorized reduction " << reduce->ToString()
<< "\n";
return absl::OkStatus();
} else {
VLOG(1) << "Could not vectorize reduction " << reduce->ToString() << ": "
<< vectorization_failure_reason;
}
}
return DefaultAction(reduce);
}
absl::Status IrEmitter::HandleSend(HloInstruction* send) {
return Unimplemented("Send is not implemented on CPU.");
}
absl::Status IrEmitter::HandleSendDone(HloInstruction* send_done) {
return Unimplemented("Send-done is not implemented on CPU.");
}
absl::Status IrEmitter::HandleScatter(HloInstruction*) {
return Unimplemented("Scatter is not implemented on CPUs.");
}
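// Slice becomes a loop nest of memcpys: the maximal run of minor dimensions
// copied in full forms the contiguous unit, and the remaining major
// dimensions (plus a strided copy dimension, if any) become explicit loops.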
absl::Status IrEmitter::HandleSlice(HloInstruction* slice) {
VLOG(2) << "HandleSlice: " << slice->ToString();
auto operand = slice->operand(0);
if (ShouldEmitParallelLoopFor(*slice)) {
return DefaultAction(slice);
}
if (!LayoutUtil::Equal(operand->shape().layout(), slice->shape().layout())) {
return DefaultAction(slice);
}
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(slice));
if (ShapeUtil::IsZeroElementArray(slice->shape())) {
return absl::OkStatus();
}
const Layout& layout = operand->shape().layout();
const int64_t num_dims = operand->shape().dimensions_size();
absl::flat_hash_set<int64_t> inner_dims;
for (int64_t dim : LayoutUtil::MinorToMajor(layout)) {
if (operand->shape().dimensions(dim) != slice->shape().dimensions(dim)) {
break;
}
inner_dims.insert(dim);
}
const bool is_trivial_copy = (inner_dims.size() == num_dims);
if (is_trivial_copy) {
if (ShapeUtil::IsEffectiveScalar(slice->shape())) {
return DefaultAction(slice);
} else {
return EmitMemcpy(*slice, *operand);
}
}
const Shape logical_element_shape = ShapeUtil::FilterDimensions(
[&inner_dims](int64_t dim) { return inner_dims.contains(dim); },
operand->shape());
const int64_t primitive_elements_per_logical_element =
ShapeUtil::ElementsIn(logical_element_shape);
const int64_t memcpy_dim = LayoutUtil::Minor(layout, inner_dims.size());
const bool memcpy_is_contiguous = slice->slice_strides(memcpy_dim) == 1;
const int64_t memcpy_logical_elements =
memcpy_is_contiguous
? slice->slice_limits(memcpy_dim) - slice->slice_starts(memcpy_dim)
: 1;
llvm::SmallVector<int64_t> outer_dims;
for (int64_t i = 0; i < num_dims - inner_dims.size() - 1; ++i) {
outer_dims.push_back(LayoutUtil::Major(layout, i));
}
if (!memcpy_is_contiguous) {
outer_dims.push_back(memcpy_dim);
}
llvm_ir::IrArray target_array = GetIrArrayFor(slice);
const int64_t num_outer_loops = outer_dims.size();
llvm_ir::ForLoopNest loops(IrName(slice), b());
std::vector<llvm::Value*> target_multi_index =
loops.AddLoopsForShapeOnDimensions(slice->shape(), outer_dims, "slice");
std::replace(target_multi_index.begin(), target_multi_index.end(),
static_cast<llvm::Value*>(nullptr),
static_cast<llvm::Value*>(b()->getInt64(0)));
llvm_ir::IrArray::Index target_index(target_multi_index, slice->shape(),
b()->getInt64Ty());
if (num_outer_loops > 0) {
SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());
}
llvm_ir::IrArray source_array = GetIrArrayFor(operand);
const llvm_ir::IrArray::Index source_index = target_index.SourceIndexOfSlice(
operand->shape(), slice->slice_starts(),
slice->slice_strides(), b());
llvm::Value* memcpy_dest =
target_array.EmitArrayElementAddress(target_index, b(), "slice.dest");
llvm::Value* memcpy_source =
source_array.EmitArrayElementAddress(source_index, b(), "slice.source");
const int64_t memcpy_elements =
primitive_elements_per_logical_element * memcpy_logical_elements;
EmitTransferElements(memcpy_dest, memcpy_source, memcpy_elements,
slice->shape().element_type(), target_array,
source_array);
if (VLOG_IS_ON(2)) {
const int64_t memcpy_bytes =
ShapeUtil::ByteSizeOf(logical_element_shape) * memcpy_elements;
VLOG(2) << " emitted copy of " << memcpy_bytes << " bytes inside "
<< num_outer_loops << " loops";
}
if (num_outer_loops > 0) {
SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleDynamicSlice(HloInstruction* dynamic_slice) {
if (ShapeUtil::IsScalar(dynamic_slice->shape())) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_slice));
return EmitMemcpy(*dynamic_slice->operand(0), *dynamic_slice);
}
return DefaultAction(dynamic_slice);
}
absl::Status IrEmitter::HandleDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) {
auto update = dynamic_update_slice->operand(1);
if (ShapeUtil::IsScalar(dynamic_update_slice->shape())) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_update_slice));
return EmitMemcpy(*update, *dynamic_update_slice);
} else if (llvm_ir::CanUpdateDynamicSliceInPlace(dynamic_update_slice,
assignment_)) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_update_slice));
auto operands = GetIrArraysForOperandsOf(dynamic_update_slice);
return llvm_ir::EmitDynamicUpdateSliceInPlace(
operands, GetIrArrayFor(dynamic_update_slice),
IrName(dynamic_update_slice, "in_place"), b());
}
return DefaultAction(dynamic_update_slice);
}
absl::Status IrEmitter::HandleRecv(HloInstruction* recv) {
return Unimplemented("Recv is not implemented on CPU.");
}
absl::Status IrEmitter::HandleRecvDone(HloInstruction* recv_done) {
return Unimplemented("Recv-done is not implemented on CPU.");
}
absl::Status IrEmitter::HandlePad(HloInstruction* pad) {
CHECK_EQ(pad->operand_count(), 2);
const auto operand = pad->operand(0);
const auto padding_value = pad->operand(1);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(pad));
return HandlePad(pad, GetIrArrayFor(operand), GetIrArrayFor(padding_value),
GetIrArrayFor(pad));
}
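// Lowers kPad in two phases: initialize the entire output with the padding
// value, then loop over the operand and scatter each element to its position
// after edge and interior padding are applied.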
absl::Status IrEmitter::HandlePad(HloInstruction* pad,
const llvm_ir::IrArray& operand_array,
const llvm_ir::IrArray& padding_value_array,
const llvm_ir::IrArray& output_array) {
CHECK_EQ(pad->operand_count(), 2);
for (auto& padding_dimension : pad->padding_config().dimensions()) {
if (padding_dimension.edge_padding_low() < 0 ||
padding_dimension.edge_padding_high() < 0) {
return InternalStrCat(
"Encountered negative padding in IrEmitter on CPU. "
"This should have been eliminated at the HLO level. ",
pad->ToString());
}
}
const HloInstruction* padding_value = pad->operand(1);
const auto index_type = b()->getInt64Ty();
const auto index = llvm_ir::IrArray::Index(index_type);
  llvm::Value* padding_value_addr = padding_value_array.EmitArrayElementAddress(
      index, b(), "padding_value_addr", /*use_linear_index=*/true,
      /*bit_offset=*/nullptr);
const llvm_ir::ElementGenerator element_generator =
[this, padding_value,
padding_value_addr](const llvm_ir::IrArray::Index& target_index) {
return b()->CreateLoad(IrShapeType(padding_value->shape()),
padding_value_addr);
};
TF_RETURN_IF_ERROR(EmitTargetElementLoop(
pad, "initialize", element_generator,
      std::optional<llvm_ir::IrArray>(output_array)));
llvm_ir::ForLoopNest loops(IrName(pad, "assign"), b());
const HloInstruction* operand = pad->operand(0);
const llvm_ir::IrArray::Index operand_index =
loops.AddLoopsForShape(operand->shape(), "operand");
SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());
llvm::Value* operand_data =
operand_array.EmitReadArrayElement(operand_index, b());
const PaddingConfig& padding_config = pad->padding_config();
std::vector<llvm::Value*> output_multi_index;
for (size_t i = 0; i < operand_index.size(); ++i) {
llvm::Value* offset =
Mul(operand_index[i],
b()->getInt64(padding_config.dimensions(i).interior_padding() + 1));
llvm::Value* index = Add(
offset, b()->getInt64(padding_config.dimensions(i).edge_padding_low()));
output_multi_index.push_back(index);
}
llvm_ir::IrArray::Index output_index(
output_multi_index, output_array.GetShape(), operand_index.GetType());
output_array.EmitWriteArrayElement(output_index, operand_data, b());
SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());
return absl::OkStatus();
}
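// Fusions take one of three paths: a fused dynamic-update-slice that can
// mutate its input buffer in place, a plain loop fusion driven by the
// elemental emitter, or an output fusion of the form dot + addend that is
// forwarded to the optimized dot emitter.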
absl::Status IrEmitter::HandleFusion(HloInstruction* fusion) {
auto* root = fusion->fused_expression_root();
if (llvm_ir::CanEmitFusedDynamicUpdateSliceInPlace(fusion, assignment_)) {
VLOG(3) << "HandleFusion FusedDynamicUpdateSliceInPlace";
CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
FusedIrEmitter fused_emitter(elemental_emitter);
BindFusionArguments(fusion, &fused_emitter);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fusion));
return llvm_ir::EmitFusedDynamicUpdateSliceInPlace(
fusion, GetIrArrayFor(fusion), &fused_emitter, b());
} else if (fusion->IsLoopFusion()) {
VLOG(3) << "HandleFusion kLoop";
CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
FusedIrEmitter fused_emitter(elemental_emitter);
BindFusionArguments(fusion, &fused_emitter);
TF_ASSIGN_OR_RETURN(auto generator, fused_emitter.GetGenerator(
*fusion->fused_expression_root()));
return EmitTargetElementLoop(fusion, "kLoop_fusion", generator,
std::nullopt);
} else if (fusion->IsOutputFusion()) {
VLOG(3) << "HandleFusion kOutput";
int64_t dot_op_index =
root->operand(0)->opcode() == HloOpcode::kDot ? 0 : 1;
const HloInstruction* dot = root->operand(dot_op_index);
CHECK_EQ(dot->opcode(), HloOpcode::kDot)
<< dot->ToString() << " "
<< fusion->fused_instructions_computation()->ToString();
int64_t dot_lhs_param_number = dot->operand(0)->parameter_number();
int64_t dot_rhs_param_number = dot->operand(1)->parameter_number();
int64_t addend_param_number =
root->operand(1 - dot_op_index)->parameter_number();
Shape target_shape = fusion->shape();
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fusion));
llvm_ir::IrArray target_array = GetIrArrayFor(fusion);
llvm_ir::IrArray lhs_array(
GetIrArrayFor(fusion->operand(dot_lhs_param_number)));
llvm_ir::IrArray rhs_array(
GetIrArrayFor(fusion->operand(dot_rhs_param_number)));
llvm_ir::IrArray addend_array(
GetIrArrayFor(fusion->operand(addend_param_number)));
TF_RETURN_IF_ERROR(
EmitDotOperation(*dot, target_array, lhs_array, rhs_array,
&addend_array, GetExecutableRunOptionsArgument(), b(),
hlo_module_config_, target_machine_features_));
return absl::OkStatus();
} else {
return Unimplemented("Fusion kind not implemented on CPU");
}
}
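// Calls are emitted either as a parallel fork-join when the callee's root
// carries outer-dimension partitions in its backend config, or as a plain
// global call otherwise.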
absl::Status IrEmitter::HandleCall(HloInstruction* call) {
HloComputation* computation = call->to_apply();
llvm::Function* call_ir_function = FindOrDie(
emitted_functions_, ComputationToEmit{computation, allow_reassociation_});
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(call));
auto backend_config_or =
computation->root_instruction()->backend_config<BackendConfig>();
if (backend_config_or.ok() &&
!backend_config_or->outer_dimension_partitions().empty()) {
    std::vector<llvm::Value*> call_args = GetArrayFunctionCallArguments(
        /*parameter_addresses=*/{}, b(), computation->name(),
        /*return_value_buffer=*/emitted_value_[call],
        /*exec_run_options_arg=*/GetExecutableRunOptionsArgument(),
        /*buffer_table_arg=*/GetBufferTableArgument(),
        /*status_arg=*/GetStatusArgument(),
        /*profile_counters_arg=*/GetProfileCountersArgument());
HloInstruction* root = computation->root_instruction();
TF_RETURN_IF_ERROR(EmitCallToParallelForkJoin(
call_args, root->shape(),
backend_config_or->outer_dimension_partitions(), b(), call_ir_function,
computation->name()));
if (ComputationTransitivelyContainsCustomCall(computation)) {
EmitEarlyReturnIfErrorStatus();
}
} else {
EmitGlobalCall(*computation, computation->name());
}
return absl::OkStatus();
}
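// Emits a SliceToDynamic custom call: operand 0 holds the data and each
// subsequent operand holds one dynamic dimension size. The sizes are stored
// as int32_t metadata immediately after the static data region of the
// destination, and the data is re-linearized over the dynamic bounds.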
absl::Status IrEmitter::EmitSliceToDynamic(
const HloInstruction* hlo, absl::Span<const llvm_ir::IrArray> source_arrays,
const llvm_ir::IrArray& target_array) {
std::vector<llvm::Value*> dynamic_dims;
int32_t raw_data_size =
ShapeUtil::ByteSizeOf(ShapeUtil::MakeStaticShape(hlo->shape()));
llvm::Value* dest_buffer = target_array.GetBasePointer();
for (int64_t i = 1; i < hlo->operand_count(); ++i) {
const int64_t dim_index = i - 1;
llvm::Value* source_buffer = source_arrays[i].GetBasePointer();
llvm::LoadInst* dyn_dim_size = Load(IrShapeType(hlo->operand(i)->shape()),
source_buffer, "dyn_dim_size");
llvm::Value* metadata = b()->CreateConstInBoundsGEP1_32(
b()->getInt8Ty(), dest_buffer,
raw_data_size + dim_index * sizeof(int32_t));
b()->CreateStore(dyn_dim_size, metadata);
    dynamic_dims.push_back(b()->CreateIntCast(dyn_dim_size, b()->getInt64Ty(),
                                              /*isSigned=*/true,
                                              "i64_dyn_dim_size"));
}
auto loop_body_emitter =
[&](const llvm_ir::IrArray::Index& array_index) -> absl::Status {
llvm::Value* source_element =
source_arrays[0].EmitReadArrayElement(array_index, b());
llvm::Value* linear_index = array_index.Linearize(dynamic_dims, b());
llvm_ir::IrArray::Index dest_index(linear_index, target_array.GetShape(),
b());
target_array.EmitWriteArrayElement(dest_index, source_element, b());
return absl::OkStatus();
};
return llvm_ir::LoopEmitter(loop_body_emitter, target_array.GetShape(),
dynamic_dims, b())
.EmitLoop(IrName(hlo));
}
absl::Status IrEmitter::HandleSliceToDynamic(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
llvm_ir::IrArray target_array = GetIrArrayFor(hlo);
std::vector<llvm_ir::IrArray> source_arrays;
source_arrays.reserve(hlo->operand_count());
for (auto operand : hlo->operands()) {
source_arrays.push_back(GetIrArrayFor(operand));
}
return EmitSliceToDynamic(hlo, source_arrays, target_array);
}
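// The inverse of SliceToDynamic: read the int32_t dimension sizes stored
// after the input's static data region, expand the compacted elements into a
// statically shaped buffer, and emit a tuple of (data, dim_0, ..., dim_n).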
absl::Status IrEmitter::HandlePadToStatic(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice,
assignment_.GetUniqueSlice(hlo, {0}));
std::vector<llvm::Value*> dynamic_dims;
std::vector<llvm::Value*> tuple_operand_ptrs;
const Shape& data_shape = ShapeUtil::GetSubshape(hlo->shape(), {0});
const Shape& input_shape = hlo->operand(0)->shape();
llvm::Value* data_address = EmitBufferPointer(data_slice, data_shape);
llvm::Type* data_type = IrShapeType(data_shape);
llvm_ir::IrArray data_array(data_address, data_type, data_shape);
llvm::Value* source_buffer = GetEmittedValueFor(hlo->operand(0));
int64_t raw_data_size =
ShapeUtil::ByteSizeOf(ShapeUtil::MakeStaticShape(input_shape));
tuple_operand_ptrs.push_back(data_array.GetBasePointer());
for (int i = 1; i < hlo->shape().tuple_shapes_size(); ++i) {
const Shape& dim_shape = ShapeUtil::GetSubshape(hlo->shape(), {i});
TF_RET_CHECK(Shape::Equal()(dim_shape, ShapeUtil::MakeScalarShape(S32)));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice dim_size_slice,
assignment_.GetUniqueSlice(hlo, {i}));
llvm::Value* dest_dim_size_address =
EmitBufferPointer(dim_size_slice, data_shape);
const int64_t dim_index = i - 1;
llvm::Value* metadata = b()->CreateConstInBoundsGEP1_32(
b()->getInt8Ty(), source_buffer,
raw_data_size + dim_index * sizeof(int32_t));
llvm::Value* dyn_dim_size =
b()->CreateLoad(b()->getInt32Ty(), metadata, "dyn_dim_size");
b()->CreateStore(dyn_dim_size, dest_dim_size_address);
    dynamic_dims.push_back(b()->CreateIntCast(dyn_dim_size, b()->getInt64Ty(),
                                              /*isSigned=*/true,
                                              "i64_dyn_dim_size"));
tuple_operand_ptrs.push_back(dest_dim_size_address);
}
auto loop_body_emitter =
[&](const llvm_ir::IrArray::Index& array_index) -> absl::Status {
llvm::Value* linear_index = array_index.Linearize(dynamic_dims, b());
llvm_ir::IrArray::Index source_index(linear_index, input_shape, b());
llvm::Value* source_element =
GetIrArrayFor(hlo->operand(0)).EmitReadArrayElement(source_index, b());
data_array.EmitWriteArrayElement(array_index, source_element, b());
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
llvm_ir::LoopEmitter(loop_body_emitter, input_shape, dynamic_dims, b())
.EmitLoop(IrName(hlo)));
llvm_ir::EmitTuple(GetIrArrayFor(hlo), tuple_operand_ptrs, b());
return absl::OkStatus();
}
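// TopK is lowered to a call into the CPU runtime's TopKF32 routine. Only F32
// inputs with a dim-0-major layout and at most one leading batch dimension
// are supported here.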
absl::Status IrEmitter::HandleTopK(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
const HloInstruction* input = hlo->operand(0);
const int64_t k = hlo->shape().tuple_shapes(0).dimensions().back();
const bool has_batch = hlo->shape().tuple_shapes(0).dimensions_size() == 2;
TF_RET_CHECK(input->shape().element_type() == F32) << hlo->ToString();
TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(
hlo->shape().tuple_shapes(0).layout()))
<< hlo->ToString();
TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(
hlo->shape().tuple_shapes(1).layout()))
<< hlo->ToString();
TF_RET_CHECK(
LayoutUtil::IsMonotonicWithDim0Major(hlo->operand(0)->shape().layout()))
<< hlo->ToString();
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice values_slice,
assignment_.GetUniqueSlice(hlo->operand(0), {}));
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_values_slice,
assignment_.GetUniqueSlice(hlo, {0}));
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_indices_slice,
assignment_.GetUniqueSlice(hlo, {1}));
llvm::Value* values_ptr =
EmitBufferPointer(values_slice, hlo->operand(0)->shape());
llvm::Value* out_values_ptr =
EmitBufferPointer(out_values_slice, hlo->shape().tuple_shapes(0));
llvm::Value* out_indices_ptr =
EmitBufferPointer(out_indices_slice, hlo->shape().tuple_shapes(1));
EmitCallToFunc(
runtime::kTopKF32SymbolName,
{b()->getInt64(has_batch ? input->shape().dimensions(0) : 1),
b()->getInt64(input->shape().dimensions().back()), b()->getInt64(k),
values_ptr, out_values_ptr, out_indices_ptr},
b()->getVoidTy());
llvm_ir::EmitTuple(GetIrArrayFor(hlo), {out_values_ptr, out_indices_ptr},
b());
return absl::OkStatus();
}
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
std::vector<StackAlloca> IrEmitter::EmitOneDnnOperandsAlloca(
HloInstruction* custom_call, llvm::Value*& args_val, int& arg_indx) {
std::vector<StackAlloca> operands_stack_alloca;
const int num_operands = custom_call->operand_count();
operands_stack_alloca.reserve(num_operands);
for (int i = 0; i < num_operands; ++i) {
llvm_ir::IrArray ir_array(GetIrArrayFor(custom_call->operand(i)));
StackAlloca stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), ir_array);
args_val = b()->CreateInsertValue(args_val, stack_alloca.value, arg_indx++);
operands_stack_alloca.push_back(std::move(stack_alloca));
}
return operands_stack_alloca;
}
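// Emits a call into the oneDNN matmul runtime. Arguments are packed into an
// on-stack pointer array laid out as [nargs, run_options, serialized config,
// operand memrefs...]; lifetime markers use a size of -1, i.e. the whole
// object. If the custom call returns a tuple, element 1 is a scratchpad
// buffer that is passed to the runtime next to the result.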
absl::Status IrEmitter::HandleOneDnnMatMulCalls(
HloInstruction* custom_call, std::string runtime_symbol_name) {
const int nargs_offset = 3;
const int num_operands = custom_call->operand_count();
const int nargs = nargs_offset + num_operands;
int arg_indx = 0;
llvm::Type* i64_type = b()->getInt64Ty();
llvm::Type* ptr_type = b()->getPtrTy();
llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);
llvm::Value* nargs_val = b()->getInt64(nargs);
llvm::Value* nargs_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
b()->CreateStore(nargs_val, nargs_ptr);
args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);
llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto backend_config = typed_custom_call->backend_config<BackendConfig>();
OneDnnMatMulConfig matmul_config;
matmul_config.CopyFrom(backend_config->onednn_matmul_config());
std::string str_config;
matmul_config.SerializeToString(&str_config);
llvm::Value* matmul_config_val =
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
args_val = b()->CreateInsertValue(args_val, matmul_config_val, arg_indx++);
auto operands_stack_alloca =
EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments doesn't match the final argument index.";
llvm::Value* args_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(ptr_array_type, "matmul.args", b());
b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
b()->CreateStore(args_val, args_ptr);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
StackAlloca result_stack_alloca;
StackAlloca scratch_stack_alloca;
std::vector<llvm::Value*> fn_call_args;
fn_call_args.reserve(3);
const bool use_scratchpad = custom_call->shape().IsTuple();
if (use_scratchpad) {
llvm::Value* result_slice_ptr;
llvm::Value* scratch_slice_ptr;
llvm_ir::IrArray result_array;
llvm_ir::IrArray scratch_array;
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice result_slice,
assignment_.GetUniqueSlice(custom_call, {0}));
const Shape& result_shape = custom_call->shape().tuple_shapes(0);
result_slice_ptr = EmitBufferPointer(result_slice, result_shape);
llvm::Type* ir_type = IrShapeType(result_shape);
result_array = llvm_ir::IrArray(result_slice_ptr, ir_type, result_shape);
result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
fn_call_args.push_back(result_stack_alloca.value);
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice scratch_slice,
assignment_.GetUniqueSlice(custom_call, {1}));
const Shape& scratch_shape = custom_call->shape().tuple_shapes(1);
scratch_slice_ptr = EmitBufferPointer(scratch_slice, scratch_shape);
llvm::Type* scratch_type = IrShapeType(scratch_shape);
scratch_array =
llvm_ir::IrArray(scratch_slice_ptr, scratch_type, scratch_shape);
scratch_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), scratch_array);
fn_call_args.push_back(scratch_stack_alloca.value);
llvm_ir::EmitTuple(GetIrArrayFor(custom_call),
{result_slice_ptr, scratch_slice_ptr}, b());
} else {
llvm_ir::IrArray result_array;
result_array = GetIrArrayFor(custom_call);
result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
fn_call_args.push_back(result_stack_alloca.value);
fn_call_args.push_back(llvm::ConstantPointerNull::get(b()->getPtrTy()));
}
fn_call_args.push_back(args_ptr);
EmitCallToFunc(std::move(runtime_symbol_name), fn_call_args,
b()->getVoidTy());
b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
for (auto& alloca : operands_stack_alloca) {
alloca.EmitLifetimeEnd();
}
result_stack_alloca.EmitLifetimeEnd();
if (use_scratchpad) {
scratch_stack_alloca.EmitLifetimeEnd();
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleOneDnnConvolution(HloInstruction* custom_call) {
const int nargs_offset = 3;
const int num_operands = custom_call->operand_count();
const int nargs = nargs_offset + num_operands;
int arg_indx = 0;
llvm::Type* i64_type = b()->getInt64Ty();
llvm::Type* ptr_type = b()->getPtrTy();
llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);
llvm::Value* nargs_val = b()->getInt64(nargs);
llvm::Value* nargs_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
b()->CreateStore(nargs_val, nargs_ptr);
args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);
llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto backend_config = typed_custom_call->backend_config<BackendConfig>();
OneDnnConvolutionConfig conv_config;
conv_config.CopyFrom(backend_config->onednn_conv_config());
std::string str_config;
conv_config.SerializeToString(&str_config);
llvm::Value* conv_config_val =
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
args_val = b()->CreateInsertValue(args_val, conv_config_val, arg_indx++);
auto operands_stack_alloca =
EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments doesn't match the final argument index.";
llvm::Value* args_ptr = llvm_ir::EmitAllocaAtFunctionEntry(
ptr_array_type, "convolution.args", b());
b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
b()->CreateStore(args_val, args_ptr);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
EmitCallToFunc(runtime::kOneDnnConvolutionSymbolName,
{result_stack_alloca.value, args_ptr}, b()->getVoidTy());
b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
for (int i = 0; i < num_operands; ++i) {
operands_stack_alloca[i].EmitLifetimeEnd();
}
b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
result_stack_alloca.EmitLifetimeEnd();
return absl::OkStatus();
}
absl::Status IrEmitter::HandleOneDnnLayerNorm(HloInstruction* custom_call) {
const int nargs_offset = 3;
const int num_operands = custom_call->operand_count();
const int nargs = nargs_offset + num_operands;
int arg_indx = 0;
llvm::Type* i64_type = b()->getInt64Ty();
llvm::Type* ptr_type = b()->getPtrTy();
llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);
llvm::Value* nargs_val = b()->getInt64(nargs);
llvm::Value* nargs_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
b()->CreateStore(nargs_val, nargs_ptr);
args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);
llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto backend_config = typed_custom_call->backend_config<BackendConfig>();
OneDnnNormConfig ln_config;
ln_config.CopyFrom(backend_config->onednn_layer_norm_config());
std::string str_config;
ln_config.SerializeToString(&str_config);
llvm::Value* ln_config_val =
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
args_val = b()->CreateInsertValue(args_val, ln_config_val, arg_indx++);
auto operands_stack_alloca =
EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments doesn't match the final argument index.";
llvm::Value* args_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(ptr_array_type, "layernorm.args", b());
b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
b()->CreateStore(args_val, args_ptr);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
EmitCallToFunc(runtime::kOneDnnLayerNormSymbolName,
{result_stack_alloca.value, args_ptr}, b()->getVoidTy());
b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
for (int i = 0; i < num_operands; ++i) {
operands_stack_alloca[i].EmitLifetimeEnd();
}
b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
result_stack_alloca.EmitLifetimeEnd();
return absl::OkStatus();
}
absl::Status IrEmitter::HandleOneDnnSoftmax(HloInstruction* custom_call) {
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto backend_config = typed_custom_call->backend_config<BackendConfig>();
OneDnnSoftmaxConfig softmax_config;
softmax_config.CopyFrom(backend_config->onednn_softmax_config());
std::string str_config;
softmax_config.SerializeToString(&str_config);
llvm::Value* softmax_config_val =
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
auto input = custom_call->operand(0);
llvm_ir::IrArray input_array(GetIrArrayFor(input));
auto input_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), input_array);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
EmitCallToFunc(runtime::kOneDnnSoftmaxSymbolName,
{GetExecutableRunOptionsArgument(), input_stack_alloca.value,
result_stack_alloca.value, softmax_config_val},
b()->getVoidTy());
input_stack_alloca.EmitLifetimeEnd();
result_stack_alloca.EmitLifetimeEnd();
return absl::OkStatus();
}
#endif
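// Dispatches a custom call according to its API version. Operand buffer
// pointers are spilled into an on-stack array, so an API_VERSION_ORIGINAL
// target is invoked roughly as
//
//   void target(void* output, const void** operands);
//
// Later versions additionally receive the opaque string and an
// XlaCustomCallStatus, and API_VERSION_TYPED_FFI goes through the FFI
// trampoline emitted by EmitCallToFfi below.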
absl::Status IrEmitter::HandleCustomCall(HloInstruction* custom_call) {
if (custom_call->custom_call_target() == "PadToStatic") {
return HandlePadToStatic(custom_call);
}
if (custom_call->custom_call_target() == "SliceToDynamic") {
return HandleSliceToDynamic(custom_call);
}
if (custom_call->custom_call_target() == "TopK") {
return HandleTopK(custom_call);
}
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
if (custom_call->custom_call_target() == "__onednn$matmul") {
return HandleOneDnnMatMulCalls(custom_call,
runtime::kOneDnnMatMulSymbolName);
}
if (custom_call->custom_call_target() == "__onednn$softmax") {
return HandleOneDnnSoftmax(custom_call);
}
if (custom_call->custom_call_target() == "__onednn$layernorm") {
return HandleOneDnnLayerNorm(custom_call);
}
if (custom_call->custom_call_target() == "__onednn$convolution") {
return HandleOneDnnConvolution(custom_call);
}
if (custom_call->custom_call_target() == "__onednn$matmul_reorder") {
return HandleOneDnnMatMulCalls(custom_call,
runtime::kOneDnnMatMulReorderSymbolName);
}
#endif
absl::Span<HloInstruction* const> operands(custom_call->operands());
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto is_typed_ffi = typed_custom_call->api_version() ==
CustomCallApiVersion::API_VERSION_TYPED_FFI;
std::vector<llvm::Value*> operand_values;
operand_values.reserve(operands.size());
for (int64_t i = 0; i < operands.size(); ++i) {
HloInstruction* operand = operands[i];
if (is_typed_ffi) {
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
operand->shape(), [&](const Shape& shape, const ShapeIndex& index) {
if (!shape.IsArray()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
assignment_.GetUniqueSlice(operand, index));
operand_values.push_back(EmitBufferPointer(slice, shape));
return absl::OkStatus();
}));
} else {
operand_values.push_back(GetEmittedValueFor(operand));
}
}
llvm::AllocaInst* operands_alloca =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getPtrTy(), b()->getInt32(operand_values.size()),
"cc_operands_alloca", b());
if (emit_code_for_msan_) {
const llvm::DataLayout& dl = module_->getDataLayout();
llvm::Type* intptr_type = b()->getIntPtrTy(dl);
EmitCallToFunc("__msan_unpoison",
{operands_alloca,
llvm::ConstantInt::get(
intptr_type, *operands_alloca->getAllocationSize(dl))},
b()->getVoidTy());
}
for (int64_t i = 0; i < operand_values.size(); ++i) {
llvm::Value* slot_in_operands_alloca =
InBoundsGEP(operands_alloca->getAllocatedType(), operands_alloca,
{b()->getInt64(i)});
Store(operand_values[i], slot_in_operands_alloca);
}
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
std::vector<llvm::Value*> tuple_ptrs;
if (custom_call->shape().IsTuple()) {
for (int i = 0; i < ShapeUtil::TupleElementCount(custom_call->shape());
++i) {
const Shape& elem_shape =
ShapeUtil::GetTupleElementShape(custom_call->shape(), i);
if (!is_typed_ffi) {
TF_RET_CHECK(!elem_shape.IsTuple()) << "Nested tuples not implemented";
}
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,
assignment_.GetUniqueSlice(custom_call, {i}));
tuple_ptrs.push_back(EmitBufferPointer(slice, elem_shape));
}
llvm_ir::EmitTuple(GetIrArrayFor(custom_call), tuple_ptrs, b());
}
auto* output_address = GetEmittedValueFor(custom_call);
switch (typed_custom_call->api_version()) {
case CustomCallApiVersion::API_VERSION_ORIGINAL:
EmitCallToFunc(custom_call->custom_call_target(),
{output_address, operands_alloca}, b()->getVoidTy());
break;
case CustomCallApiVersion::API_VERSION_STATUS_RETURNING:
EmitCallToFunc(custom_call->custom_call_target(),
{output_address, operands_alloca, GetStatusArgument()},
b()->getVoidTy());
EmitEarlyReturnIfErrorStatus();
break;
case CustomCallApiVersion::API_VERSION_STATUS_RETURNING_UNIFIED: {
absl::string_view opaque = typed_custom_call->opaque();
EmitCallToFunc(custom_call->custom_call_target(),
{output_address, operands_alloca,
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(opaque)),
b()->getInt64(opaque.size()), GetStatusArgument()},
b()->getVoidTy());
EmitEarlyReturnIfErrorStatus();
break;
}
case CustomCallApiVersion::API_VERSION_TYPED_FFI: {
std::vector<llvm::Value*> buffer_ptrs;
if (custom_call->shape().IsTuple()) {
buffer_ptrs.reserve(ShapeUtil::TupleElementCount(custom_call->shape()));
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
custom_call->shape(),
[&](const Shape& shape, const ShapeIndex& index) {
if (!shape.IsArray()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
assignment_.GetUniqueSlice(custom_call, index));
buffer_ptrs.push_back(EmitBufferPointer(slice, shape));
return absl::OkStatus();
}));
llvm::AllocaInst* results_alloca =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getPtrTy(), b()->getInt32(buffer_ptrs.size()),
"ffi_results_alloca", b());
if (emit_code_for_msan_) {
const llvm::DataLayout& dl = module_->getDataLayout();
llvm::Type* intptr_type = b()->getIntPtrTy(dl);
EmitCallToFunc(
"__msan_unpoison",
{results_alloca,
llvm::ConstantInt::get(intptr_type,
*results_alloca->getAllocationSize(dl))},
b()->getVoidTy());
}
for (int i = 0; i < buffer_ptrs.size(); ++i) {
llvm::Value* tuple_slot_in_results_alloca =
InBoundsGEP(results_alloca->getAllocatedType(), results_alloca,
{b()->getInt64(i)});
Store(buffer_ptrs[i], tuple_slot_in_results_alloca);
}
EmitCallToFfi(typed_custom_call, results_alloca, operands_alloca);
EmitEarlyReturnIfErrorStatus();
break;
}
default:
return Internal(
"Unknown custom-call API version enum value: %d (%s)",
typed_custom_call->api_version(),
CustomCallApiVersion_Name(typed_custom_call->api_version()));
}
return absl::OkStatus();
}
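// Buffer assignment guarantees that the while instruction, its init operand,
// the condition/body parameters, and the body root all share buffers, so the
// loop runs fully in place. The emitted control flow is:
//
//   header: call condition; br (pred != 0), body, exit
//   body:   call body;      br header
//   exit: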
absl::Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
HloComputation* condition = xla_while->while_condition();
TF_RET_CHECK(ShapeUtil::IsScalar(condition->root_instruction()->shape()) &&
condition->root_instruction()->shape().element_type() == PRED)
<< "While condition computation must return bool; got: "
<< ShapeUtil::HumanString(condition->root_instruction()->shape());
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
xla_while->shape(),
      [this, &xla_while](const Shape& /*subshape*/,
const ShapeIndex& index) -> absl::Status {
auto check = [this](const HloInstruction* a, const HloInstruction* b,
const ShapeIndex& index) -> absl::Status {
const BufferAllocation::Slice slice_a =
assignment_.GetUniqueSlice(a, index).value();
const BufferAllocation::Slice slice_b =
assignment_.GetUniqueSlice(b, index).value();
if (slice_a != slice_b) {
return Internal(
"instruction %s %s does not share slice with "
"instruction %s %s",
a->ToString(), slice_a.ToString(), b->ToString(),
slice_b.ToString());
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(check(xla_while, xla_while->operand(0), index));
TF_RETURN_IF_ERROR(check(
xla_while, xla_while->while_condition()->parameter_instruction(0),
index));
TF_RETURN_IF_ERROR(
check(xla_while, xla_while->while_body()->parameter_instruction(0),
index));
TF_RETURN_IF_ERROR(check(
xla_while, xla_while->while_body()->root_instruction(), index));
return absl::OkStatus();
}));
const HloInstruction* init = xla_while->operand(0);
emitted_value_[xla_while] = GetEmittedValueFor(init);
llvm::BasicBlock* header_bb = llvm::BasicBlock::Create(
module_->getContext(), IrName(xla_while, "header"),
compute_function()->function());
Br(header_bb);
b()->SetInsertPoint(header_bb);
EmitGlobalCall(*xla_while->while_condition(), IrName(xla_while, "cond"));
llvm::Value* while_predicate = ICmpNE(
Load(IrShapeType(
xla_while->while_condition()->root_instruction()->shape()),
GetBufferForGlobalCallReturnValue(*xla_while->while_condition())),
llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0));
llvm::BasicBlock* body_bb =
llvm::BasicBlock::Create(module_->getContext(), IrName(xla_while, "body"),
compute_function()->function());
llvm::BasicBlock* exit_bb = llvm::BasicBlock::Create(
module_->getContext(), IrName(xla_while, "exit"));
CondBr(while_predicate, body_bb, exit_bb);
b()->SetInsertPoint(body_bb);
EmitGlobalCall(*xla_while->while_body(), IrName(xla_while, "body"));
Br(header_bb);
llvm::Function* llvm_fn = compute_function()->function();
llvm_fn->insert(llvm_fn->end(), exit_bb);
b()->SetInsertPoint(exit_bb);
return absl::OkStatus();
}
absl::Status IrEmitter::EmitFastConcatenate(
const HloInstruction* instr,
absl::Span<const llvm_ir::IrArray> source_arrays,
const llvm_ir::IrArray& target_array) {
return ::xla::cpu::EmitFastConcatenate(instr, source_arrays, target_array,
module_, *b());
}
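// Emits a concatenate as a sequence of memcpys: loop over the dimensions
// major to the concatenation dimension and, in each iteration, copy every
// operand's contiguous (inner dims x concat dim) block into the output at a
// running byte offset.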
absl::Status EmitFastConcatenate(
const HloInstruction* instr,
absl::Span<const llvm_ir::IrArray> source_arrays,
const llvm_ir::IrArray& target_array, llvm::Module* module,
llvm::IRBuilder<>& b) {
auto* concatenate = Cast<HloConcatenateInstruction>(instr);
const Shape& output_shape = concatenate->shape();
int64_t concat_dim = concatenate->concatenate_dimension();
const Layout& output_layout = output_shape.layout();
auto output_min2maj = LayoutUtil::MinorToMajor(output_layout);
auto concat_dim_layout_itr = absl::c_find(output_min2maj, concat_dim);
std::vector<int64_t> inner_dims(output_min2maj.begin(),
concat_dim_layout_itr);
std::vector<int64_t> outer_dims(std::next(concat_dim_layout_itr),
output_min2maj.end());
llvm_ir::ForLoopNest loops(IrName(concatenate), &b);
std::vector<llvm::Value*> target_multi_index =
loops.AddLoopsForShapeOnDimensions(output_shape, outer_dims, "concat");
absl::c_replace(target_multi_index, static_cast<llvm::Value*>(nullptr),
static_cast<llvm::Value*>(b.getInt64(0)));
llvm_ir::IrArray::Index target_index(target_multi_index, output_shape,
b.getInt64Ty());
if (!outer_dims.empty()) {
SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b);
}
PrimitiveType primitive_type = output_shape.element_type();
unsigned primitive_type_size =
ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
llvm::Value* target_region_begin =
target_array.EmitArrayElementAddress(target_index, &b, "target_region");
int64_t byte_offset_into_target_region = 0;
int64_t inner_dims_product = absl::c_accumulate(
inner_dims, int64_t{1}, [&](int64_t product, int64_t inner_dim) {
return product * output_shape.dimensions(inner_dim);
});
for (int64_t i = 0; i < source_arrays.size(); ++i) {
const Shape& input_shape = concatenate->operand(i)->shape();
const llvm_ir::IrArray& source_array = source_arrays[i];
llvm_ir::IrArray::Index source_index(target_multi_index, input_shape,
b.getInt64Ty());
llvm::Value* copy_source_address =
source_array.EmitArrayElementAddress(source_index, &b, "src_addr");
llvm::Value* copy_target_address =
b.CreateGEP(b.getInt8Ty(), target_region_begin,
b.getInt64(byte_offset_into_target_region));
::xla::cpu::EmitTransferElements(
copy_target_address, copy_source_address,
inner_dims_product * input_shape.dimensions(concat_dim), primitive_type,
target_array, source_array, module, b);
byte_offset_into_target_region += inner_dims_product *
input_shape.dimensions(concat_dim) *
primitive_type_size;
}
if (!outer_dims.empty()) {
SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b);
}
return absl::OkStatus();
}
llvm::Value* IrEmitter::EmitPrintf(absl::string_view fmt,
absl::Span<llvm::Value* const> arguments) {
std::vector<llvm::Value*> call_args;
call_args.push_back(b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(fmt)));
absl::c_copy(arguments, std::back_inserter(call_args));
return b()->CreateCall(
b()->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(
"printf",
          llvm::FunctionType::get(b()->getInt32Ty(), {b()->getPtrTy()},
                                  /*isVarArg=*/true)),
call_args);
}
llvm::Value* IrEmitter::EmitPrintfToStderr(
absl::string_view fmt, absl::Span<llvm::Value* const> arguments) {
std::vector<llvm::Value*> call_args;
call_args.push_back(b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(fmt)));
absl::c_copy(arguments, std::back_inserter(call_args));
return b()->CreateCall(
b()->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(
runtime::kPrintfToStderrSymbolName,
          llvm::FunctionType::get(b()->getInt32Ty(), {b()->getPtrTy()},
                                  /*isVarArg=*/true)),
call_args);
}
llvm::Value* IrEmitter::EmitCallToFunc(
std::string func_name, const std::vector<llvm::Value*>& arguments,
llvm::Type* return_type, bool does_not_throw, bool only_accesses_arg_memory,
bool only_accesses_inaccessible_mem_or_arg_mem) {
std::vector<llvm::Type*> types;
types.reserve(arguments.size());
absl::c_transform(arguments, std::back_inserter(types),
[&](llvm::Value* val) { return val->getType(); });
  llvm::FunctionType* func_type =
      llvm::FunctionType::get(return_type, types, /*isVarArg=*/false);
auto func = llvm::dyn_cast<llvm::Function>(
module_->getOrInsertFunction(func_name, func_type).getCallee());
func->setCallingConv(llvm::CallingConv::C);
if (does_not_throw) {
func->setDoesNotThrow();
}
if (only_accesses_arg_memory) {
func->setOnlyAccessesArgMemory();
}
if (only_accesses_inaccessible_mem_or_arg_mem) {
func->setOnlyAccessesInaccessibleMemOrArgMem();
}
return b()->CreateCall(func, arguments);
}
template <typename T>
static const Shape& GetShape(T&& arg) {
if constexpr (std::is_convertible_v<absl::remove_cvref_t<decltype(arg)>,
Shape>) {
return arg;
} else {
return arg->shape();
}
}
struct EncodedInfo {
llvm::AllocaInst* alloca;
int64_t size;
};
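// Spills the element type of every leaf shape in `args` into an on-stack
// int32_t array (one PrimitiveType enum value per leaf) and returns the
// alloca together with the number of slots written.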
template <typename Args>
static EncodedInfo StoreEncodedTypes(std::string_view alloca_name,
const Args& args, llvm::IRBuilder<>& ir) {
int64_t total_elements = 0;
for (int64_t i = 0; i < args.size(); ++i) {
total_elements += ShapeUtil::GetLeafCount(GetShape(args[i]));
}
llvm::AllocaInst* types_alloca = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
ir.getInt32Ty(), ir.getInt64(total_elements), alloca_name, &ir);
int64_t element_id = 0;
auto store_type = [&](const Shape& shape, const ShapeIndex& index) {
if (shape.IsTuple()) {
return;
}
llvm::Value* slot_in_types_alloca = ir.CreateConstInBoundsGEP1_32(
ir.getInt32Ty(), types_alloca, element_id++);
ir.CreateStore(ir.getInt32(shape.element_type()), slot_in_types_alloca);
};
for (int64_t i = 0; i < args.size(); ++i) {
ShapeUtil::ForEachSubshape(GetShape(args[i]), store_type);
}
CHECK_EQ(element_id, total_elements);
return {types_alloca, total_elements};
}
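// Spills the dimensions of every array subshape in `args` into an on-stack
// int64_t array encoded as [rank, dim_0, ..., dim_{rank-1}] per shape.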
template <typename Args>
static EncodedInfo StoreEncodedShapes(std::string_view alloca_name,
const Args& args, llvm::IRBuilder<>& ir) {
int64_t total_dims = 0;
int64_t total_dim_counts = 0;
for (int64_t i = 0; i < args.size(); ++i) {
ShapeUtil::ForEachSubshape(
GetShape(args[i]), [&](const Shape& shape, const ShapeIndex& index) {
if (!shape.IsArray()) {
return;
}
total_dims += shape.dimensions().size();
++total_dim_counts;
});
}
  int64_t shapes_encoding_size = total_dim_counts  // one rank slot per shape
                                 + total_dims;     // one slot per dimension
llvm::AllocaInst* shapes_alloca = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
ir.getInt64Ty(), ir.getInt64(shapes_encoding_size), alloca_name, &ir);
int64_t slot_id = 0;
auto store_shape = [&](const Shape& shape, const ShapeIndex& index) {
if (!shape.IsArray()) {
return;
}
llvm::Value* alloca_slot = ir.CreateConstInBoundsGEP1_64(
ir.getInt64Ty(), shapes_alloca, slot_id++);
ir.CreateStore(ir.getInt64(shape.dimensions().size()), alloca_slot);
for (int64_t dim : shape.dimensions()) {
alloca_slot = ir.CreateConstInBoundsGEP1_64(ir.getInt64Ty(),
shapes_alloca, slot_id++);
ir.CreateStore(ir.getInt64(dim), alloca_slot);
}
};
for (int64_t i = 0; i < args.size(); ++i) {
ShapeUtil::ForEachSubshape(GetShape(args[i]), store_shape);
}
CHECK_EQ(slot_id, shapes_encoding_size);
return {shapes_alloca, shapes_encoding_size};
}
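// Emits a call to the CPU runtime's FFI trampoline, forwarding the target
// name, result/operand pointer arrays, the opaque backend config, and the
// encoded operand/result types and shapes built above.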
llvm::Value* IrEmitter::EmitCallToFfi(HloCustomCallInstruction* custom_call,
llvm::AllocaInst* results_alloca,
llvm::AllocaInst* operands_alloca) {
const auto& operands = absl::MakeSpan(custom_call->operands());
const auto& shape = custom_call->shape();
const auto& result_shapes =
shape.IsTuple() ? shape.tuple_shapes() : std::vector<Shape>({shape});
EncodedInfo operand_types_encoded =
StoreEncodedTypes("operands_types", operands, *b());
EncodedInfo operand_shapes_encoded =
StoreEncodedShapes("operands_shapes", operands, *b());
EncodedInfo result_types_encoded =
StoreEncodedTypes("results_types", result_shapes, *b());
EncodedInfo result_shapes_encoded =
StoreEncodedShapes("results_shapes", result_shapes, *b());
const absl::string_view target = custom_call->custom_call_target();
const absl::string_view opaque = custom_call->opaque();
const auto target_ref = llvm_ir::AsStringRef(target);
const auto opaque_ref = llvm_ir::AsStringRef(opaque);
std::vector<llvm::Value*> arguments = {
GetExecutableRunOptionsArgument(),
b()->CreateGlobalStringPtr(target_ref),
b()->getInt64(target.size()),
results_alloca,
operands_alloca,
b()->CreateGlobalStringPtr(opaque_ref),
b()->getInt64(opaque.size()),
GetStatusArgument(),
operand_types_encoded.alloca,
b()->getInt64(operand_types_encoded.size),
operand_shapes_encoded.alloca,
result_types_encoded.alloca,
b()->getInt64(result_types_encoded.size),
result_shapes_encoded.alloca,
};
  return EmitCallToFunc(runtime::kHandleFfiCallSymbolName, arguments,
                        b()->getVoidTy(),
                        /*does_not_throw=*/false,
                        /*only_accesses_arg_memory=*/true);
}
void IrEmitter::EmitTransferElements(llvm::Value* target, llvm::Value* source,
int64_t element_count,
PrimitiveType primitive_type,
const llvm_ir::IrArray& target_array,
const llvm_ir::IrArray& source_array) {
::xla::cpu::EmitTransferElements(target, source, element_count,
primitive_type, target_array, source_array,
module_, *b());
}
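// Copies `element_count` elements of `primitive_type` from `source` to
// `target`. A single element becomes an aligned load/store pair that keeps
// each array's aliasing metadata; larger counts become a memcpy annotated
// with the merged metadata of both arrays.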
void EmitTransferElements(llvm::Value* target, llvm::Value* source,
int64_t element_count, PrimitiveType primitive_type,
const llvm_ir::IrArray& target_array,
const llvm_ir::IrArray& source_array,
llvm::Module* module, llvm::IRBuilder<>& b) {
unsigned primitive_type_size =
ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
llvm::Align element_alignment(tsl::MathUtil::GCD<unsigned>(
primitive_type_size,
::xla::cpu::MinimumAlignmentForPrimitiveType(primitive_type)));
llvm::Type* primitive_llvm_type =
llvm_ir::PrimitiveTypeToIrType(primitive_type, module);
if (element_count == 1) {
auto* load_instruction =
b.CreateAlignedLoad(primitive_llvm_type, source, element_alignment);
source_array.AnnotateLoadStoreInstructionWithMetadata(load_instruction);
auto* store_instruction =
b.CreateAlignedStore(load_instruction, target, element_alignment);
target_array.AnnotateLoadStoreInstructionWithMetadata(store_instruction);
} else {
auto* memcpy_instruction = b.CreateMemCpy(
target, llvm::Align(element_alignment), source,
llvm::Align(element_alignment),
element_count * primitive_type_size);
std::map<int, llvm::MDNode*> merged_metadata =
llvm_ir::MergeMetadata(&module->getContext(), source_array.metadata(),
target_array.metadata());
for (const auto& kind_md_pair : merged_metadata) {
memcpy_instruction->setMetadata(kind_md_pair.first, kind_md_pair.second);
}
}
}
absl::Status IrEmitter::CanDoFastConcatenate(
const HloInstruction* instr) const {
if (ShouldEmitParallelLoopFor(*instr)) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"Cannot generate memcpy-based concat for the parallel CPU backend");
}
const auto* concatenate = Cast<HloConcatenateInstruction>(instr);
const Shape& output_shape = concatenate->shape();
for (auto* op : concatenate->operands()) {
if (!LayoutUtil::Equal(op->shape().layout(), output_shape.layout())) {
return absl::Status(absl::StatusCode::kFailedPrecondition,
"Operand has mismatching layouts");
}
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleConcatenate(HloInstruction* concatenate) {
absl::Status fast_impl_reason = CanDoFastConcatenate(concatenate);
if (fast_impl_reason.ok()) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(concatenate));
llvm_ir::IrArray target_array = GetIrArrayFor(concatenate);
std::vector<llvm_ir::IrArray> source_arrays;
source_arrays.reserve(concatenate->operands().size());
for (HloInstruction* operand : concatenate->operands()) {
source_arrays.emplace_back(GetIrArrayFor(operand));
}
TF_RETURN_IF_ERROR(::xla::cpu::EmitFastConcatenate(
concatenate, source_arrays, target_array, module_, *b()));
VLOG(1) << "Emitted fast concatenate for " << concatenate->ToString();
return absl::OkStatus();
}
VLOG(1) << "Could not emit fast concatenate for " << concatenate->ToString()
<< ": " << fast_impl_reason.message();
return DefaultAction(concatenate);
}
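// A PRED branch index lowers to an if/then/else on the loaded predicate. An
// S32 index lowers to a switch whose default case runs the last branch
// computation, matching HLO's semantics for out-of-range indices.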
absl::Status IrEmitter::HandleConditional(HloInstruction* conditional) {
auto branch_index = conditional->operand(0);
int num_branches = conditional->branch_count();
TF_RET_CHECK(ShapeUtil::IsScalar(branch_index->shape()) &&
(branch_index->shape().element_type() == PRED ||
branch_index->shape().element_type() == S32))
<< "Branch index on a conditional must be scalar bool or int32_t; got: "
<< ShapeUtil::HumanString(branch_index->shape());
for (int b = 0; b < num_branches; ++b) {
HloComputation* br_computation = conditional->branch_computation(b);
TF_RET_CHECK(ShapeUtil::Equal(conditional->shape(),
br_computation->root_instruction()->shape()))
<< "Shape of conditional should be same as the shape of the " << b
<< "th branch computation; got: "
<< ShapeUtil::HumanString(conditional->shape()) << " and "
<< ShapeUtil::HumanString(br_computation->root_instruction()->shape());
}
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(conditional));
if (branch_index->shape().element_type() == PRED) {
llvm::LoadInst* pred_value = Load(
GetIrArrayFor(branch_index).GetBasePointeeType(),
GetIrArrayFor(branch_index).GetBasePointer(), "load_predicate_value");
llvm::Value* pred_cond =
ICmpNE(pred_value,
llvm::ConstantInt::get(
llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),
"boolean_predicate");
llvm_ir::LlvmIfData if_data =
llvm_ir::EmitIfThenElse(pred_cond, "conditional", b());
SetToFirstInsertPoint(if_data.true_block, b());
EmitGlobalCall(*conditional->branch_computation(0),
IrName(conditional, "_true"));
SetToFirstInsertPoint(if_data.false_block, b());
EmitGlobalCall(*conditional->branch_computation(1),
IrName(conditional, "_false"));
SetToFirstInsertPoint(if_data.after_block, b());
return absl::OkStatus();
}
llvm::LoadInst* branch_index_value = Load(
GetIrArrayFor(branch_index).GetBasePointeeType(),
GetIrArrayFor(branch_index).GetBasePointer(), "load_branch_index_value");
auto case_block = b()->GetInsertBlock();
llvm::BasicBlock* after_block;
if (case_block->getTerminator() == nullptr) {
    after_block =
        llvm_ir::CreateBasicBlock(/*insert_before=*/nullptr, "case-after", b());
b()->SetInsertPoint(case_block);
b()->CreateBr(after_block);
} else {
after_block =
case_block->splitBasicBlock(b()->GetInsertPoint(), "case-after");
}
case_block->getTerminator()->eraseFromParent();
  auto default_block = llvm_ir::CreateBasicBlock(/*insert_before=*/nullptr,
                                                 "case-default", b());
b()->SetInsertPoint(default_block);
EmitGlobalCall(*conditional->branch_computation(num_branches - 1),
IrName(conditional, "_default"));
b()->CreateBr(after_block);
b()->SetInsertPoint(case_block);
llvm::SwitchInst* case_inst =
b()->CreateSwitch(branch_index_value, default_block, num_branches - 1);
for (int br = 0; br < num_branches - 1; ++br) {
    auto branch_block = llvm_ir::CreateBasicBlock(
        /*insert_before=*/nullptr, absl::StrCat("case-branch", br), b());
b()->SetInsertPoint(branch_block);
EmitGlobalCall(*conditional->branch_computation(br),
IrName(conditional, absl::StrCat("_branch", br)));
b()->CreateBr(after_block);
case_inst->addCase(b()->getInt32(br), branch_block);
}
SetToFirstInsertPoint(after_block, b());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleAfterAll(HloInstruction* after_all) {
TF_RET_CHECK(ByteSizeOf(after_all->shape()) == 0);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(after_all));
return absl::OkStatus();
}
absl::Status IrEmitter::HandleBatchNormGrad(HloInstruction* batch_norm_grad) {
return Unimplemented("BatchNormGrad should be rewritten for CPU.");
}
absl::Status IrEmitter::HandleBatchNormTraining(
HloInstruction* batch_norm_training) {
return Unimplemented("BatchNormTraining should be rewritten for CPU.");
}
absl::Status IrEmitter::HandleGetDimensionSize(HloInstruction* get_size) {
return Unimplemented("GetDimensionSize should be rewritten for CPU.");
}
absl::Status IrEmitter::HandleSetDimensionSize(HloInstruction* set_size) {
return Unimplemented("SetDimensionSize should be rewritten for CPU.");
}
absl::Status IrEmitter::HandleAddDependency(HloInstruction* add_dependency) {
emitted_value_[add_dependency] =
GetEmittedValueFor(add_dependency->operand(0));
return absl::OkStatus();
}
absl::Status IrEmitter::HandleRng(HloInstruction* rng) {
return Unimplemented("Rng should be expanded for CPU.");
}
absl::Status IrEmitter::HandleRngBitGenerator(HloInstruction* rng) {
return Unimplemented("RngBitGenerator should be expanded for CPU.");
}
absl::Status IrEmitter::HandleRngGetAndUpdateState(HloInstruction* rng_state) {
VLOG(2) << "RngGetAndUpdateState: " << rng_state->ToString();
llvm::Value* old_state = llvm_ir::RngGetAndUpdateState(
Cast<HloRngGetAndUpdateStateInstruction>(rng_state)->delta(), module_,
b());
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(rng_state));
llvm::Value* address = GetEmittedValueFor(rng_state);
llvm::StoreInst* store = Store(old_state, address);
store->setAlignment(llvm::Align(IrEmitter::MinimumAlignmentForPrimitiveType(
rng_state->shape().element_type())));
return absl::OkStatus();
}
absl::Status IrEmitter::HandleStochasticConvert(HloInstruction* instruction) {
return Unimplemented("StochasticConvert should be decomposed for CPU.");
}
absl::Status IrEmitter::FinishVisit(HloInstruction* root) {
VLOG(2) << "FinishVisit root: " << root->ToString();
if (root->opcode() == HloOpcode::kOutfeed) {
VLOG(2) << " outfeed with value: "
<< llvm_ir::DumpToString(GetEmittedValueFor(root->operand(0)));
} else {
VLOG(2) << " value: " << llvm_ir::DumpToString(GetEmittedValueFor(root));
}
auto record_complete_computation = [&](llvm::Value* prof_counter) {
if (prof_counter) {
profiling_state_.RecordCompleteComputation(b(), prof_counter);
}
};
record_complete_computation(GetProfileCounterFor(*root->parent()));
return absl::OkStatus();
}
template <typename T>
llvm::Value* IrEmitter::GetProfileCounterCommon(
const T& hlo,
const absl::flat_hash_map<const T*, int64_t>& profile_index_map) {
auto it = profile_index_map.find(&hlo);
if (it == profile_index_map.end()) {
return nullptr;
}
int64_t prof_counter_idx = it->second;
std::string counter_name = IrName("prof_counter", hlo.name());
return GEP(b()->getInt64Ty(), GetProfileCountersArgument(),
b()->getInt64(prof_counter_idx), counter_name);
}
llvm::Value* IrEmitter::GetProfileCounterFor(
const HloInstruction& instruction) {
return GetProfileCounterCommon<HloInstruction>(instruction,
instruction_to_profile_idx_);
}
llvm::Value* IrEmitter::GetProfileCounterFor(
const HloComputation& computation) {
return GetProfileCounterCommon<HloComputation>(computation,
computation_to_profile_idx_);
}
void IrEmitter::ProfilingState::UpdateProfileCounter(llvm::IRBuilder<>* b,
llvm::Value* prof_counter,
llvm::Value* cycle_end,
llvm::Value* cycle_start) {
auto* cycle_diff = b->CreateSub(cycle_end, cycle_start);
llvm::LoadInst* old_cycle_count = b->CreateLoad(
llvm::cast<llvm::GetElementPtrInst>(prof_counter)->getSourceElementType(),
prof_counter, "old_cycle_count");
auto* new_cycle_count =
b->CreateAdd(cycle_diff, old_cycle_count, "new_cycle_count");
b->CreateStore(new_cycle_count, prof_counter);
}
llvm::Value* IrEmitter::ProfilingState::ReadCycleCounter(llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
if (!use_rdtscp_) {
llvm::Function* func_llvm_readcyclecounter =
llvm::Intrinsic::getDeclaration(module,
llvm::Intrinsic::readcyclecounter);
return b->CreateCall(func_llvm_readcyclecounter);
}
llvm::Function* func_llvm_x86_rdtscp =
llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::x86_rdtscp);
llvm::Value* rdtscp_call = b->CreateCall(func_llvm_x86_rdtscp);
return b->CreateExtractValue(rdtscp_call, {0});
}
void IrEmitter::ProfilingState::RecordCycleStart(llvm::IRBuilder<>* b,
HloInstruction* hlo) {
auto* cycle_start = ReadCycleCounter(b);
cycle_start->setName(IrName(hlo, "cycle_start"));
cycle_starts_[hlo] = cycle_start;
if (first_read_cycle_start_ == nullptr) {
first_read_cycle_start_ = cycle_start;
}
}
void IrEmitter::ProfilingState::RecordCycleDelta(llvm::IRBuilder<>* b,
HloInstruction* hlo,
llvm::Value* prof_counter) {
auto* cycle_end = ReadCycleCounter(b);
cycle_end->setName(IrName(hlo, "cycle_end"));
auto* cycle_start = cycle_starts_[hlo];
UpdateProfileCounter(b, prof_counter, cycle_end, cycle_start);
last_read_cycle_end_ = cycle_end;
}
void IrEmitter::ProfilingState::RecordCompleteComputation(
llvm::IRBuilder<>* b, llvm::Value* prof_counter) {
if (last_read_cycle_end_ && first_read_cycle_start_) {
UpdateProfileCounter(b, prof_counter, last_read_cycle_end_,
first_read_cycle_start_);
}
}
void IrEmitter::TracingState::EmitTracingStart(llvm::IRBuilder<>* b,
HloInstruction* hlo,
llvm::Value* run_options) {
if (!enabled_) {
return;
}
llvm::Type* void_ptr_type = b->getPtrTy();
llvm::FunctionType* fn_type = llvm::FunctionType::get(
b->getInt64Ty(),
      {void_ptr_type, void_ptr_type, void_ptr_type, b->getInt64Ty()},
      /*isVarArg=*/false);
llvm::Function* function = b->GetInsertBlock()->getParent();
llvm::Module* module = function->getParent();
const char* fn_name = runtime::kTracingStartSymbolName;
llvm::FunctionCallee trace_func =
module->getOrInsertFunction(fn_name, fn_type);
if (auto* fn = llvm::dyn_cast<llvm::Function>(trace_func.getCallee())) {
fn->setCallingConv(llvm::CallingConv::C);
fn->setDoesNotThrow();
fn->setOnlyAccessesArgMemory();
}
auto* hlo_name = b->CreateGlobalStringPtr(hlo->name());
auto* hlo_module = b->CreateGlobalStringPtr(hlo->GetModule()->name());
auto* program_id = b->getInt64(hlo->GetModule()->unique_id());
auto* activity_id = b->CreateCall(
trace_func, {run_options, hlo_name, hlo_module, program_id});
activity_id->setName(IrName(hlo, "activity_id"));
activity_ids_[hlo] = activity_id;
}
void IrEmitter::TracingState::EmitTracingEnd(llvm::IRBuilder<>* b,
HloInstruction* hlo,
llvm::Value* run_options) {
if (!enabled_) {
return;
}
llvm::FunctionType* fn_type =
      llvm::FunctionType::get(b->getVoidTy(), {b->getPtrTy(), b->getInt64Ty()},
                              /*isVarArg=*/false);
llvm::Function* function = b->GetInsertBlock()->getParent();
llvm::Module* module = function->getParent();
const char* fn_name = runtime::kTracingEndSymbolName;
llvm::FunctionCallee trace_func =
module->getOrInsertFunction(fn_name, fn_type);
if (auto* fn = llvm::dyn_cast<llvm::Function>(trace_func.getCallee())) {
fn->setCallingConv(llvm::CallingConv::C);
fn->setDoesNotThrow();
fn->setOnlyAccessesArgMemory();
}
auto* activity_id = activity_ids_.at(hlo);
b->CreateCall(trace_func, {run_options, activity_id});
}
namespace {
bool IsHloVeryCheap(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kBitcast ||
hlo->opcode() == HloOpcode::kTuple ||
hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kReplicaId;
}
}  // namespace
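// Pre/Postprocess wrap each emitted instruction with profiling and tracing:
// instructions with a profile counter, and non-trivial instructions in the
// entry computation when cpu_traceme_enabled() is set, get a cycle-counter
// read and a TraceMe activity around their generated code.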
absl::Status IrEmitter::Preprocess(HloInstruction* hlo) {
VLOG(3) << "Visiting: " << hlo->ToString();
if (instruction_to_profile_idx_.count(hlo) ||
(hlo_module_config_.cpu_traceme_enabled() && !IsHloVeryCheap(hlo) &&
hlo->parent()->IsEntryComputation())) {
tracing_state_.EmitTracingStart(b(), hlo,
GetExecutableRunOptionsArgument());
profiling_state_.RecordCycleStart(b(), hlo);
}
return absl::OkStatus();
}
absl::Status IrEmitter::Postprocess(HloInstruction* hlo) {
if (auto* prof_counter = GetProfileCounterFor(*hlo)) {
profiling_state_.RecordCycleDelta(b(), hlo, prof_counter);
}
if (instruction_to_profile_idx_.count(hlo) ||
(hlo_module_config_.cpu_traceme_enabled() && !IsHloVeryCheap(hlo) &&
hlo->parent()->IsEntryComputation())) {
tracing_state_.EmitTracingEnd(b(), hlo, GetExecutableRunOptionsArgument());
}
return absl::OkStatus();
}
llvm_ir::IrArray IrEmitter::GetIrArrayFor(const HloInstruction* hlo) {
llvm::Value* value_for_op = GetEmittedValueFor(hlo);
llvm::Type* ir_type = IrShapeType(hlo->shape());
llvm_ir::IrArray array(value_for_op, ir_type, hlo->shape());
AddAliasingInformationToIrArray(*hlo, &array);
return array;
}
std::vector<llvm_ir::IrArray> IrEmitter::GetIrArraysForOperandsOf(
const HloInstruction* hlo) {
std::vector<llvm_ir::IrArray> arrays;
std::transform(
hlo->operands().begin(), hlo->operands().end(),
std::back_inserter(arrays),
[&](const HloInstruction* operand) { return GetIrArrayFor(operand); });
return arrays;
}
llvm::Value* IrEmitter::GetEmittedValueFor(const HloInstruction* hlo) {
auto it = emitted_value_.find(hlo);
if (it == emitted_value_.end()) {
LOG(FATAL) << "could not find emitted value for: " << hlo->ToString();
}
return it->second;
}
llvm::Type* IrEmitter::IrShapeType(const Shape& shape) {
return llvm_ir::ShapeToIrType(shape, module_);
}
llvm::Value* IrEmitter::GetProfileCountersArgument() {
return compute_function()->profile_counters_arg();
}
llvm::Value* IrEmitter::GetStatusArgument() {
return compute_function()->status_arg();
}
llvm::Value* IrEmitter::GetBufferTableArgument() {
return compute_function()->buffer_table_arg();
}
llvm::Value* IrEmitter::GetExecutableRunOptionsArgument() {
return compute_function()->exec_run_options_arg();
}
llvm::BasicBlock* IrEmitter::GetReturnBlock() {
return compute_function()->return_block();
}
void IrEmitter::EmitEarlyReturnIfErrorStatus() {
llvm::Value* succeeded =
      EmitCallToFunc(runtime::kStatusIsSuccessSymbolName, {GetStatusArgument()},
                     b()->getInt1Ty(), /*does_not_throw=*/true,
                     /*only_accesses_arg_memory=*/true);
llvm_ir::EmitEarlyReturn(succeeded, b(), GetReturnBlock());
}
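// Returns a pointer backing a thread-local buffer: either the matching entry
// of the parameters argument (for parameter allocations) or a function-entry
// alloca created once per (function, slice) pair and cached thereafter.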
llvm::Value* IrEmitter::EmitThreadLocalBufferPointer(
const BufferAllocation::Slice& slice, const Shape& target_shape) {
const BufferAllocation& allocation = *slice.allocation();
llvm::Value* tempbuf_address = [&]() -> llvm::Value* {
auto param_it =
computation_parameter_allocations_.find(slice.allocation()->index());
if (param_it != computation_parameter_allocations_.end()) {
int64_t param_number = param_it->second;
llvm::Value* params = compute_function()->parameters_arg();
llvm::Value* param_address_offset = llvm_ir::EmitBufferIndexingGEP(
params, b()->getPtrTy(), param_number, b());
llvm::LoadInst* param_address_untyped =
Load(b()->getPtrTy(), param_address_offset);
if (!target_shape.IsOpaque()) {
AttachAlignmentMetadataForLoad(param_address_untyped, target_shape);
AttachDereferenceableMetadataForLoad(param_address_untyped,
target_shape);
}
return param_address_untyped;
}
const auto& assigned_buffers = allocation.assigned_buffers();
CHECK_EQ(1, assigned_buffers.size());
const Shape& shape = assigned_buffers.begin()->first->shape();
std::pair<llvm::Function*, BufferAllocation::Slice> key = {
compute_function()->function(), slice};
auto buf_it = thread_local_buffers_.find(key);
if (buf_it == thread_local_buffers_.end()) {
llvm::Value* buffer = llvm_ir::EmitAllocaAtFunctionEntry(
IrShapeType(shape), absl::StrCat("thread_local", slice.ToString()),
b(), MinimumAlignmentForShape(target_shape));
auto it_inserted_pair = thread_local_buffers_.insert({key, buffer});
CHECK(it_inserted_pair.second);
buf_it = it_inserted_pair.first;
}
return buf_it->second;
}();
return tempbuf_address;
}
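// Returns the address of a globally allocated buffer by indexing into the
// buffer table argument, loading the allocation's base pointer (with
// invariant/alignment/dereferenceable metadata attached), and applying the
// slice offset within the allocation.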
llvm::Value* IrEmitter::EmitGlobalBufferPointer(
const BufferAllocation::Slice& slice, const Shape& target_shape) {
const BufferAllocation& allocation = *slice.allocation();
llvm::Value* tempbuf_address_ptr = llvm_ir::EmitBufferIndexingGEP(
GetBufferTableArgument(), b()->getPtrTy(), slice.index(), b());
llvm::LoadInst* tempbuf_address_base =
Load(b()->getPtrTy(), tempbuf_address_ptr);
AttachInvariantLoadMetadataForLoad(tempbuf_address_base);
AttachAlignmentMetadataForLoad(tempbuf_address_base, allocation.size());
AttachDereferenceableMetadataForLoad(tempbuf_address_base, allocation.size());
llvm::Value* tempbuf_address_untyped = tempbuf_address_base;
if (slice.offset() > 0) {
tempbuf_address_untyped = InBoundsGEP(
b()->getInt8Ty(), tempbuf_address_base, b()->getInt64(slice.offset()));
}
return tempbuf_address_untyped;
}
llvm::Value* IrEmitter::EmitBufferPointer(const BufferAllocation::Slice& slice,
const Shape& target_shape) {
if (slice.allocation()->is_thread_local()) {
return EmitThreadLocalBufferPointer(slice, target_shape);
} else if (slice.allocation()->is_constant()) {
return FindOrDie(constant_buffer_to_global_, slice.allocation()->index());
} else {
return EmitGlobalBufferPointer(slice, target_shape);
}
}
absl::Status IrEmitter::EmitTargetAddressForOp(const HloInstruction* op) {
const Shape& target_shape = op->shape();
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,
assignment_.GetUniqueTopLevelSlice(op));
llvm::Value* addr = EmitBufferPointer(slice, target_shape);
addr->setName(IrName(op));
emitted_value_[op] = addr;
return absl::OkStatus();
}
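// Emits a loop nest that computes every element of `target_op`'s output.
// Tuple-shaped outputs of fusion/reduce/reduce-window ops get one IrArray per
// tuple element followed by an EmitTuple; all other ops get a single
// (possibly parallel) elementwise loop over the target array.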
absl::Status IrEmitter::EmitTargetElementLoop(
const HloInstruction* target_op, absl::string_view desc,
const llvm_ir::ElementGenerator& element_generator,
std::optional<llvm_ir::IrArray> result_array_opt) {
VLOG(2) << "EmitTargetElementLoop: " << target_op->ToString();
llvm_ir::IrArray target_array;
if (result_array_opt.has_value()) {
target_array = result_array_opt.value();
} else {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(target_op));
target_array = GetIrArrayFor(target_op);
}
const Shape& target_shape = target_op->shape();
if (target_shape.IsTuple() &&
(target_op->opcode() == HloOpcode::kFusion ||
target_op->opcode() == HloOpcode::kReduce ||
target_op->opcode() == HloOpcode::kReduceWindow)) {
TF_RET_CHECK(num_dynamic_loop_bounds_ == 0);
std::vector<llvm_ir::IrArray> output_arrays;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(target_shape); ++i) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
assignment_.GetUniqueSlice(target_op, {i}));
const Shape& element_shape = ShapeUtil::GetSubshape(target_shape, {i});
llvm::Value* op_target_address = EmitBufferPointer(slice, element_shape);
llvm::Type* op_target_type = IrShapeType(element_shape);
output_arrays.push_back(
llvm_ir::IrArray(op_target_address, op_target_type, element_shape));
}
TF_RETURN_IF_ERROR(
llvm_ir::LoopEmitter(element_generator, output_arrays, b())
.EmitLoop(IrName(target_op, desc)));
std::vector<llvm::Value*> tuple_operand_ptrs;
tuple_operand_ptrs.reserve(output_arrays.size());
for (int64_t i = 0; i < output_arrays.size(); ++i) {
tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer());
}
llvm_ir::EmitTuple(target_array, tuple_operand_ptrs, b());
} else {
if (ShouldEmitParallelLoopFor(*target_op)) {
std::vector<std::pair<llvm::Value*, llvm::Value*>> dynamic_loop_bounds =
compute_function()->GetDynamicLoopBounds();
TF_RETURN_IF_ERROR(ParallelLoopEmitter(element_generator, target_array,
&dynamic_loop_bounds, b())
.EmitLoop(IrName(target_op, desc)));
} else {
TF_RETURN_IF_ERROR(
llvm_ir::LoopEmitter(element_generator, target_array, b())
.EmitLoop(IrName(target_op, desc)));
}
}
return absl::OkStatus();
}
absl::Status IrEmitter::EmitMemcpy(const HloInstruction& source,
const HloInstruction& destination) {
llvm::Value* source_value = GetEmittedValueFor(&source);
llvm::Value* destination_value = GetEmittedValueFor(&destination);
int64_t source_size = ByteSizeOf(source.shape());
MemCpy(destination_value, llvm::Align(1), source_value,
llvm::Align(1), source_size);
return absl::OkStatus();
}
absl::Status IrEmitter::ElementTypesSameAndSupported(
const HloInstruction& instruction,
absl::Span<const HloInstruction* const> operands,
absl::Span<const PrimitiveType> supported_types) {
for (auto operand : operands) {
TF_RET_CHECK(
ShapeUtil::SameElementType(operands[0]->shape(), operand->shape()));
}
TF_RET_CHECK(!operands.empty());
PrimitiveType primitive_type = operands[0]->shape().element_type();
if (!absl::c_linear_search(supported_types, primitive_type)) {
return Unimplemented("unsupported operand type %s in op %s",
PrimitiveType_Name(primitive_type),
HloOpcodeString(instruction.opcode()));
}
return absl::OkStatus();
}
absl::Status IrEmitter::DefaultAction(HloInstruction* hlo) {
ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;
for (const HloInstruction* operand : hlo->operands()) {
operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) {
return GetIrArrayFor(operand).EmitReadArrayElement(index, b());
};
}
CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
return EmitTargetElementLoop(
hlo, "elemental_loop",
elemental_emitter.MakeElementGenerator(hlo, operand_to_generator),
std::nullopt);
}
llvm::Value* IrEmitter::EmitScalarReturningThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name) {
std::vector<llvm::Value*> return_value =
EmitThreadLocalCall(callee, parameters, name, false);
CHECK_EQ(return_value.size(), 1);
return return_value[0];
}
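// Calls a thread-local nested computation. Scalar arguments are spilled to
// function-entry allocas, the callee writes its scalar (or tuple-of-scalars)
// result into a stack buffer, and the resulting scalars are reloaded and
// returned to the caller.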
std::vector<llvm::Value*> IrEmitter::EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name, bool is_reducer, bool in_compute_function) {
CHECK(absl::c_binary_search(thread_local_computations_, &callee));
const Shape& return_shape = callee.root_instruction()->shape();
bool is_scalar_return = ShapeUtil::IsScalar(return_shape);
bool is_tuple_of_scalars_return =
return_shape.IsTuple() &&
absl::c_all_of(return_shape.tuple_shapes(), [&](const Shape& shape) {
return ShapeUtil::IsScalar(shape);
});
CHECK(is_scalar_return || is_tuple_of_scalars_return);
std::vector<llvm::Value*> parameter_addrs;
for (llvm::Value* parameter : parameters) {
CHECK(!parameter->getType()->isPointerTy());
llvm::Value* parameter_addr = llvm_ir::EmitAllocaAtFunctionEntry(
parameter->getType(), "arg_addr", b());
Store(parameter, parameter_addr);
parameter_addrs.push_back(parameter_addr);
}
llvm::Type* return_value_buffer_type =
llvm_ir::ShapeToIrType(return_shape, module_);
std::string retval_alloca_name = absl::StrCat(name, "_return_value_addr");
int retval_alignment =
is_scalar_return
? MinimumAlignmentForPrimitiveType(return_shape.element_type())
: 0;
llvm::AllocaInst* return_value_buffer = llvm_ir::EmitAllocaAtFunctionEntry(
return_value_buffer_type, retval_alloca_name, b(), retval_alignment);
std::vector<llvm::Value*> allocas_for_returned_scalars;
if (is_scalar_return) {
allocas_for_returned_scalars.push_back(return_value_buffer);
} else {
constexpr int max_tuple_size = 1000;
CHECK_LT(return_shape.tuple_shapes_size(), max_tuple_size)
<< "Multivalue function can not return more than 1000 elements to avoid"
<< " stack smashing";
allocas_for_returned_scalars =
llvm_ir::EmitTupleAllocasAtFunctionEntry(return_shape, b());
llvm_ir::IrArray tuple_array(return_value_buffer, return_value_buffer_type,
return_shape);
EmitTuple(tuple_array, allocas_for_returned_scalars, b());
}
llvm::Value* null_ptr = llvm::Constant::getNullValue(b()->getPtrTy());
Call(
FindOrDie(emitted_functions_,
ComputationToEmit{&callee, allow_reassociation_ || is_reducer}),
GetArrayFunctionCallArguments(
parameter_addrs, b(), name,
return_value_buffer,
in_compute_function ? GetExecutableRunOptionsArgument() : null_ptr,
null_ptr,
in_compute_function ? GetStatusArgument() : null_ptr,
in_compute_function ? GetProfileCountersArgument() : null_ptr));
if (ComputationTransitivelyContainsCustomCall(&callee)) {
    DCHECK(!in_compute_function) << "Custom calls inside nested computations "
                                    "are not supported by the Thunks runtime";
EmitEarlyReturnIfErrorStatus();
}
std::vector<llvm::Value*> returned_scalars;
returned_scalars.reserve(allocas_for_returned_scalars.size());
for (llvm::Value* addr : allocas_for_returned_scalars) {
returned_scalars.push_back(
Load(llvm::cast<llvm::AllocaInst>(addr)->getAllocatedType(), addr));
}
return returned_scalars;
}
void IrEmitter::EmitGlobalCall(const HloComputation& callee,
absl::string_view name) {
CHECK(absl::c_binary_search(global_computations_, &callee));
Call(FindOrDie(emitted_functions_,
ComputationToEmit{&callee, allow_reassociation_}),
GetArrayFunctionCallArguments(
{}, b(), name,
llvm::Constant::getNullValue(b()->getPtrTy()),
GetExecutableRunOptionsArgument(),
GetBufferTableArgument(),
GetStatusArgument(),
GetProfileCountersArgument()));
if (ComputationTransitivelyContainsCustomCall(&callee)) {
EmitEarlyReturnIfErrorStatus();
}
}
llvm::Value* IrEmitter::GetBufferForGlobalCallReturnValue(
const HloComputation& callee) {
const HloInstruction* root_inst = callee.root_instruction();
if (root_inst->opcode() == HloOpcode::kOutfeed) {
return llvm::Constant::getNullValue(b()->getPtrTy());
}
const BufferAllocation::Slice root_buffer =
assignment_.GetUniqueTopLevelSlice(root_inst).value();
return EmitBufferPointer(root_buffer, root_inst->shape());
}
void IrEmitter::BindFusionArguments(const HloInstruction* fusion,
FusedIrEmitter* fused_emitter) {
for (int i = 0; i < fusion->operand_count(); i++) {
const HloInstruction* operand = fusion->operand(i);
fused_emitter->BindGenerator(
*fusion->fused_parameter(i),
[this, operand](llvm_ir::IrArray::Index index) {
return GetIrArrayFor(operand).EmitReadArrayElement(index, b());
});
}
}
}
} | #include "xla/service/cpu/ir_emitter.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/ir_function.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/logical_buffer.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
using IrEmitterTest = HloTestBase;
static std::pair<llvm::Function*, llvm::BasicBlock*> CreateFunction(
llvm::LLVMContext& context, llvm::Module* module, llvm::IRBuilder<>* b) {
llvm::PointerType* ptrtype = llvm::PointerType::getUnqual(context);
llvm::FunctionType* ftype = llvm::FunctionType::get(ptrtype, ptrtype, false);
llvm::Function* function = llvm::dyn_cast<llvm::Function>(
module->getOrInsertFunction("func2", ftype).getCallee());
llvm::BasicBlock* return_block =
llvm::BasicBlock::Create(context, "", function);
b->SetInsertPoint(return_block);
[[maybe_unused]] llvm::ReturnInst* ret = b->CreateRet(
llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(context)));
return std::make_pair(function, return_block);
}
TEST_F(IrEmitterTest, ComputeFuncStack) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
ROOT %zero = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
const HloInstruction* zero = FindInstruction(hlo.get(), "zero");
ASSERT_NE(zero, nullptr);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffer_assignment,
BufferAssigner::Run(
hlo.get(), std::make_unique<DependencyHloOrdering>(hlo.get()),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; }));
TargetMachineFeaturesWithFakeAlignmentLogic target_machine(
[](int64_t size) { return 1; });
IrEmitter ir_emitter(nullptr, *hlo, *buffer_assignment, module.get(), {}, {},
{}, &target_machine, false);
llvm::IRBuilder<>* b = ir_emitter.b();
ASSERT_NE(b, nullptr);
const std::pair<llvm::Function*, llvm::BasicBlock*> fb =
CreateFunction(context, module.get(), b);
llvm::Function* function = fb.first;
llvm::BasicBlock* return_block = fb.second;
ASSERT_NE(function, nullptr);
ASSERT_NE(return_block, nullptr);
const auto funcname = "func1";
const auto linkagetype = llvm::GlobalValue::LinkageTypes::ExternalLinkage;
const HloModuleConfig module_config;
ir_emitter.PushComputeFunction(funcname, linkagetype, module_config,
module.get(), 0);
ASSERT_EQ(ir_emitter.compute_function()->function()->getName().str(),
funcname);
ir_emitter.PushComputeFunction(b, module.get(), 0, function, nullptr,
return_block);
ASSERT_EQ(ir_emitter.compute_function()->function(), function);
ir_emitter.PopComputeFunction();
ASSERT_EQ(ir_emitter.compute_function()->function()->getName().str(),
funcname);
ir_emitter.PopComputeFunction();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
519dbf87-0273-4254-a829-e982aa632a86 | cpp | tensorflow/tensorflow | load | tensorflow/cc/experimental/libexport/load.cc | tensorflow/cc/experimental/libexport/load_test.cc | #include "tensorflow/cc/experimental/libexport/load.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
namespace libexport {
using protobuf::RepeatedPtrField;
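// Loads a SavedModel from `path`: parses the saved_model .pb/.pbtxt proto,
// opens the variables checkpoint bundle and its trackable object graph when a
// variables directory exists, and indexes the GraphDef nodes by name.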
absl::StatusOr<TFPackage> TFPackage::Load(const std::string& path) {
TFPackage tf_package;
const string saved_model_pb_path = io::JoinPath(path, kSavedModelFilenamePb);
const string saved_model_pbtxt_path =
io::JoinPath(path, kSavedModelFilenamePbTxt);
if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
TF_RETURN_IF_ERROR(ReadBinaryProto(Env::Default(), saved_model_pb_path,
&tf_package.saved_model_proto_));
} else if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
TF_RETURN_IF_ERROR(ReadTextProto(Env::Default(), saved_model_pbtxt_path,
&tf_package.saved_model_proto_));
} else {
return Status(absl::StatusCode::kNotFound,
"Could not find SavedModel .pb or .pbtxt at supplied export "
"directory path: " +
path);
}
const std::string variables_dir =
tensorflow::io::JoinPath(path, tensorflow::kSavedModelVariablesDirectory);
if (Env::Default()->FileExists(variables_dir).ok()) {
tf_package.has_checkpoint_ = true;
tf_package.variables_filepath_ = tensorflow::io::JoinPath(
variables_dir, tensorflow::kSavedModelVariablesFilename);
tf_package.variable_reader_ = std::make_unique<tensorflow::BundleReader>(
tensorflow::Env::Default(), tf_package.variables_filepath_);
tensorflow::Tensor object_graph_tensor;
TF_RETURN_IF_ERROR(tf_package.variable_reader_->Lookup(
tensorflow::kObjectGraphProtoKey, &object_graph_tensor));
const auto* object_graph_string =
reinterpret_cast<const tensorflow::tstring*>(
object_graph_tensor.tensor_data().data());
tf_package.trackable_object_graph_.ParseFromString(*object_graph_string);
} else {
tf_package.has_checkpoint_ = false;
LOG(INFO)
<< "No checkpoint found, assuming this is a program-only SavedModel";
}
const auto& nodes =
tf_package.saved_model_proto_.meta_graphs(0).graph_def().node();
for (const auto& node : nodes) {
tf_package.graph_def_nodes_by_name_[node.name()] = &node;
}
return tf_package;
}
absl::StatusOr<std::string> TFPackage::GetVariableCheckpointKey(int index) {
const auto& trackable_object = trackable_object_graph_.nodes(index);
const TrackableObjectGraph::TrackableObject::SerializedTensor*
serialized_tensor = nullptr;
for (auto& maybe_serialized_tensor : trackable_object.attributes()) {
if (maybe_serialized_tensor.name() == "VARIABLE_VALUE") {
serialized_tensor = &maybe_serialized_tensor;
}
}
if (serialized_tensor == nullptr) {
return tensorflow::Status(absl::StatusCode::kInternal,
"Failed to find variable value field.");
}
return serialized_tensor->checkpoint_key();
}
const SavedObjectGraph& TFPackage::GetObjectGraph() {
return saved_model_proto_.mutable_meta_graphs(0)->object_graph_def();
}
absl::StatusOr<const tensorflow::NodeDef*> TFPackage::GetGraphDefNode(
std::string name) {
const auto& iter = graph_def_nodes_by_name_.find(name);
if (iter == graph_def_nodes_by_name_.end()) {
return tensorflow::Status(absl::StatusCode::kInternal,
absl::StrCat("Failed to find node named ", name));
}
return iter->second;
}
const RepeatedPtrField<FunctionDef>& TFPackage::GetFunctionDefs() {
auto& function_library =
saved_model_proto_.mutable_meta_graphs(0)->graph_def().library();
return function_library.function();
}
}
} | #include "tensorflow/cc/experimental/libexport/load.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace libexport {
namespace {
TEST(LoadTest, TestDiskSavedModelLoad) {
absl::StatusOr<TFPackage> result = TFPackage::Load("test");
EXPECT_FALSE(result.status().ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/load.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/load_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49811fe3-1d03-4cbf-b0e2-07760a03d2b0 | cpp | tensorflow/tensorflow | fuse_binary_into_preceding_affine | tensorflow/lite/toco/graph_transformations/fuse_binary_into_preceding_affine.cc | tensorflow/lite/toco/graph_transformations/tests/fuse_binary_into_preceding_affine_test.cc | #include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
int GetBiasIndex(const Operator& op) {
if (op.type == OperatorType::kConv ||
op.type == OperatorType::kFullyConnected ||
op.type == OperatorType::kDepthwiseConv) {
return 2;
} else if (op.type == OperatorType::kTransposeConv) {
return 3;
}
LOG(FATAL) << "Unhandled operator type";
return 0;
}
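// Folds a constant Add/Sub operand into the bias vector of the preceding
// affine op, supporting both per-channel operands and broadcast (scalar)
// operands.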
void FuseAddOrSubParamsIntoPrecedingAffine(Model* model, Operator* preceding_op,
const Operator* add_or_sub_op,
int index_of_constant_input) {
CHECK(add_or_sub_op->type == OperatorType::kAdd ||
add_or_sub_op->type == OperatorType::kSub);
CHECK(index_of_constant_input == 0 || index_of_constant_input == 1);
if (preceding_op->inputs.size() < 3) {
LOG(FATAL) << "Missing bias parameter";
}
const auto bias_ind = GetBiasIndex(*preceding_op);
auto& bias = model->GetArray(preceding_op->inputs[bias_ind]);
bias.minmax = nullptr;
const auto& operand =
model->GetArray(add_or_sub_op->inputs[index_of_constant_input]);
const Shape& bias_shape = bias.shape();
const Shape& operand_shape = operand.shape();
auto& bias_buffer = bias.GetMutableBuffer<ArrayDataType::kFloat>();
float* const bias_data = bias_buffer.data.data();
const auto& operand_buffer = operand.GetBuffer<ArrayDataType::kFloat>();
const float* const operand_data = operand_buffer.data.data();
const int depth = bias_shape.dims(bias_shape.dimensions_count() - 1);
int operand_channel_increment = 0;
if (operand_shape.dimensions_count() >= 1 &&
operand_shape.dims(operand_shape.dimensions_count() - 1) ==
bias_shape.dims(bias_shape.dimensions_count() - 1)) {
operand_channel_increment = 1;
} else if (operand_shape.dimensions_count() == 0 ||
operand_shape.dims(operand_shape.dimensions_count() - 1) == 1) {
operand_channel_increment = 0;
} else {
LOG(FATAL) << "Operand shape mismatch.";
}
enum class OpType { BiasPlusOperand, BiasMinusOperand, OperandMinusBias };
const OpType optype = (add_or_sub_op->type == OperatorType::kAdd)
? OpType::BiasPlusOperand
: (index_of_constant_input == 1)
? OpType::BiasMinusOperand
: OpType::OperandMinusBias;
int operand_channel = 0;
for (int i = 0; i < depth; i++) {
float& bias_val = bias_data[i];
const float operand_val = operand_data[operand_channel];
if (optype == OpType::BiasPlusOperand) {
bias_val += operand_val;
} else if (optype == OpType::BiasMinusOperand) {
bias_val -= operand_val;
} else if (optype == OpType::OperandMinusBias) {
bias_val = operand_val - bias_val;
} else {
LOG(FATAL) << "Should not get here.";
}
operand_channel += operand_channel_increment;
}
}
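// Folds a constant Mul/Div operand into both the weights and the bias of the
// preceding affine op by scaling every output channel by the operand value.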
void FuseMulOrDivParamsIntoPrecedingAffine(Model* model, Operator* preceding_op,
const Operator* mul_or_div_op,
int index_of_constant_input) {
CHECK(mul_or_div_op->type == OperatorType::kMul ||
mul_or_div_op->type == OperatorType::kDiv);
CHECK(index_of_constant_input == 0 || index_of_constant_input == 1);
CHECK(mul_or_div_op->type != OperatorType::kDiv ||
index_of_constant_input == 1);
if (preceding_op->inputs.size() < 3) {
LOG(FATAL) << "Missing bias parameter";
}
const auto& weights_name = preceding_op->inputs[1];
const auto bias_ind = GetBiasIndex(*preceding_op);
const auto& bias_name = preceding_op->inputs[bias_ind];
auto& weights = model->GetArray(weights_name);
DropMinMax(model, weights_name);
auto& bias = model->GetArray(bias_name);
DropMinMax(model, bias_name);
const auto& operand =
model->GetArray(mul_or_div_op->inputs[index_of_constant_input]);
const Shape& weights_shape = weights.shape();
const Shape& bias_shape = bias.shape();
const Shape& operand_shape = operand.shape();
auto& weights_buffer = weights.GetMutableBuffer<ArrayDataType::kFloat>();
float* const weights_data = weights_buffer.data.data();
auto& bias_buffer = bias.GetMutableBuffer<ArrayDataType::kFloat>();
float* const bias_data = bias_buffer.data.data();
const auto& operand_buffer = operand.GetBuffer<ArrayDataType::kFloat>();
const float* const operand_data = operand_buffer.data.data();
int operand_channel_increment = 0;
if (operand_shape.dimensions_count() >= 1 &&
operand_shape.dims(operand_shape.dimensions_count() - 1) ==
bias_shape.dims(bias_shape.dimensions_count() - 1)) {
operand_channel_increment = 1;
} else if (operand_shape.dimensions_count() == 0 ||
operand_shape.dims(operand_shape.dimensions_count() - 1) == 1) {
operand_channel_increment = 0;
} else {
LOG(FATAL) << "Operand shape mismatch.";
}
int output_depth;
if (preceding_op->type == OperatorType::kConv ||
preceding_op->type == OperatorType::kFullyConnected ||
preceding_op->type == OperatorType::kTransposeConv) {
output_depth = weights_shape.dims(0);
} else if (preceding_op->type == OperatorType::kDepthwiseConv) {
output_depth = weights_shape.dims(weights_shape.dimensions_count() - 1);
} else {
LOG(FATAL) << "Should not get here";
}
const int weights_size = RequiredBufferSizeForShape(weights_shape);
const int weights_per_depth = weights_size / output_depth;
CHECK_EQ(weights_size, weights_per_depth * output_depth);
int operand_channel = 0;
for (int c = 0; c < output_depth; c++) {
if (mul_or_div_op->type == OperatorType::kMul) {
bias_data[c] *= operand_data[operand_channel];
} else if (mul_or_div_op->type == OperatorType::kDiv) {
bias_data[c] /= operand_data[operand_channel];
} else {
LOG(FATAL) << "Should not get here";
}
if (preceding_op->type == OperatorType::kConv ||
preceding_op->type == OperatorType::kFullyConnected) {
for (int i = 0; i < weights_per_depth; i++) {
if (mul_or_div_op->type == OperatorType::kMul) {
weights_data[c * weights_per_depth + i] *=
operand_data[operand_channel];
} else if (mul_or_div_op->type == OperatorType::kDiv) {
weights_data[c * weights_per_depth + i] /=
operand_data[operand_channel];
} else {
LOG(FATAL) << "Should not get here";
}
}
} else if (preceding_op->type == OperatorType::kDepthwiseConv) {
for (int k = 0; k < weights_per_depth; k++) {
if (mul_or_div_op->type == OperatorType::kMul) {
weights_data[k * output_depth + c] *= operand_data[operand_channel];
} else if (mul_or_div_op->type == OperatorType::kDiv) {
weights_data[k * output_depth + c] /= operand_data[operand_channel];
} else {
LOG(FATAL) << "Should not get here";
}
}
} else {
LOG(FATAL) << "Should not get here";
}
operand_channel += operand_channel_increment;
}
}
}
::tensorflow::Status FuseBinaryIntoPrecedingAffine::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
const auto binary_it = model->operators.begin() + op_index;
const auto* binary_op = binary_it->get();
if (binary_op->type != OperatorType::kAdd &&
binary_op->type != OperatorType::kMul &&
binary_op->type != OperatorType::kSub &&
binary_op->type != OperatorType::kDiv) {
return absl::OkStatus();
}
CHECK_EQ(binary_op->inputs.size(), 2);
const bool is_input_constant[2] = {
IsConstantParameterArray(*model, binary_op->inputs[0]),
IsConstantParameterArray(*model, binary_op->inputs[1]),
};
if (!is_input_constant[0] && !is_input_constant[1]) {
return absl::OkStatus();
}
if (is_input_constant[0] && is_input_constant[1]) {
return absl::OkStatus();
}
const int index_of_constant_input = is_input_constant[0] ? 0 : 1;
const int index_of_variable_input = is_input_constant[0] ? 1 : 0;
CHECK(is_input_constant[index_of_constant_input]);
CHECK(!is_input_constant[index_of_variable_input]);
if (binary_op->type == OperatorType::kDiv) {
if (index_of_constant_input != 1) {
AddMessageF("Not fusing %s because the denominator is not constant",
LogName(*binary_op));
return absl::OkStatus();
}
}
Operator* preceding_op =
GetOpWithOutput(*model, binary_op->inputs[index_of_variable_input]);
if (!preceding_op) {
AddMessageF("Not fusing %s because it is not the output of another op",
LogName(*binary_op));
return absl::OkStatus();
}
for (const std::string& output_array : model->flags.output_arrays()) {
if (preceding_op->outputs[0] == output_array) {
return absl::OkStatus();
}
}
if (preceding_op->type != OperatorType::kConv &&
preceding_op->type != OperatorType::kFullyConnected &&
preceding_op->type != OperatorType::kDepthwiseConv &&
preceding_op->type != OperatorType::kTransposeConv) {
AddMessageF(
"Not fusing %s because the preceding %s is not of one of the supported "
"types",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
if (preceding_op->type == OperatorType::kTransposeConv &&
binary_op->type != OperatorType::kAdd) {
AddMessageF("Not fusing %s to preceding %s", LogName(*binary_op),
LogName(*preceding_op));
return absl::OkStatus();
}
if (preceding_op->fused_activation_function !=
FusedActivationFunctionType::kNone) {
AddMessageF(
"Not fusing %s because the preceding %s has a fused activation "
"function",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
if (preceding_op->inputs.size() < 3) {
AddMessageF(
"Not fusing %s because the preceding %s does not have a bias vector",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
const auto& weights_name = preceding_op->inputs[1];
const auto bias_ind = GetBiasIndex(*preceding_op);
const auto& bias_name = preceding_op->inputs[bias_ind];
const auto& weights = model->GetArray(weights_name);
const auto& bias = model->GetArray(bias_name);
if (weights.data_type != ArrayDataType::kFloat ||
bias.data_type != ArrayDataType::kFloat) {
AddMessageF(
"Not fusing %s into preceding %s because one of weights or bias array "
"is not float (types are %s and %s)",
LogName(*binary_op), LogName(*preceding_op),
ArrayDataTypeName(weights.data_type),
ArrayDataTypeName(bias.data_type));
return absl::OkStatus();
}
const int count_ops_consuming_bias = CountOpsWithInput(*model, bias_name);
const int count_ops_consuming_weights =
CountOpsWithInput(*model, weights_name);
if (binary_op->type == OperatorType::kAdd ||
binary_op->type == OperatorType::kSub) {
if (!bias.buffer) {
AddMessageF(
"Not fusing %s because the preceding %s has a non-constant bias "
"array",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
if (count_ops_consuming_bias > 1) {
AddMessageF(
"Not fusing %s because the bias of the preceding %s is consumed by "
"another op",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
} else {
if (!weights.buffer || !bias.buffer) {
AddMessageF(
"Not fusing %s because the preceding %s has non-constant weights or "
"bias arrays",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
if (count_ops_consuming_weights > 1 || count_ops_consuming_bias > 1) {
AddMessageF(
"Not fusing %s because the weights or bias of the preceding %s is "
"consumed by another op",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
}
int count_ops_consuming_output =
CountOpsWithInput(*model, preceding_op->outputs[0]);
DCHECK_GE(count_ops_consuming_output, 1);
if (count_ops_consuming_output > 1) {
AddMessageF(
"Not fusing %s because the output of the preceding %s is consumed by "
"another op",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
AddMessageF("Fusing %s into the preceding %s", LogName(*binary_op),
LogName(*preceding_op));
if (binary_op->type == OperatorType::kAdd ||
binary_op->type == OperatorType::kSub) {
FuseAddOrSubParamsIntoPrecedingAffine(model, preceding_op, binary_op,
index_of_constant_input);
} else if (binary_op->type == OperatorType::kMul ||
binary_op->type == OperatorType::kDiv) {
FuseMulOrDivParamsIntoPrecedingAffine(model, preceding_op, binary_op,
index_of_constant_input);
} else {
LOG(FATAL) << "should not get here";
}
model->EraseArray(preceding_op->outputs[0]);
preceding_op->outputs[0] = binary_op->outputs[0];
preceding_op->fused_activation_function =
binary_op->fused_activation_function;
DeleteOpAndArrays(model, binary_op);
*modified = true;
return absl::OkStatus();
}
} | #include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
namespace {
std::vector<testing::Matcher<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error = 1e-5) {
std::vector<testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
matchers.emplace_back(testing::FloatNear(v, max_abs_error));
}
return matchers;
}
}
class FuseBinaryIntoPrecedingAffineTest : public ::testing::Test {
protected:
FuseBinaryIntoPrecedingAffineTest() {}
void SetUp() override { model_ = std::make_unique<Model>(); }
void CreateArray(const std::string& name, const std::vector<int>& shape) {
Array& array = model_->GetOrCreateArray(name);
array.data_type = ArrayDataType::kFloat;
Shape* array_shape = array.mutable_shape();
*(array_shape->mutable_dims()) = shape;
}
void CreateConstantArray(const std::string& name,
const std::vector<int>& shape,
const std::vector<float>& data) {
CreateArray(name, shape);
Array& array = model_->GetOrCreateArray(name);
auto& array_buffer = array.GetMutableBuffer<ArrayDataType::kFloat>();
int bufsize = 1;
for (int dim : shape) {
bufsize *= dim;
}
array_buffer.data.resize(bufsize);
float* buf_ptr = array_buffer.data.data();
for (int i = 0; i < bufsize; ++i) {
buf_ptr[i] = data[i];
}
}
std::unique_ptr<Model> model_;
};
TEST_F(FuseBinaryIntoPrecedingAffineTest, FuseAddIntoTransposeConv) {
{
CreateConstantArray("OutputShape",
{1, 2}, {2, 2});
CreateConstantArray("TransConvWeight", {2, 2}, {1.0, 2.0, 3.0, 4.0});
CreateConstantArray("TransConvBias", {1}, {1.0});
CreateArray("TransConvInput",
{2, 2});
CreateArray("TransConvOutput", {2, 2});
CreateConstantArray("AddInput2", {1}, {2.0});
CreateArray("AddOutput", {2, 2});
auto* tc_op = new TransposeConvOperator;
tc_op->inputs = {"OutputShape", "TransConvWeight", "TransConvInput",
"TransConvBias"};
tc_op->outputs = {"TransConvOutput"};
model_->operators.push_back(std::unique_ptr<Operator>(tc_op));
auto* add_op = new AddOperator;
add_op->inputs = {"TransConvOutput", "AddInput2"};
add_op->outputs = {"AddOutput"};
model_->operators.push_back(std::unique_ptr<Operator>(add_op));
}
toco::FuseBinaryIntoPrecedingAffine transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 1, &modified).ok());
EXPECT_TRUE(modified);
ASSERT_EQ(model_->operators.size(), 1);
const auto& op = model_->operators[0];
ASSERT_EQ(op->type, OperatorType::kTransposeConv);
ASSERT_EQ(op->inputs.size(), 4);
auto& weights_array = model_->GetArray(op->inputs[1]);
EXPECT_THAT(weights_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear({1.0, 2.0, 3.0, 4.0})));
auto& bias_array = model_->GetArray(op->inputs[3]);
EXPECT_THAT(bias_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear({3.0})));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/fuse_binary_into_preceding_affine.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/fuse_binary_into_preceding_affine_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef6cb643-5703-419f-9a58-6885cf50b9d9 | cpp | tensorflow/tensorflow | run_hlo_module | third_party/xla/xla/tools/run_hlo_module.cc | third_party/xla/xla/tools/run_hlo_module_test.cc | #include "xla/tools/run_hlo_module.h"
#include <functional>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_comparison.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/test_utils.h"
#include "xla/tools/hlo_control_flow_flattening.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/tools/hlo_module_loader.h"
#include "xla/tools/prepare_reference_module.h"
#include "xla/tools/run_hlo_module.pb.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
enum class ModuleResult {
kMatched,
kRan,
kSkipped,
kDidntRun,
kOtherError,
kCompilationError,
kRuntimeError,
kMismatch,
};
constexpr absl::string_view ModuleResultToString(ModuleResult result) {
switch (result) {
case ModuleResult::kMatched:
return "MATCHED";
case ModuleResult::kRan:
return "RAN";
case ModuleResult::kSkipped:
return "SKIPPED";
case ModuleResult::kDidntRun:
return "DIDN'T RUN";
case ModuleResult::kOtherError:
return "OTHER ERROR";
case ModuleResult::kCompilationError:
return "COMPILATION ERROR";
case ModuleResult::kRuntimeError:
return "RUNTIME ERROR";
case ModuleResult::kMismatch:
return "MISMATCH";
}
}
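// Writes `literal` to temp files in both binary-proto and text form (using
// the test undeclared-outputs directory when available) so mismatching
// values can be inspected after a failed comparison.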
void WriteLiteralToTempFile(const LiteralSlice& literal,
const std::string& name) {
auto* env = tsl::Env::Default();
std::string binary_filename;
std::string text_filename;
std::string outdir;
if (tsl::io::GetTestUndeclaredOutputsDir(&outdir)) {
std::string filename = tsl::io::JoinPath(
outdir, absl::StrFormat("tempfile-%d-%s", env->NowMicros(), name));
binary_filename = absl::StrCat(filename, ".pb");
text_filename = absl::StrCat(filename, ".txt");
} else {
binary_filename = tsl::io::GetTempFilename(absl::StrCat(name, ".pb"));
text_filename = tsl::io::GetTempFilename(absl::StrCat(name, ".txt"));
}
TF_CHECK_OK(tsl::WriteBinaryProto(env, binary_filename, literal.ToProto()));
TF_CHECK_OK(tsl::WriteStringToFile(env, text_filename, literal.ToString()));
LOG(ERROR) << "wrote Literal to " << name << " binary: " << binary_filename
<< " text: " << text_filename;
}
void OnMiscompare(const LiteralSlice& expected, const LiteralSlice& actual,
const LiteralSlice& mismatches,
const ShapeIndex& ,
const literal_comparison::ErrorBuckets& ) {
LOG(INFO) << "expected: " << ShapeUtil::HumanString(expected.shape()) << " "
<< literal_comparison::ToStringTruncated(expected);
LOG(INFO) << "actual: " << ShapeUtil::HumanString(actual.shape()) << " "
<< literal_comparison::ToStringTruncated(actual);
LOG(INFO) << "Dumping literals to temp files...";
WriteLiteralToTempFile(expected, "expected");
WriteLiteralToTempFile(actual, "actual");
WriteLiteralToTempFile(mismatches, "mismatches");
}
absl::StatusOr<Literal> ExecuteWithRunner(
std::unique_ptr<HloModule> module,
const BufferAssignmentProto* buffer_assignment_proto,
absl::Span<const Literal> args, HloRunnerInterface* runner,
bool run_hlo_passes) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
VerifyHloModule(module.get(), false,
true),
absl::StrCat("(on ", runner->Name(), ")"));
std::cerr << "Running HLO module with runner " << runner->Name() << "...\n";
XLA_VLOG_LINES(1, module->ToString());
const auto start = std::chrono::high_resolution_clock::now();
ExecutionProfile profile;
auto result_status =
(buffer_assignment_proto == nullptr)
? runner->Execute(std::move(module), args, run_hlo_passes, &profile)
: runner->ExecuteWithBufferAssignment(std::move(module),
buffer_assignment_proto, args,
run_hlo_passes, &profile);
const auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
std::cerr << "... compiled and ran in " << diff.count() << "s.\n";
double run_time = static_cast<double>(profile.compute_time_ns()) / 1e9;
std::cerr << "execution time for runner " << runner->Name() << ": "
<< run_time << "s.\n";
TF_RETURN_WITH_CONTEXT_IF_ERROR(
result_status.status(),
absl::StrCat("Failed to execute on ", runner->Name()));
return std::move(result_status).value();
}
void UseCpuThunkRuntime(HloModule& module) {
auto debug_options = module.config().debug_options();
debug_options.set_xla_cpu_use_thunk_runtime(true);
module.mutable_config().set_debug_options(debug_options);
}
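// Runs `test_module` on the test runner and, when a reference runner is
// given, a reference copy prepared by PrepareReferenceModule, then compares
// the two results within the configured error bounds. Per-runner outcomes
// are reported through the optional ModuleResult out-parameters.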
absl::Status RunAndCompareInternal(
std::unique_ptr<HloModule> test_module,
const BufferAssignmentProto* buffer_assignment_proto,
HloRunnerInterface* test_runner, HloRunnerInterface* reference_runner,
std::minstd_rand0* engine, const RunHloModuleOptions& options,
xla::RunHloModuleIterationLiterals* iteration_literals_proto,
std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>
reference_module_modifier_hook,
std::function<void(HloModuleConfig*)> config_modifier_hook,
ModuleResult* test_run_result, ModuleResult* reference_run_result) {
auto copy_result_on_failure = [](auto status, ModuleResult result,
ModuleResult* out_result) {
if (!status.ok() && out_result != nullptr) {
*out_result = result;
}
return status;
};
if (!config_modifier_hook) {
config_modifier_hook = [](HloModuleConfig* config) {
config->set_seed(42);
};
}
if (options.flatten_control_flow) {
HloControlFlowFlattening control_flow_flattening(
HloControlFlowFlattening::Options{1});
TF_RETURN_IF_ERROR(
copy_result_on_failure(control_flow_flattening.Run(test_module.get()),
ModuleResult::kCompilationError, test_run_result)
.status());
}
TF_ASSIGN_OR_RETURN(
auto args, copy_result_on_failure(
MakeFakeArguments(test_module.get(), engine,
options.use_large_float_range,
options.treat_gte_as_data_formatting),
ModuleResult::kOtherError, test_run_result));
if (iteration_literals_proto != nullptr &&
iteration_literals_proto->arguments_size() != 0) {
if (iteration_literals_proto->arguments_size() != args.size()) {
if (test_run_result != nullptr) {
*test_run_result = ModuleResult::kOtherError;
}
return xla::InvalidArgument(
"Failed to use input literals as arguments; mismatched "
"number of expected arguments.");
} else {
for (int i = 0; i < args.size(); ++i) {
if (!literal_comparison::EqualShapes(
xla::Shape(args[i].shape()),
xla::Shape(iteration_literals_proto->arguments(i).shape()))
.ok()) {
if (test_run_result != nullptr) {
*test_run_result = ModuleResult::kOtherError;
}
return xla::InvalidArgument(
"Failed to use input literals for argument %d "
"because of a shape mismatch.",
i);
}
TF_ASSIGN_OR_RETURN(
args[i],
copy_result_on_failure(xla::Literal::CreateFromProto(
iteration_literals_proto->arguments(i)),
ModuleResult::kOtherError, test_run_result));
}
}
}
if (options.print_literals) {
for (int i = 0; i < args.size(); ++i) {
std::cout << "\n** Argument " << i << " **\n"
<< args[i].ToString() << "\n";
}
}
if (iteration_literals_proto != nullptr &&
iteration_literals_proto->arguments_size() == 0) {
for (int i = 0; i < args.size(); ++i) {
*iteration_literals_proto->add_arguments() = args[i].ToProto();
}
}
std::unique_ptr<HloModule> reference_module;
if (reference_runner != nullptr) {
bool skip_deoptimization = options.reference_platform == options.platform;
TF_ASSIGN_OR_RETURN(
reference_module,
copy_result_on_failure(
PrepareReferenceModule(
*test_module, test_runner, config_modifier_hook,
reference_module_modifier_hook, skip_deoptimization),
ModuleResult::kCompilationError, reference_run_result));
}
if (options.force_use_cpu_thunk_runtime_for_test) {
UseCpuThunkRuntime(*test_module);
}
TF_ASSIGN_OR_RETURN(
auto test_result,
copy_result_on_failure(
ExecuteWithRunner(std::move(test_module), buffer_assignment_proto,
args, test_runner, options.run_test_hlo_passes),
ModuleResult::kRuntimeError, test_run_result));
if (test_run_result != nullptr) {
*test_run_result = ModuleResult::kRan;
}
if (options.print_literals) {
std::cout << "\n** Result with test runner " << test_runner->Name()
<< " **\n"
<< test_result.ToString() << "\n";
}
if (iteration_literals_proto != nullptr) {
LiteralProto test_result_proto = test_result.ToProto();
iteration_literals_proto->mutable_result()->Swap(&test_result_proto);
}
if (reference_module == nullptr) {
std::cerr << "Skipping reference runner\n";
return absl::OkStatus();
}
if (const HloInstruction* root_instruction =
reference_module->entry_computation()->root_instruction();
root_instruction->opcode() == HloOpcode::kCustomCall) {
std::cerr << "Skipping reference runner for a custom call "
<< root_instruction->custom_call_target() << "\n";
if (reference_run_result != nullptr) {
*reference_run_result = ModuleResult::kSkipped;
}
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(
auto reference_result,
copy_result_on_failure(
ExecuteWithRunner(std::move(reference_module),
nullptr, args,
reference_runner, options.run_reference_hlo_passes),
ModuleResult::kRuntimeError, reference_run_result));
if (reference_run_result != nullptr) {
*reference_run_result = ModuleResult::kRan;
}
if (options.print_literals) {
std::cout << "\n** Result with reference runner "
<< reference_runner->Name() << " **\n"
<< reference_result.ToString() << "\n";
}
if (iteration_literals_proto != nullptr) {
LiteralProto reference_result_proto = reference_result.ToProto();
iteration_literals_proto->mutable_reference_result()->Swap(
&reference_result_proto);
}
ErrorSpec error_spec(static_cast<float>(options.abs_error_bound),
static_cast<float>(options.rel_error_bound));
absl::Status comparison_status =
      literal_comparison::Near(reference_result, test_result, error_spec,
                               true, &OnMiscompare);
const ModuleResult comparison_result =
comparison_status.ok() ? ModuleResult::kMatched : ModuleResult::kMismatch;
if (test_run_result != nullptr) {
*test_run_result = comparison_result;
}
if (reference_run_result != nullptr) {
*reference_run_result = comparison_result;
}
return comparison_status;
}
struct ChunkResult {
std::string module_name;
ModuleResult test_result = ModuleResult::kDidntRun;
ModuleResult reference_result = ModuleResult::kDidntRun;
absl::Status status;
bool operator<(const ChunkResult& other) const {
if (test_result != other.test_result) {
return test_result < other.test_result;
}
return reference_result < other.reference_result;
}
};
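// Renders the per-chunk results as a fixed-width table, followed by a
// summary of (reference, test) outcome counts.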
std::string BuildResultsTable(absl::Span<const ChunkResult> chunk_results,
size_t num_modules) {
constexpr int kStatusWidth = 21;
constexpr int kNameWidth = 30;
constexpr int kThreeColumnsWidth = 5 + 2 * kStatusWidth + kNameWidth;
constexpr int kTableWidth = kThreeColumnsWidth + 30;
std::ostringstream strstr;
auto print_row = [&](absl::string_view reference, absl::string_view test,
absl::string_view module_name, absl::string_view error) {
std::string formatted_error = absl::StrReplaceAll(
error, {{"\n", absl::StrCat("\n", std::string(kThreeColumnsWidth, ' '),
"|")}});
strstr << " " << std::left << std::setw(kStatusWidth) << reference << "| "
<< std::setw(kStatusWidth) << test << "| " << std::setw(kNameWidth)
<< module_name << "| " << formatted_error << "\n";
};
auto print_line = [&](int line_width) {
strstr << std::string(line_width, '-') << "\n";
};
print_row("Reference", "Test", "Module", "Status");
print_line(kTableWidth);
std::map<std::pair<ModuleResult, ModuleResult>, int> result_counts;
for (const ChunkResult& chunk_result : chunk_results) {
const std::pair<ModuleResult, ModuleResult> result_pair(
chunk_result.reference_result, chunk_result.test_result);
++result_counts[result_pair];
print_row(ModuleResultToString(chunk_result.reference_result),
ModuleResultToString(chunk_result.test_result),
chunk_result.module_name, chunk_result.status.ToString());
}
print_line(kTableWidth);
print_row("Reference", "Test", "Module", "Status");
print_line(kTableWidth);
strstr << "\n\n";
print_line(kThreeColumnsWidth);
print_row("Reference", "Test", "Total count", "");
print_line(kThreeColumnsWidth);
for (const auto& [result, count] : result_counts) {
print_row(ModuleResultToString(result.first),
ModuleResultToString(result.second), absl::StrCat(count), "");
}
print_line(kThreeColumnsWidth);
if (chunk_results.size() < num_modules) {
strstr << "\n(did not " << (num_modules - chunk_results.size())
<< " modules due to earlier failures)\n\n";
}
return strstr.str();
}
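// Decomposes `test_module` into one module per isolated instruction, runs
// and compares each chunk, and prints a summary table of the results.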
absl::Status RunIsolatedAndCompare(
std::unique_ptr<HloModule> test_module,
const BufferAssignmentProto* buffer_assignment_proto,
HloRunnerInterface* test_runner, HloRunnerInterface* reference_runner,
std::minstd_rand0* engine, const RunHloModuleOptions& options,
xla::RunHloModuleIterationLiterals* iteration_literals_proto,
std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>
reference_module_modifier_hook,
std::function<void(HloModuleConfig*)> config_modifier_hook) {
CHECK(test_module);
CHECK(iteration_literals_proto == nullptr)
<< "Cannot run decomposed module if input literals are provided.";
if (options.run_test_hlo_passes || (options.run_reference_hlo_passes &&
!options.reference_platform.empty())) {
LOG(WARNING)
<< "!!! Warning !!! When running decomposed module, running HLO "
"passes is likely not what you want. If you have unoptimized "
"HLO, first convert it to the optimized e.g. using the "
"hlo-opt tool, and then isolate without HLO passes.";
}
std::vector<ChunkResult> chunk_results;
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<HloModule>> modules,
DecomposeHloModule(*test_module, true));
absl::Status status = absl::OkStatus();
for (std::unique_ptr<HloModule>& module : modules) {
const std::string module_name = module->name();
ModuleResult test_module_result = ModuleResult::kDidntRun;
ModuleResult reference_module_result = ModuleResult::kDidntRun;
absl::Status chunk_status = RunAndCompareInternal(
std::move(module), buffer_assignment_proto, test_runner,
reference_runner, engine, options, iteration_literals_proto,
reference_module_modifier_hook, config_modifier_hook,
&test_module_result, &reference_module_result);
chunk_results.push_back({std::move(module_name), test_module_result,
reference_module_result, chunk_status});
status.Update(chunk_status);
}
absl::c_sort(chunk_results);
std::cout << BuildResultsTable(chunk_results, modules.size());
return status;
}
}
absl::Status RunAndCompare(
std::unique_ptr<HloModule> test_module,
const BufferAssignmentProto* buffer_assignment_proto,
HloRunnerInterface* test_runner, HloRunnerInterface* reference_runner,
std::minstd_rand0* engine, const RunHloModuleOptions& options,
xla::RunHloModuleIterationLiterals* iteration_literals_proto,
std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>
reference_module_modifier_hook,
std::function<void(HloModuleConfig*)> config_modifier_hook) {
if (options.isolate_instructions) {
return RunIsolatedAndCompare(
std::move(test_module), buffer_assignment_proto, test_runner,
reference_runner, engine, options, iteration_literals_proto,
reference_module_modifier_hook, config_modifier_hook);
}
return RunAndCompareInternal(
std::move(test_module), buffer_assignment_proto, test_runner,
reference_runner, engine, options, iteration_literals_proto,
reference_module_modifier_hook, config_modifier_hook, nullptr, nullptr);
}
absl::Status RunAndCompare(
const std::string& hlo_filename, HloRunnerInterface* test_runner,
HloRunnerInterface* reference_runner, std::minstd_rand0* engine,
const RunHloModuleOptions& options,
xla::RunHloModuleIterationLiterals* iteration_literals_proto,
std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>
reference_module_modifier_hook,
std::function<void(HloModuleConfig*)> config_modifier_hook,
std::function<absl::Status(const RunHloModuleOptions& options,
HloModule& module)>
compilation_env_modifier_hook) {
std::string input_format = options.input_format;
if (input_format.empty()) {
input_format = std::string(tsl::io::Extension(hlo_filename));
}
BufferAssignmentProto buffer_assignment_proto;
TF_ASSIGN_OR_RETURN(
auto test_module,
LoadModuleFromFile(
hlo_filename, input_format, hlo_module_loader_details::Config(),
config_modifier_hook,
options.use_buffer_assignment_from_proto ? &buffer_assignment_proto
: nullptr));
HloVerifier verifier(
HloVerifierOpts{}.WithLayoutSensitive(false).WithAllowMixedPrecision(
true));
TF_RETURN_IF_ERROR(verifier.Run(test_module.get()).status());
if (compilation_env_modifier_hook) {
TF_CHECK_OK(compilation_env_modifier_hook(options, *test_module))
<< "Could not adjust the compilation environment for user provided "
"hlo module.";
}
if (options.print_literals) {
std::cout << "\n** Buffer assignment proto **\n"
<< buffer_assignment_proto.DebugString() << "\n";
}
std::unique_ptr<RunHloModuleIterationLiterals> iteration_literals_proto_local;
if (iteration_literals_proto == nullptr) {
if (!options.force_fake_data && !options.isolate_instructions &&
(input_format == "pb" || input_format == "pbtxt")) {
LOG(INFO) << "Using input data from the user-provided snapshot.";
TF_ASSIGN_OR_RETURN(iteration_literals_proto_local,
LoadInputFromFile(hlo_filename, input_format));
iteration_literals_proto = iteration_literals_proto_local.get();
} else if (input_format == "pb" || input_format == "pbtxt") {
LOG(INFO)
<< "Ignoring input data from snapshot and using fake data instead.";
}
}
return RunAndCompare(
std::move(test_module),
options.use_buffer_assignment_from_proto ? &buffer_assignment_proto
: nullptr,
test_runner, reference_runner, engine, options, iteration_literals_proto,
reference_module_modifier_hook, config_modifier_hook);
}
void ReadInputLiteralsFromFile(const std::string& file_path,
RunHloModuleLiterals* input_literals_proto) {
if (!tsl::ReadTextOrBinaryProto(tsl::Env::Default(), file_path,
input_literals_proto)
.ok() ||
input_literals_proto->iterations().empty()) {
xla::RunHloModuleIterationLiterals iteration_literals_proto;
if (!tsl::ReadTextOrBinaryProto(tsl::Env::Default(), file_path,
&iteration_literals_proto)
.ok()) {
LOG(QFATAL) << "Failed to deserialize input literals from file "
<< file_path << "\n";
}
input_literals_proto->clear_iterations();
*input_literals_proto->add_iterations() = iteration_literals_proto;
}
}
} | #include "xla/tools/run_hlo_module.h"
#include <string>
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/tools/run_hlo_module.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
RunHloModuleIterationLiterals GetTestProto() {
RunHloModuleIterationLiterals result;
*result.add_arguments() = LiteralUtil::CreateR1<float>({0.1, 0.2}).ToProto();
*result.add_arguments() = LiteralUtil::CreateR1<float>({0.3, 0.4}).ToProto();
*result.mutable_result() = LiteralUtil::CreateR1<float>({0.5, 0.6}).ToProto();
*result.mutable_reference_result() =
LiteralUtil::CreateR1<float>({0.5, 0.6}).ToProto();
return result;
}
TEST(ReadInputLiteralsFromFile, ReadRunHloModuleLiteralsBinaryProto) {
std::string file_path;
auto env = tsl::Env::Default();
EXPECT_TRUE(env->LocalTempFilename(&file_path));
auto proto = GetTestProto();
RunHloModuleLiterals wrapped_proto;
*wrapped_proto.add_iterations() = proto;
TF_ASSERT_OK(tsl::WriteBinaryProto(env, file_path, wrapped_proto));
RunHloModuleLiterals result;
ReadInputLiteralsFromFile(file_path, &result);
EXPECT_EQ(result.SerializeAsString(), wrapped_proto.SerializeAsString());
}
TEST(ReadInputLiteralsFromFile, ReadRunHloModuleLiteralsTextProto) {
std::string file_path;
auto env = tsl::Env::Default();
EXPECT_TRUE(env->LocalTempFilename(&file_path));
auto proto = GetTestProto();
RunHloModuleLiterals wrapped_proto;
*wrapped_proto.add_iterations() = proto;
TF_ASSERT_OK(tsl::WriteTextProto(env, file_path, wrapped_proto));
RunHloModuleLiterals result;
ReadInputLiteralsFromFile(file_path, &result);
EXPECT_EQ(result.SerializeAsString(), wrapped_proto.SerializeAsString());
}
TEST(ReadInputLiteralsFromFile, ReadRunHloModuleIterationLiteralsBinaryProto) {
std::string file_path;
auto env = tsl::Env::Default();
EXPECT_TRUE(env->LocalTempFilename(&file_path));
auto proto = GetTestProto();
TF_ASSERT_OK(tsl::WriteBinaryProto(env, file_path, proto));
RunHloModuleLiterals result;
ReadInputLiteralsFromFile(file_path, &result);
EXPECT_EQ(result.iterations_size(), 1);
EXPECT_EQ(result.iterations(0).SerializeAsString(),
proto.SerializeAsString());
}
TEST(ReadInputLiteralsFromFile, ReadRunHloModuleIterationLiteralsTextProto) {
std::string file_path;
auto env = tsl::Env::Default();
EXPECT_TRUE(env->LocalTempFilename(&file_path));
auto proto = GetTestProto();
TF_ASSERT_OK(tsl::WriteTextProto(env, file_path, proto));
RunHloModuleLiterals result;
ReadInputLiteralsFromFile(file_path, &result);
EXPECT_EQ(result.iterations_size(), 1);
EXPECT_EQ(result.iterations(0).SerializeAsString(),
proto.SerializeAsString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/run_hlo_module.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/run_hlo_module_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1425c71e-adda-4781-a015-51ddbb051313 | cpp | google/quiche | quic_crypto_stream | quiche/quic/core/quic_crypto_stream.cc | quiche/quic/core/quic_crypto_stream_test.cc | #include "quiche/quic/core/quic_crypto_stream.h"
#include <algorithm>
#include <optional>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/frames/quic_crypto_frame.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
#define ENDPOINT \
  (session()->perspective() == Perspective::IS_SERVER ? "Server: " \
                                                      : "Client: ")
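// The crypto stream is special-cased: versions that carry handshake data in
// CRYPTO frames give it no real stream ID, while older versions use the
// reserved crypto stream ID. Connection-level flow control never applies.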
QuicCryptoStream::QuicCryptoStream(QuicSession* session)
: QuicStream(
QuicVersionUsesCryptoFrames(session->transport_version())
? QuicUtils::GetInvalidStreamId(session->transport_version())
: QuicUtils::GetCryptoStreamId(session->transport_version()),
session,
          /*is_static=*/true,
QuicVersionUsesCryptoFrames(session->transport_version())
? CRYPTO
: BIDIRECTIONAL),
substreams_{{{this}, {this}, {this}}} {
DisableConnectionFlowControlForThisStream();
}
QuicCryptoStream::~QuicCryptoStream() {}
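// Per-packet framing overhead for a handshake message, computed for a stream
// frame in a long-header packet with a 4-byte packet number.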
QuicByteCount QuicCryptoStream::CryptoMessageFramingOverhead(
QuicTransportVersion version, QuicConnectionId connection_id) {
QUICHE_DCHECK(
QuicUtils::IsConnectionIdValidForVersion(connection_id, version));
quiche::QuicheVariableLengthIntegerLength retry_token_length_length =
quiche::VARIABLE_LENGTH_INTEGER_LENGTH_1;
quiche::QuicheVariableLengthIntegerLength length_length =
quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
if (!QuicVersionHasLongHeaderLengths(version)) {
retry_token_length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0;
length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0;
}
  return QuicPacketCreator::StreamFramePacketOverhead(
      version, connection_id.length(), /*source_connection_id_length=*/0,
      /*include_version=*/true,
      /*include_diversification_nonce=*/true, PACKET_4BYTE_PACKET_NUMBER,
      retry_token_length_length, length_length,
      /*offset=*/0);
}
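// Routes an incoming CRYPTO frame to the sequencer of the packet number space
// matching its decryption level; closes the connection on frames at an
// unexpected level or when the buffered data exceeds the per-level limit.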
void QuicCryptoStream::OnCryptoFrame(const QuicCryptoFrame& frame) {
QUIC_BUG_IF(quic_bug_12573_1,
!QuicVersionUsesCryptoFrames(session()->transport_version()))
<< "Versions less than 47 shouldn't receive CRYPTO frames";
EncryptionLevel level = session()->connection()->last_decrypted_level();
if (!IsCryptoFrameExpectedForEncryptionLevel(level)) {
OnUnrecoverableError(
IETF_QUIC_PROTOCOL_VIOLATION,
absl::StrCat("CRYPTO_FRAME is unexpectedly received at level ", level));
return;
}
CryptoSubstream& substream =
substreams_[QuicUtils::GetPacketNumberSpace(level)];
substream.sequencer.OnCryptoFrame(frame);
  if (substream.sequencer.NumBytesBuffered() >
      BufferSizeLimitForLevel(level)) {
OnUnrecoverableError(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA,
"Too much crypto data received");
}
}
void QuicCryptoStream::OnStreamFrame(const QuicStreamFrame& frame) {
if (QuicVersionUsesCryptoFrames(session()->transport_version())) {
QUIC_PEER_BUG(quic_peer_bug_12573_2)
<< "Crypto data received in stream frame instead of crypto frame";
OnUnrecoverableError(QUIC_INVALID_STREAM_DATA, "Unexpected stream frame");
}
QuicStream::OnStreamFrame(frame);
}
void QuicCryptoStream::OnDataAvailable() {
EncryptionLevel level = session()->connection()->last_decrypted_level();
if (!QuicVersionUsesCryptoFrames(session()->transport_version())) {
OnDataAvailableInSequencer(sequencer(), level);
return;
}
OnDataAvailableInSequencer(
&substreams_[QuicUtils::GetPacketNumberSpace(level)].sequencer, level);
}
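// Feeds contiguous readable bytes from |sequencer| into the handshake message
// parser, releasing the sequencer's buffer once one-RTT keys are available
// and no partial message remains.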
void QuicCryptoStream::OnDataAvailableInSequencer(
QuicStreamSequencer* sequencer, EncryptionLevel level) {
struct iovec iov;
while (sequencer->GetReadableRegion(&iov)) {
absl::string_view data(static_cast<char*>(iov.iov_base), iov.iov_len);
if (!crypto_message_parser()->ProcessInput(data, level)) {
OnUnrecoverableError(crypto_message_parser()->error(),
crypto_message_parser()->error_detail());
return;
}
sequencer->MarkConsumed(iov.iov_len);
if (one_rtt_keys_available() &&
crypto_message_parser()->InputBytesRemaining() == 0) {
sequencer->ReleaseBufferIfEmpty();
}
}
}
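// Buffers |data| at |level| and tries to send it immediately unless earlier
// crypto data is still queued; pre-CRYPTO-frame versions fall back to the
// regular stream write path.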
void QuicCryptoStream::WriteCryptoData(EncryptionLevel level,
absl::string_view data) {
if (!QuicVersionUsesCryptoFrames(session()->transport_version())) {
    WriteOrBufferDataAtLevel(data, /*fin=*/false, level,
                             /*ack_listener=*/nullptr);
return;
}
if (data.empty()) {
QUIC_BUG(quic_bug_10322_1) << "Empty crypto data being written";
return;
}
const bool had_buffered_data = HasBufferedCryptoFrames();
QuicStreamSendBuffer* send_buffer =
&substreams_[QuicUtils::GetPacketNumberSpace(level)].send_buffer;
QuicStreamOffset offset = send_buffer->stream_offset();
if (GetQuicFlag(quic_bounded_crypto_send_buffer)) {
QUIC_BUG_IF(quic_crypto_stream_offset_lt_bytes_written,
offset < send_buffer->stream_bytes_written());
uint64_t current_buffer_size =
offset - std::min(offset, send_buffer->stream_bytes_written());
if (current_buffer_size > 0) {
QUIC_CODE_COUNT(quic_received_crypto_data_with_non_empty_send_buffer);
if (BufferSizeLimitForLevel(level) <
(current_buffer_size + data.length())) {
QUIC_BUG(quic_crypto_send_buffer_overflow)
<< absl::StrCat("Too much data for crypto send buffer with level: ",
EncryptionLevelToString(level),
", current_buffer_size: ", current_buffer_size,
", data length: ", data.length(),
", SNI: ", crypto_negotiated_params().sni);
OnUnrecoverableError(QUIC_INTERNAL_ERROR,
"Too much data for crypto send buffer");
return;
}
}
}
send_buffer->SaveStreamData(data);
if (kMaxStreamLength - offset < data.length()) {
QUIC_BUG(quic_bug_10322_2) << "Writing too much crypto handshake data";
OnUnrecoverableError(QUIC_INTERNAL_ERROR,
"Writing too much crypto handshake data");
return;
}
if (had_buffered_data) {
return;
}
size_t bytes_consumed = stream_delegate()->SendCryptoData(
level, data.length(), offset, NOT_RETRANSMISSION);
send_buffer->OnStreamDataConsumed(bytes_consumed);
}
size_t QuicCryptoStream::BufferSizeLimitForLevel(EncryptionLevel) const {
return GetQuicFlag(quic_max_buffered_crypto_bytes);
}
bool QuicCryptoStream::OnCryptoFrameAcked(const QuicCryptoFrame& frame,
                                          QuicTime::Delta /*ack_delay_time*/) {
QuicByteCount newly_acked_length = 0;
if (!substreams_[QuicUtils::GetPacketNumberSpace(frame.level)]
.send_buffer.OnStreamDataAcked(frame.offset, frame.data_length,
&newly_acked_length)) {
OnUnrecoverableError(QUIC_INTERNAL_ERROR,
"Trying to ack unsent crypto data.");
return false;
}
return newly_acked_length > 0;
}
void QuicCryptoStream::OnStreamReset(const QuicRstStreamFrame& /*frame*/) {
stream_delegate()->OnStreamError(QUIC_INVALID_STREAM_ID,
"Attempt to reset crypto stream");
}
void QuicCryptoStream::NeuterUnencryptedStreamData() {
NeuterStreamDataOfEncryptionLevel(ENCRYPTION_INITIAL);
}
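// Marks all crypto data sent at |level| as acked so that it is never
// retransmitted.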
void QuicCryptoStream::NeuterStreamDataOfEncryptionLevel(
EncryptionLevel level) {
if (!QuicVersionUsesCryptoFrames(session()->transport_version())) {
for (const auto& interval : bytes_consumed_[level]) {
QuicByteCount newly_acked_length = 0;
send_buffer().OnStreamDataAcked(
interval.min(), interval.max() - interval.min(), &newly_acked_length);
}
return;
}
QuicStreamSendBuffer* send_buffer =
&substreams_[QuicUtils::GetPacketNumberSpace(level)].send_buffer;
QuicIntervalSet<QuicStreamOffset> to_ack = send_buffer->bytes_acked();
to_ack.Complement(0, send_buffer->stream_offset());
for (const auto& interval : to_ack) {
QuicByteCount newly_acked_length = 0;
send_buffer->OnStreamDataAcked(
interval.min(), interval.max() - interval.min(), &newly_acked_length);
}
}
void QuicCryptoStream::OnStreamDataConsumed(QuicByteCount bytes_consumed) {
if (QuicVersionUsesCryptoFrames(session()->transport_version())) {
QUIC_BUG(quic_bug_10322_3)
<< "Stream data consumed when CRYPTO frames should be in use";
}
if (bytes_consumed > 0) {
bytes_consumed_[session()->connection()->encryption_level()].Add(
stream_bytes_written(), stream_bytes_written() + bytes_consumed);
}
QuicStream::OnStreamDataConsumed(bytes_consumed);
}
bool QuicCryptoStream::HasPendingCryptoRetransmission() const {
if (!QuicVersionUsesCryptoFrames(session()->transport_version())) {
return false;
}
for (const auto& substream : substreams_) {
if (substream.send_buffer.HasPendingRetransmission()) {
return true;
}
}
return false;
}
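// Retransmits lost CRYPTO frames in packet-number-space order, stopping as
// soon as the connection becomes write blocked.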
void QuicCryptoStream::WritePendingCryptoRetransmission() {
QUIC_BUG_IF(quic_bug_12573_3,
!QuicVersionUsesCryptoFrames(session()->transport_version()))
<< "Versions less than 47 don't write CRYPTO frames";
for (uint8_t i = INITIAL_DATA; i <= APPLICATION_DATA; ++i) {
auto packet_number_space = static_cast<PacketNumberSpace>(i);
QuicStreamSendBuffer* send_buffer =
&substreams_[packet_number_space].send_buffer;
while (send_buffer->HasPendingRetransmission()) {
auto pending = send_buffer->NextPendingRetransmission();
size_t bytes_consumed = stream_delegate()->SendCryptoData(
GetEncryptionLevelToSendCryptoDataOfSpace(packet_number_space),
pending.length, pending.offset, HANDSHAKE_RETRANSMISSION);
send_buffer->OnStreamDataRetransmitted(pending.offset, bytes_consumed);
if (bytes_consumed < pending.length) {
return;
}
}
}
}
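// Pre-CRYPTO-frame retransmission path: each lost byte range is resent at the
// encryption level it was originally consumed at, one interval at a time.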
void QuicCryptoStream::WritePendingRetransmission() {
while (HasPendingRetransmission()) {
StreamPendingRetransmission pending =
send_buffer().NextPendingRetransmission();
QuicIntervalSet<QuicStreamOffset> retransmission(
pending.offset, pending.offset + pending.length);
EncryptionLevel retransmission_encryption_level = ENCRYPTION_INITIAL;
for (size_t i = 0; i < NUM_ENCRYPTION_LEVELS; ++i) {
if (retransmission.Intersects(bytes_consumed_[i])) {
retransmission_encryption_level = static_cast<EncryptionLevel>(i);
retransmission.Intersection(bytes_consumed_[i]);
break;
}
}
pending.offset = retransmission.begin()->min();
pending.length =
retransmission.begin()->max() - retransmission.begin()->min();
QuicConsumedData consumed = RetransmitStreamDataAtLevel(
pending.offset, pending.length, retransmission_encryption_level,
HANDSHAKE_RETRANSMISSION);
if (consumed.bytes_consumed < pending.length) {
break;
}
}
}
bool QuicCryptoStream::RetransmitStreamData(QuicStreamOffset offset,
QuicByteCount data_length,
                                            bool /*fin*/,
TransmissionType type) {
QUICHE_DCHECK(type == HANDSHAKE_RETRANSMISSION || type == PTO_RETRANSMISSION);
QuicIntervalSet<QuicStreamOffset> retransmission(offset,
offset + data_length);
EncryptionLevel send_encryption_level = ENCRYPTION_INITIAL;
for (size_t i = 0; i < NUM_ENCRYPTION_LEVELS; ++i) {
if (retransmission.Intersects(bytes_consumed_[i])) {
send_encryption_level = static_cast<EncryptionLevel>(i);
break;
}
}
retransmission.Difference(bytes_acked());
for (const auto& interval : retransmission) {
QuicStreamOffset retransmission_offset = interval.min();
QuicByteCount retransmission_length = interval.max() - interval.min();
QuicConsumedData consumed = RetransmitStreamDataAtLevel(
retransmission_offset, retransmission_length, send_encryption_level,
type);
if (consumed.bytes_consumed < retransmission_length) {
return false;
}
}
return true;
}
QuicConsumedData QuicCryptoStream::RetransmitStreamDataAtLevel(
QuicStreamOffset retransmission_offset, QuicByteCount retransmission_length,
EncryptionLevel encryption_level, TransmissionType type) {
QUICHE_DCHECK(type == HANDSHAKE_RETRANSMISSION || type == PTO_RETRANSMISSION);
const auto consumed = stream_delegate()->WritevData(
id(), retransmission_length, retransmission_offset, NO_FIN, type,
encryption_level);
QUIC_DVLOG(1) << ENDPOINT << "stream " << id()
<< " is forced to retransmit stream data ["
<< retransmission_offset << ", "
<< retransmission_offset + retransmission_length
<< "), with encryption level: " << encryption_level
<< ", consumed: " << consumed;
OnStreamFrameRetransmitted(retransmission_offset, consumed.bytes_consumed,
consumed.fin_consumed);
return consumed;
}
uint64_t QuicCryptoStream::crypto_bytes_read() const {
if (!QuicVersionUsesCryptoFrames(session()->transport_version())) {
return stream_bytes_read();
}
uint64_t bytes_read = 0;
for (const CryptoSubstream& substream : substreams_) {
bytes_read += substream.sequencer.NumBytesConsumed();
}
return bytes_read;
}
uint64_t QuicCryptoStream::BytesReadOnLevel(EncryptionLevel level) const {
return substreams_[QuicUtils::GetPacketNumberSpace(level)]
.sequencer.NumBytesConsumed();
}
uint64_t QuicCryptoStream::BytesSentOnLevel(EncryptionLevel level) const {
return substreams_[QuicUtils::GetPacketNumberSpace(level)]
.send_buffer.stream_bytes_written();
}
bool QuicCryptoStream::WriteCryptoFrame(EncryptionLevel level,
QuicStreamOffset offset,
QuicByteCount data_length,
QuicDataWriter* writer) {
QUIC_BUG_IF(quic_bug_12573_4,
!QuicVersionUsesCryptoFrames(session()->transport_version()))
<< "Versions less than 47 don't write CRYPTO frames (2)";
return substreams_[QuicUtils::GetPacketNumberSpace(level)]
.send_buffer.WriteStreamData(offset, data_length, writer);
}
void QuicCryptoStream::OnCryptoFrameLost(QuicCryptoFrame* crypto_frame) {
QUIC_BUG_IF(quic_bug_12573_5,
!QuicVersionUsesCryptoFrames(session()->transport_version()))
<< "Versions less than 47 don't lose CRYPTO frames";
substreams_[QuicUtils::GetPacketNumberSpace(crypto_frame->level)]
.send_buffer.OnStreamDataLost(crypto_frame->offset,
crypto_frame->data_length);
}
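// Retransmits the still-unacked portion of |crypto_frame|, possibly at a
// later encryption level than the original transmission. Returns false if
// the connection becomes write blocked before all data is resent.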
bool QuicCryptoStream::RetransmitData(QuicCryptoFrame* crypto_frame,
TransmissionType type) {
QUIC_BUG_IF(quic_bug_12573_6,
!QuicVersionUsesCryptoFrames(session()->transport_version()))
<< "Versions less than 47 don't retransmit CRYPTO frames";
QuicIntervalSet<QuicStreamOffset> retransmission(
crypto_frame->offset, crypto_frame->offset + crypto_frame->data_length);
QuicStreamSendBuffer* send_buffer =
&substreams_[QuicUtils::GetPacketNumberSpace(crypto_frame->level)]
.send_buffer;
retransmission.Difference(send_buffer->bytes_acked());
if (retransmission.Empty()) {
return true;
}
for (const auto& interval : retransmission) {
size_t retransmission_offset = interval.min();
size_t retransmission_length = interval.max() - interval.min();
EncryptionLevel retransmission_encryption_level =
GetEncryptionLevelToSendCryptoDataOfSpace(
QuicUtils::GetPacketNumberSpace(crypto_frame->level));
size_t bytes_consumed = stream_delegate()->SendCryptoData(
retransmission_encryption_level, retransmission_length,
retransmission_offset, type);
send_buffer->OnStreamDataRetransmitted(retransmission_offset,
bytes_consumed);
if (bytes_consumed < retransmission_length) {
return false;
}
}
return true;
}
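// Flushes crypto data buffered while the connection was write blocked,
// lowest packet number space first.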
void QuicCryptoStream::WriteBufferedCryptoFrames() {
QUIC_BUG_IF(quic_bug_12573_7,
!QuicVersionUsesCryptoFrames(session()->transport_version()))
<< "Versions less than 47 don't use CRYPTO frames";
for (uint8_t i = INITIAL_DATA; i <= APPLICATION_DATA; ++i) {
auto packet_number_space = static_cast<PacketNumberSpace>(i);
QuicStreamSendBuffer* send_buffer =
&substreams_[packet_number_space].send_buffer;
const size_t data_length =
send_buffer->stream_offset() - send_buffer->stream_bytes_written();
if (data_length == 0) {
continue;
}
size_t bytes_consumed = stream_delegate()->SendCryptoData(
GetEncryptionLevelToSendCryptoDataOfSpace(packet_number_space),
data_length, send_buffer->stream_bytes_written(), NOT_RETRANSMISSION);
send_buffer->OnStreamDataConsumed(bytes_consumed);
if (bytes_consumed < data_length) {
break;
}
}
}
bool QuicCryptoStream::HasBufferedCryptoFrames() const {
QUIC_BUG_IF(quic_bug_12573_8,
!QuicVersionUsesCryptoFrames(session()->transport_version()))
<< "Versions less than 47 don't use CRYPTO frames";
for (const CryptoSubstream& substream : substreams_) {
const QuicStreamSendBuffer& send_buffer = substream.send_buffer;
QUICHE_DCHECK_GE(send_buffer.stream_offset(),
send_buffer.stream_bytes_written());
if (send_buffer.stream_offset() > send_buffer.stream_bytes_written()) {
return true;
}
}
return false;
}
bool QuicCryptoStream::IsFrameOutstanding(EncryptionLevel level, size_t offset,
size_t length) const {
if (!QuicVersionUsesCryptoFrames(session()->transport_version())) {
return false;
}
return substreams_[QuicUtils::GetPacketNumberSpace(level)]
.send_buffer.IsStreamDataOutstanding(offset, length);
}
bool QuicCryptoStream::IsWaitingForAcks() const {
if (!QuicVersionUsesCryptoFrames(session()->transport_version())) {
return QuicStream::IsWaitingForAcks();
}
for (const CryptoSubstream& substream : substreams_) {
if (substream.send_buffer.stream_bytes_outstanding()) {
return true;
}
}
return false;
}
QuicCryptoStream::CryptoSubstream::CryptoSubstream(
QuicCryptoStream* crypto_stream)
: sequencer(crypto_stream),
send_buffer(crypto_stream->session()
->connection()
->helper()
->GetStreamSendBufferAllocator()) {}
#undef ENDPOINT
} | #include "quiche/quic/core/quic_crypto_stream.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using testing::_;
using testing::InSequence;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::Return;
namespace quic {
namespace test {
namespace {
class MockQuicCryptoStream : public QuicCryptoStream,
public QuicCryptoHandshaker {
public:
explicit MockQuicCryptoStream(QuicSession* session)
: QuicCryptoStream(session),
QuicCryptoHandshaker(this, session),
params_(new QuicCryptoNegotiatedParameters) {}
MockQuicCryptoStream(const MockQuicCryptoStream&) = delete;
MockQuicCryptoStream& operator=(const MockQuicCryptoStream&) = delete;
void OnHandshakeMessage(const CryptoHandshakeMessage& message) override {
messages_.push_back(message);
}
std::vector<CryptoHandshakeMessage>* messages() { return &messages_; }
ssl_early_data_reason_t EarlyDataReason() const override {
return ssl_early_data_unknown;
}
bool encryption_established() const override { return false; }
bool one_rtt_keys_available() const override { return false; }
const QuicCryptoNegotiatedParameters& crypto_negotiated_params()
const override {
return *params_;
}
CryptoMessageParser* crypto_message_parser() override {
return QuicCryptoHandshaker::crypto_message_parser();
}
void OnPacketDecrypted(EncryptionLevel ) override {}
void OnOneRttPacketAcknowledged() override {}
void OnHandshakePacketSent() override {}
void OnHandshakeDoneReceived() override {}
void OnNewTokenReceived(absl::string_view ) override {}
std::string GetAddressToken(
const CachedNetworkParameters* )
const override {
return "";
}
bool ValidateAddressToken(absl::string_view ) const override {
return true;
}
const CachedNetworkParameters* PreviousCachedNetworkParams() const override {
return nullptr;
}
void SetPreviousCachedNetworkParams(
CachedNetworkParameters ) override {}
HandshakeState GetHandshakeState() const override { return HANDSHAKE_START; }
void SetServerApplicationStateForResumption(
std::unique_ptr<ApplicationState> ) override {}
std::unique_ptr<QuicDecrypter> AdvanceKeysAndCreateCurrentOneRttDecrypter()
override {
return nullptr;
}
std::unique_ptr<QuicEncrypter> CreateCurrentOneRttEncrypter() override {
return nullptr;
}
bool ExportKeyingMaterial(absl::string_view ,
absl::string_view ,
size_t ,
std::string* ) override {
return false;
}
SSL* GetSsl() const override { return nullptr; }
bool IsCryptoFrameExpectedForEncryptionLevel(
EncryptionLevel level) const override {
return level != ENCRYPTION_ZERO_RTT;
}
EncryptionLevel GetEncryptionLevelToSendCryptoDataOfSpace(
PacketNumberSpace space) const override {
switch (space) {
case INITIAL_DATA:
return ENCRYPTION_INITIAL;
case HANDSHAKE_DATA:
return ENCRYPTION_HANDSHAKE;
case APPLICATION_DATA:
return QuicCryptoStream::session()
->GetEncryptionLevelToSendApplicationData();
default:
QUICHE_DCHECK(false);
return NUM_ENCRYPTION_LEVELS;
}
}
private:
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params_;
std::vector<CryptoHandshakeMessage> messages_;
};
class QuicCryptoStreamTest : public QuicTest {
public:
QuicCryptoStreamTest()
: connection_(new MockQuicConnection(&helper_, &alarm_factory_,
Perspective::IS_CLIENT)),
session_(connection_, false) {
EXPECT_CALL(*static_cast<MockPacketWriter*>(connection_->writer()),
WritePacket(_, _, _, _, _, _))
.WillRepeatedly(Return(WriteResult(WRITE_STATUS_OK, 0)));
stream_ = new MockQuicCryptoStream(&session_);
session_.SetCryptoStream(stream_);
session_.Initialize();
message_.set_tag(kSHLO);
message_.SetStringPiece(1, "abc");
message_.SetStringPiece(2, "def");
ConstructHandshakeMessage();
}
QuicCryptoStreamTest(const QuicCryptoStreamTest&) = delete;
QuicCryptoStreamTest& operator=(const QuicCryptoStreamTest&) = delete;
void ConstructHandshakeMessage() {
CryptoFramer framer;
message_data_ = framer.ConstructHandshakeMessage(message_);
}
protected:
MockQuicConnectionHelper helper_;
MockAlarmFactory alarm_factory_;
MockQuicConnection* connection_;
MockQuicSpdySession session_;
MockQuicCryptoStream* stream_;
CryptoHandshakeMessage message_;
std::unique_ptr<QuicData> message_data_;
};
TEST_F(QuicCryptoStreamTest, NotInitiallyConnected) {
EXPECT_FALSE(stream_->encryption_established());
EXPECT_FALSE(stream_->one_rtt_keys_available());
}
TEST_F(QuicCryptoStreamTest, ProcessRawData) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
stream_->OnStreamFrame(QuicStreamFrame(
QuicUtils::GetCryptoStreamId(connection_->transport_version()),
        /*fin=*/false,
        /*offset=*/0, message_data_->AsStringPiece()));
} else {
stream_->OnCryptoFrame(QuicCryptoFrame(ENCRYPTION_INITIAL, 0,
message_data_->AsStringPiece()));
}
ASSERT_EQ(1u, stream_->messages()->size());
const CryptoHandshakeMessage& message = (*stream_->messages())[0];
EXPECT_EQ(kSHLO, message.tag());
EXPECT_EQ(2u, message.tag_value_map().size());
EXPECT_EQ("abc", crypto_test_utils::GetValueForTag(message, 1));
EXPECT_EQ("def", crypto_test_utils::GetValueForTag(message, 2));
}
TEST_F(QuicCryptoStreamTest, ProcessBadData) {
std::string bad(message_data_->data(), message_data_->length());
  const int kFirstTagIndex = sizeof(uint32_t) +   // message tag
                             sizeof(uint16_t) +   // number of tag-value pairs
                             sizeof(uint16_t);    // padding
EXPECT_EQ(1, bad[kFirstTagIndex]);
bad[kFirstTagIndex] = 0x7F;
EXPECT_CALL(*connection_, CloseConnection(QUIC_CRYPTO_TAGS_OUT_OF_ORDER,
testing::_, testing::_));
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
stream_->OnStreamFrame(QuicStreamFrame(
QuicUtils::GetCryptoStreamId(connection_->transport_version()),
        /*fin=*/false, /*offset=*/0, bad));
} else {
stream_->OnCryptoFrame(
QuicCryptoFrame(ENCRYPTION_INITIAL, 0, bad));
}
}
TEST_F(QuicCryptoStreamTest, NoConnectionLevelFlowControl) {
EXPECT_FALSE(
QuicStreamPeer::StreamContributesToConnectionFlowControl(stream_));
}
TEST_F(QuicCryptoStreamTest, RetransmitCryptoData) {
if (QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
InSequence s;
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1350, 0, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
  stream_->WriteOrBufferData(data, /*fin=*/false, /*ack_listener=*/nullptr);
connection_->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
EXPECT_EQ(ENCRYPTION_ZERO_RTT, connection_->encryption_level());
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1350, 1350, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
  stream_->WriteOrBufferData(data, /*fin=*/false, /*ack_listener=*/nullptr);
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
stream_->OnStreamFrameLost(0, 1000, false);
EXPECT_TRUE(stream_->HasPendingRetransmission());
stream_->OnStreamFrameLost(1200, 800, false);
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1000, 0, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
150, 1200, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
650, 1350, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
stream_->OnCanWrite();
EXPECT_FALSE(stream_->HasPendingRetransmission());
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
}
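// Lost CRYPTO frames are retransmitted at the current write level of their
// packet number space, e.g. lost 0-RTT data is resent under 1-RTT keys.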
TEST_F(QuicCryptoStreamTest, RetransmitCryptoDataInCryptoFrames) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_CALL(*connection_, SendCryptoData(_, _, _)).Times(0);
InSequence s;
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1350, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data);
std::unique_ptr<NullEncrypter> encrypter =
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT);
connection_->SetEncrypter(ENCRYPTION_ZERO_RTT, std::move(encrypter));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
EXPECT_EQ(ENCRYPTION_ZERO_RTT, connection_->encryption_level());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_ZERO_RTT, 1350, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_ZERO_RTT, data);
QuicCryptoFrame lost_frame = QuicCryptoFrame(ENCRYPTION_ZERO_RTT, 0, 650);
stream_->OnCryptoFrameLost(&lost_frame);
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_ZERO_RTT, 650, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WritePendingCryptoRetransmission();
connection_->SetEncrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
lost_frame = QuicCryptoFrame(ENCRYPTION_INITIAL, 0, 1000);
stream_->OnCryptoFrameLost(&lost_frame);
EXPECT_TRUE(stream_->HasPendingCryptoRetransmission());
lost_frame = QuicCryptoFrame(ENCRYPTION_INITIAL, 1200, 150);
stream_->OnCryptoFrameLost(&lost_frame);
lost_frame = QuicCryptoFrame(ENCRYPTION_ZERO_RTT, 0, 650);
stream_->OnCryptoFrameLost(&lost_frame);
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1000, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 150, 1200))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_FORWARD_SECURE, 650, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WritePendingCryptoRetransmission();
EXPECT_FALSE(stream_->HasPendingCryptoRetransmission());
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
}
TEST_F(QuicCryptoStreamTest, RetransmitEncryptionHandshakeLevelCryptoFrames) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_CALL(*connection_, SendCryptoData(_, _, _)).Times(0);
InSequence s;
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1000, 'a');
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1000, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data);
std::unique_ptr<NullEncrypter> encrypter =
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT);
connection_->SetEncrypter(ENCRYPTION_HANDSHAKE, std::move(encrypter));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
EXPECT_EQ(ENCRYPTION_HANDSHAKE, connection_->encryption_level());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_HANDSHAKE, 1000, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_HANDSHAKE, data);
connection_->SetEncrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
QuicCryptoFrame lost_frame(ENCRYPTION_HANDSHAKE, 0, 200);
stream_->OnCryptoFrameLost(&lost_frame);
EXPECT_TRUE(stream_->HasPendingCryptoRetransmission());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_HANDSHAKE, 200, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WritePendingCryptoRetransmission();
EXPECT_FALSE(stream_->HasPendingCryptoRetransmission());
}
TEST_F(QuicCryptoStreamTest, NeuterUnencryptedStreamData) {
if (QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1350, 0, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
  stream_->WriteOrBufferData(data, /*fin=*/false, /*ack_listener=*/nullptr);
connection_->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
EXPECT_EQ(ENCRYPTION_ZERO_RTT, connection_->encryption_level());
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1350, 1350, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
  stream_->WriteOrBufferData(data, /*fin=*/false, /*ack_listener=*/nullptr);
stream_->OnStreamFrameLost(0, 1350, false);
EXPECT_TRUE(stream_->HasPendingRetransmission());
stream_->NeuterUnencryptedStreamData();
EXPECT_FALSE(stream_->HasPendingRetransmission());
stream_->OnStreamFrameLost(0, 1350, false);
EXPECT_FALSE(stream_->HasPendingRetransmission());
stream_->OnStreamFrameLost(1350, 650, false);
EXPECT_TRUE(stream_->HasPendingRetransmission());
stream_->NeuterUnencryptedStreamData();
EXPECT_TRUE(stream_->HasPendingRetransmission());
}
TEST_F(QuicCryptoStreamTest, NeuterUnencryptedCryptoData) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1350, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data);
connection_->SetEncrypter(
ENCRYPTION_ZERO_RTT,
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
std::unique_ptr<NullEncrypter> encrypter =
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT);
connection_->SetEncrypter(ENCRYPTION_ZERO_RTT, std::move(encrypter));
EXPECT_EQ(ENCRYPTION_ZERO_RTT, connection_->encryption_level());
EXPECT_CALL(*connection_, SendCryptoData(_, _, _)).Times(0);
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_ZERO_RTT, 1350, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_ZERO_RTT, data);
QuicCryptoFrame lost_frame(ENCRYPTION_INITIAL, 0, 1350);
stream_->OnCryptoFrameLost(&lost_frame);
EXPECT_TRUE(stream_->HasPendingCryptoRetransmission());
stream_->NeuterUnencryptedStreamData();
EXPECT_FALSE(stream_->HasPendingCryptoRetransmission());
stream_->OnCryptoFrameLost(&lost_frame);
EXPECT_FALSE(stream_->HasPendingCryptoRetransmission());
lost_frame = QuicCryptoFrame(ENCRYPTION_ZERO_RTT, 0, 650);
stream_->OnCryptoFrameLost(&lost_frame);
EXPECT_TRUE(stream_->HasPendingCryptoRetransmission());
stream_->NeuterUnencryptedStreamData();
EXPECT_TRUE(stream_->HasPendingCryptoRetransmission());
}
TEST_F(QuicCryptoStreamTest, RetransmitStreamData) {
if (QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
InSequence s;
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1350, 0, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
  stream_->WriteOrBufferData(data, /*fin=*/false, /*ack_listener=*/nullptr);
connection_->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
EXPECT_EQ(ENCRYPTION_ZERO_RTT, connection_->encryption_level());
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1350, 1350, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
  stream_->WriteOrBufferData(data, /*fin=*/false, /*ack_listener=*/nullptr);
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
QuicByteCount newly_acked_length = 0;
stream_->OnStreamFrameAcked(2000, 500, false, QuicTime::Delta::Zero(),
QuicTime::Zero(), &newly_acked_length);
EXPECT_EQ(500u, newly_acked_length);
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
650, 1350, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_.ConsumeData(
QuicUtils::GetCryptoStreamId(connection_->transport_version()), 150,
1350, NO_FIN, HANDSHAKE_RETRANSMISSION, std::nullopt);
}));
EXPECT_FALSE(stream_->RetransmitStreamData(1350, 1350, false,
HANDSHAKE_RETRANSMISSION));
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
650, 1350, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
200, 2500, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
EXPECT_TRUE(stream_->RetransmitStreamData(1350, 1350, false,
HANDSHAKE_RETRANSMISSION));
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _)).Times(0);
EXPECT_TRUE(
stream_->RetransmitStreamData(0, 0, false, HANDSHAKE_RETRANSMISSION));
}
TEST_F(QuicCryptoStreamTest, RetransmitStreamDataWithCryptoFrames) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
InSequence s;
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1350, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data);
std::unique_ptr<NullEncrypter> encrypter =
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT);
connection_->SetEncrypter(ENCRYPTION_ZERO_RTT, std::move(encrypter));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
EXPECT_EQ(ENCRYPTION_ZERO_RTT, connection_->encryption_level());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_ZERO_RTT, 1350, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_ZERO_RTT, data);
connection_->SetEncrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
QuicCryptoFrame acked_frame(ENCRYPTION_ZERO_RTT, 650, 500);
EXPECT_TRUE(
stream_->OnCryptoFrameAcked(acked_frame, QuicTime::Delta::Zero()));
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_FORWARD_SECURE, 150, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
QuicCryptoFrame frame_to_retransmit(ENCRYPTION_ZERO_RTT, 0, 150);
stream_->RetransmitData(&frame_to_retransmit, HANDSHAKE_RETRANSMISSION);
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_FORWARD_SECURE, 650, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
EXPECT_CALL(*connection_,
SendCryptoData(ENCRYPTION_FORWARD_SECURE, 200, 1150))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
frame_to_retransmit = QuicCryptoFrame(ENCRYPTION_ZERO_RTT, 0, 1350);
stream_->RetransmitData(&frame_to_retransmit, HANDSHAKE_RETRANSMISSION);
EXPECT_EQ(ENCRYPTION_FORWARD_SECURE, connection_->encryption_level());
EXPECT_CALL(*connection_, SendCryptoData(_, _, _)).Times(0);
QuicCryptoFrame empty_frame(ENCRYPTION_FORWARD_SECURE, 0, 0);
stream_->RetransmitData(&empty_frame, HANDSHAKE_RETRANSMISSION);
}
TEST_F(QuicCryptoStreamTest, HasUnackedCryptoData) {
if (QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
std::string data(1350, 'a');
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1350, 0, _, _, _))
.WillOnce(testing::Return(QuicConsumedData(0, false)));
  stream_->WriteOrBufferData(data, /*fin=*/false, /*ack_listener=*/nullptr);
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_.HasUnackedCryptoData());
EXPECT_CALL(
session_,
WritevData(QuicUtils::GetCryptoStreamId(connection_->transport_version()),
1350, 0, _, _, _))
.WillOnce(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
stream_->OnCanWrite();
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_.HasUnackedCryptoData());
}
TEST_F(QuicCryptoStreamTest, HasUnackedCryptoDataWithCryptoFrames) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1350, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_.HasUnackedCryptoData());
}
TEST_F(QuicCryptoStreamTest, CryptoMessageFramingOverhead) {
for (const ParsedQuicVersion& version :
AllSupportedVersionsWithQuicCrypto()) {
SCOPED_TRACE(version);
QuicByteCount expected_overhead = 52;
if (version.HasLongHeaderLengths()) {
expected_overhead += 3;
}
if (version.HasLengthPrefixedConnectionIds()) {
expected_overhead += 1;
}
EXPECT_EQ(expected_overhead,
QuicCryptoStream::CryptoMessageFramingOverhead(
version.transport_version, TestConnectionId()));
}
}
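// With the bounded send buffer enabled, overflowing writes should trip a
// QUIC_BUG and close the connection instead of buffering without limit.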
TEST_F(QuicCryptoStreamTest, WriteCryptoDataExceedsSendBufferLimit) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
int32_t buffer_limit = GetQuicFlag(quic_max_buffered_crypto_bytes);
EXPECT_FALSE(stream_->HasBufferedCryptoFrames());
int32_t over_limit = buffer_limit + 1;
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, over_limit, 0))
.WillOnce(Return(over_limit));
std::string large_data(over_limit, 'a');
stream_->WriteCryptoData(ENCRYPTION_INITIAL, large_data);
EXPECT_FALSE(stream_->HasBufferedCryptoFrames());
EXPECT_CALL(*connection_,
SendCryptoData(ENCRYPTION_INITIAL, buffer_limit, over_limit))
.WillOnce(Return(1));
std::string data(buffer_limit, 'a');
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data);
EXPECT_TRUE(stream_->HasBufferedCryptoFrames());
EXPECT_CALL(*connection_, SendCryptoData(_, _, _)).Times(0);
std::string data2(1, 'a');
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data2);
EXPECT_TRUE(stream_->HasBufferedCryptoFrames());
if (GetQuicFlag(quic_bounded_crypto_send_buffer)) {
EXPECT_CALL(*connection_, CloseConnection(QUIC_INTERNAL_ERROR, _, _));
EXPECT_QUIC_BUG(
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data2),
"Too much data for crypto send buffer with level: ENCRYPTION_INITIAL, "
"current_buffer_size: 16384, data length: 1");
}
}
TEST_F(QuicCryptoStreamTest, WriteBufferedCryptoFrames) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_FALSE(stream_->HasBufferedCryptoFrames());
InSequence s;
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1350, 0))
.WillOnce(Return(1000));
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data);
EXPECT_TRUE(stream_->HasBufferedCryptoFrames());
EXPECT_CALL(*connection_, SendCryptoData(_, _, _)).Times(0);
connection_->SetEncrypter(
ENCRYPTION_ZERO_RTT,
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
stream_->WriteCryptoData(ENCRYPTION_ZERO_RTT, data);
EXPECT_EQ(ENCRYPTION_ZERO_RTT, connection_->encryption_level());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 350, 1000))
.WillOnce(Return(350));
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_ZERO_RTT, 1350, 0))
.WillOnce(Return(1000));
stream_->WriteBufferedCryptoFrames();
EXPECT_TRUE(stream_->HasBufferedCryptoFrames());
EXPECT_EQ(ENCRYPTION_ZERO_RTT, connection_->encryption_level());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_ZERO_RTT, 350, 1000))
.WillOnce(Return(350));
stream_->WriteBufferedCryptoFrames();
EXPECT_FALSE(stream_->HasBufferedCryptoFrames());
}
TEST_F(QuicCryptoStreamTest, LimitBufferedCryptoData) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_CALL(*connection_,
CloseConnection(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA, _, _));
std::string large_frame(2 * GetQuicFlag(quic_max_buffered_crypto_bytes), 'a');
QuicStreamOffset offset = 1;
stream_->OnCryptoFrame(
QuicCryptoFrame(ENCRYPTION_INITIAL, offset, large_frame));
}
TEST_F(QuicCryptoStreamTest, CloseConnectionWithZeroRttCryptoFrame) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_CALL(*connection_,
CloseConnection(IETF_QUIC_PROTOCOL_VIOLATION, _, _));
test::QuicConnectionPeer::SetLastDecryptedLevel(connection_,
ENCRYPTION_ZERO_RTT);
QuicStreamOffset offset = 1;
stream_->OnCryptoFrame(QuicCryptoFrame(ENCRYPTION_ZERO_RTT, offset, "data"));
}
TEST_F(QuicCryptoStreamTest, RetransmitCryptoFramesAndPartialWrite) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_CALL(*connection_, SendCryptoData(_, _, _)).Times(0);
InSequence s;
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
std::string data(1350, 'a');
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1350, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WriteCryptoData(ENCRYPTION_INITIAL, data);
QuicCryptoFrame lost_frame(ENCRYPTION_INITIAL, 0, 1000);
stream_->OnCryptoFrameLost(&lost_frame);
EXPECT_TRUE(stream_->HasPendingCryptoRetransmission());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1000, 0))
.WillOnce(Return(0));
stream_->WritePendingCryptoRetransmission();
EXPECT_TRUE(stream_->HasPendingCryptoRetransmission());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1000, 0))
.WillOnce(Invoke(connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
stream_->WritePendingCryptoRetransmission();
EXPECT_FALSE(stream_->HasPendingCryptoRetransmission());
}
TEST_F(QuicCryptoStreamTest, EmptyCryptoFrame) {
if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
return;
}
EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
QuicCryptoFrame empty_crypto_frame(ENCRYPTION_INITIAL, 0, nullptr, 0);
stream_->OnCryptoFrame(empty_crypto_frame);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_crypto_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_crypto_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
490a0c5c-8b59-4051-9d4b-b33dc00de45b | cpp | google/arolla | optimizer | arolla/expr/optimization/optimizer.cc | arolla/expr/optimization/optimizer_test.cc | #include "arolla/expr/optimization/optimizer.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
constexpr int kPeepholeOptimizerIterationsLimit = 100;
}
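// Wraps a PeepholeOptimizer into an Optimizer that reapplies it until a
// fixed point, guarding against optimization cycles (iteration limit) and
// against optimizations that change the expression's output type.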
Optimizer MakeOptimizer(std::unique_ptr<PeepholeOptimizer> peephole_optimizer) {
return [peephole_optimizer = std::shared_ptr<PeepholeOptimizer>(
std::move(peephole_optimizer))](
ExprNodePtr expr) -> absl::StatusOr<ExprNodePtr> {
ExprNodePtr previous_expr;
int iteration = 0;
do {
if (++iteration > kPeepholeOptimizerIterationsLimit) {
return absl::InternalError(absl::StrFormat(
"too many iterations of peephole optimizer; this may indicate that "
"the set of optimizations contains cycles, or just too big "
"expression unsupported by the optimizer (last iterations: %s vs "
"%s)",
GetDebugSnippet(previous_expr), GetDebugSnippet(expr)));
}
previous_expr = expr;
ASSIGN_OR_RETURN(expr, peephole_optimizer->ApplyToNode(expr));
if (expr->qtype() != previous_expr->qtype()) {
return absl::InternalError(absl::StrFormat(
"expression %s was optimized into %s, which changed its output "
"type from %s to %s; this indicates incorrect optimization",
GetDebugSnippet(previous_expr), GetDebugSnippet(expr),
previous_expr->qtype() != nullptr ? previous_expr->qtype()->name()
: "NULL",
expr->qtype() != nullptr ? expr->qtype()->name() : "NULL"));
}
} while (previous_expr->fingerprint() != expr->fingerprint());
return expr;
};
}
} | #include "arolla/expr/optimization/optimizer.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
using ::absl_testing::StatusIs;
using ::arolla::testing::WithQTypeAnnotation;
absl::StatusOr<PeepholeOptimizationPack> ChangeTypeOptimizations() {
PeepholeOptimizationPack result;
{
ASSIGN_OR_RETURN(ExprNodePtr from,
WithQTypeAnnotation(Placeholder("x"), GetQType<float>()));
ASSIGN_OR_RETURN(ExprNodePtr to, WithQTypeAnnotation(Placeholder("x"),
GetQType<int32_t>()));
ASSIGN_OR_RETURN(result.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(from, to));
}
{
ASSIGN_OR_RETURN(ExprNodePtr from,
WithQTypeAnnotation(Placeholder("x"), GetQType<double>()));
ExprNodePtr to = Placeholder("x");
ASSIGN_OR_RETURN(result.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(from, to));
}
return result;
}
TEST(Optimizer, TypeChangesAreNotAllowed) {
ASSERT_OK_AND_ASSIGN(auto peephole_optimizer,
CreatePeepholeOptimizer({ChangeTypeOptimizations}));
auto optimizer = MakeOptimizer(std::move(peephole_optimizer));
ASSERT_OK_AND_ASSIGN(ExprNodePtr float_x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
EXPECT_THAT(
optimizer(float_x),
StatusIs(absl::StatusCode::kInternal,
"expression M.annotation.qtype(L.x, FLOAT32) was optimized into "
"M.annotation.qtype(L.x, INT32), which changed its output type "
"from FLOAT32 to INT32; this indicates incorrect optimization"));
ASSERT_OK_AND_ASSIGN(ExprNodePtr double_x,
WithQTypeAnnotation(Leaf("x"), GetQType<double>()));
EXPECT_THAT(
optimizer(double_x),
StatusIs(absl::StatusCode::kInternal,
"expression M.annotation.qtype(L.x, FLOAT64) was optimized into "
"L.x, which changed its output type from FLOAT64 to NULL; this "
"indicates incorrect optimization"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/optimizer.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/optimizer_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4f892173-9e18-4bdc-8395-880eeb4ffd46 | cpp | tensorflow/tensorflow | stablehlo_shift_left | tensorflow/lite/kernels/stablehlo_shift_left.cc | tensorflow/lite/kernels/stablehlo_shift_left_test.cc | #include <cstdint>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_shift_left {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
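// Element-wise left shift. No broadcasting: both operands are assumed to
// have identical shapes, which Prepare enforces only indirectly by sizing
// the output from the first operand.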
template <typename DataType>
TfLiteStatus EvalImpl(const TfLiteTensor* operand1,
const TfLiteTensor* operand2, TfLiteTensor* result) {
const int num_elements = NumElements(result);
const DataType* input1 = GetTensorData<DataType>(operand1);
const DataType* input2 = GetTensorData<DataType>(operand2);
DataType* output = GetTensorData<DataType>(result);
for (int i = 0; i < num_elements; ++i) {
output[i] = input1[i] << input2[i];
}
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input1->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input1->dims));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteType data_type = input1->type;
if (data_type == kTfLiteInt8) {
return EvalImpl<int8_t>(input1, input2, output);
} else if (data_type == kTfLiteInt16) {
return EvalImpl<int16_t>(input1, input2, output);
} else if (data_type == kTfLiteInt32) {
return EvalImpl<int32_t>(input1, input2, output);
} else {
TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
TfLiteTypeGetName(data_type));
return kTfLiteError;
}
}
}
}
TfLiteRegistration* Register_STABLEHLO_SHIFT_LEFT() {
static TfLiteRegistration r = {nullptr, nullptr,
stablehlo_shift_left::Prepare,
stablehlo_shift_left::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using testing::ElementsAreArray;
class ShiftLeftOpModel : public SingleOpModel {
public:
ShiftLeftOpModel(const TensorData& input1, const TensorData& input2) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(TensorData(input1.type, GetShape(input1_)));
SetBuiltinOp(BuiltinOperator_STABLEHLO_SHIFT_LEFT, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int input1_;
int input2_;
int output_;
};
TEST(ShiftLeftOpTest, ShiftLeftInt32) {
ShiftLeftOpModel model({TensorType_INT32, {3}}, {TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(model.input1(), {-1, 0, 1});
model.PopulateTensor<int32_t>(model.input2(), {1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({-2, 0, 8}));
}
TEST(ShiftLeftOpTest, ShiftLeftInt16) {
ShiftLeftOpModel model({TensorType_INT16, {2, 2}},
{TensorType_INT16, {2, 2}});
model.PopulateTensor<int16_t>(model.input1(), {-5, -5, 0, 6});
model.PopulateTensor<int16_t>(model.input2(), {0, 2, 0, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAreArray({-5, -20, 0, 24}));
}
TEST(ShiftLeftOpTest, ShiftLeftInt8) {
ShiftLeftOpModel model({TensorType_INT8, {2, 2}}, {TensorType_INT8, {2, 2}});
model.PopulateTensor<int8_t>(model.input1(), {2, -2, -2, -4});
model.PopulateTensor<int8_t>(model.input2(), {0, 1, 0, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int8_t>(), ElementsAreArray({2, -4, -2, -128}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_shift_left.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_shift_left_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1d363942-2352-4331-8226-9a635b5b362f | cpp | tensorflow/tensorflow | collective_quantizer | third_party/xla/xla/service/collective_quantizer.cc | third_party/xla/xla/service/collective_quantizer_test.cc | #include "xla/service/collective_quantizer.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
namespace {
namespace m = match;
struct ConversionSubgraph {
HloInstruction* convert = nullptr;
HloInstruction* binary = nullptr;
HloInstruction* clamp = nullptr;
HloInstruction* scale_bcast = nullptr;
std::vector<HloInstruction*> unaries;
};
template <typename... Args>
auto ScalarBroadcast(Args... args) {
return m::Broadcast(args...).WithPredicate([](const HloInstruction* instr) {
return ShapeUtil::IsScalar(instr->operand(0)->shape());
});
}
auto BitcastPreservesElementType() {
return m::Bitcast().WithPredicate([](const HloInstruction* instr) {
return ShapeUtil::SameElementType(instr->shape(),
instr->operand(0)->shape());
});
}
auto ConvertToNarrowerType() {
auto converts_to_narrower_type = [](const HloInstruction* instr) -> bool {
return ShapeUtil::ByteSizeOfPrimitiveType(instr->shape().element_type()) <
ShapeUtil::ByteSizeOfPrimitiveType(
instr->operand(0)->shape().element_type());
};
return m::Convert().WithPredicate(converts_to_narrower_type);
}
auto ConvertToWiderType() {
auto converts_to_wider_type = [](const HloInstruction* instr) -> bool {
return ShapeUtil::ByteSizeOfPrimitiveType(instr->shape().element_type()) >
ShapeUtil::ByteSizeOfPrimitiveType(
instr->operand(0)->shape().element_type());
};
return m::Convert().WithPredicate(converts_to_wider_type);
}
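// Only single-operand data-movement collectives are rewritten.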
bool IsSupportedCollective(HloInstruction* instr) {
return instr->operand_count() == 1 &&
(instr->opcode() == HloOpcode::kAllGather ||
instr->opcode() == HloOpcode::kAllToAll ||
instr->opcode() == HloOpcode::kCollectiveBroadcast ||
instr->opcode() == HloOpcode::kCollectivePermute);
}
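// Replays a chain of shape-only unary ops (bitcast/copy/reshape/slice) on
// |instr|, preserving |instr|'s element type.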
HloInstruction* ApplyUnaries(HloInstruction* instr,
const std::vector<HloInstruction*>& unaries) {
for (HloInstruction* unary : unaries) {
instr = instr->AddInstruction(unary->CloneWithNewOperands(
ShapeUtil::MakeShapeWithDenseLayout(
instr->shape().element_type(), unary->shape().dimensions(),
unary->shape().layout().minor_to_major()),
{instr}));
}
return instr;
}
absl::StatusOr<bool> InstrIsReplicated(HloModule* module,
HloInstruction* instr) {
if (module->config().replica_count() > 1) {
return false;
}
TF_ASSIGN_OR_RETURN(
auto replication_analysis,
HloReplicationAnalysis::Run(module,
                                  /*cross_partition_spmd=*/true));
return replication_analysis->HloInstructionIsReplicatedAt(instr, {});
}
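// Follows operands from |instr| through unary ops and multiply/divide,
// collecting the visited instructions until a widening convert is reached;
// returns an empty vector if no such convert exists.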
std::vector<HloInstruction*> FindDequantizationSubgraphRecursive(
HloInstruction* instr, absl::flat_hash_set<int>& visited_instrs,
std::vector<HloInstruction*> subgraph) {
if (!visited_instrs.emplace(instr->unique_id()).second) {
return {};
}
subgraph.emplace_back(instr);
if (Match(instr, ConvertToWiderType())) {
return subgraph;
}
if (instr->operand_count() == 1 || instr->opcode() == HloOpcode::kDivide) {
return FindDequantizationSubgraphRecursive(instr->mutable_operand(0),
visited_instrs, subgraph);
} else if (instr->opcode() == HloOpcode::kMultiply) {
for (HloInstruction* operand : instr->unique_operands()) {
auto binary_subgraph = FindDequantizationSubgraphRecursive(
operand, visited_instrs, subgraph);
if (!binary_subgraph.empty()) {
return binary_subgraph;
}
}
}
return {};
}
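// Matches a dequantization feeding the collective: a widening convert,
// optionally scaled by a broadcast scalar multiply or divide, followed only
// by bitcast/copy/reshape/slice ops.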
std::optional<ConversionSubgraph> IsSupportedDequantization(
HloInstruction* instr) {
ConversionSubgraph subgraph;
absl::flat_hash_set<int> visited_instrs;
std::vector<HloInstruction*> candidate_subgraph =
FindDequantizationSubgraphRecursive(instr, visited_instrs,
std::vector<HloInstruction*>{});
std::reverse(candidate_subgraph.begin(), candidate_subgraph.end());
if (candidate_subgraph.size() > 1 &&
(Match(
candidate_subgraph[1],
m::MultiplyAnyOrder(&subgraph.binary, m::Convert(&subgraph.convert),
ScalarBroadcast(&subgraph.scale_bcast))) ||
Match(candidate_subgraph[1],
m::Divide(&subgraph.binary, m::Convert(&subgraph.convert),
ScalarBroadcast(&subgraph.scale_bcast))))) {
subgraph.unaries = {candidate_subgraph.begin() + 2,
candidate_subgraph.end()};
} else if (candidate_subgraph.size() > 0 &&
Match(candidate_subgraph[0], m::Convert(&subgraph.convert))) {
subgraph.unaries = {candidate_subgraph.begin() + 1,
candidate_subgraph.end()};
} else {
VLOG(5) << "Did not find type conversion or dequantization pattern.";
return std::nullopt;
}
for (HloInstruction* unary : subgraph.unaries) {
if (!Match(unary, m::AnyOf<HloInstruction>(m::Bitcast(), m::Copy(),
m::Reshape(), m::Slice()))) {
VLOG(5) << "Unexpected instruction in unary ops.";
return std::nullopt;
}
}
return std::make_optional<ConversionSubgraph>(std::move(subgraph));
}
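// Identifies a quantization consuming `instr`: a possibly empty chain of
// bitcast/copy/reshape/slice ops, then optionally a multiply or divide by a
// broadcast scalar plus a clamp, ending in a narrowing convert. Returns the
// matched subgraph or nullopt.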
std::optional<ConversionSubgraph> IsSupportedQuantization(
HloInstruction* instr) {
ConversionSubgraph subgraph;
std::vector<HloInstruction*> ops;
while (instr->user_count() <= 1) {
if (Match(instr, m::AnyOf<HloInstruction>(
BitcastPreservesElementType(), m::Copy(), m::Reshape(),
m::Slice(), m::Multiply(), m::Divide(), m::Clamp()))) {
if (instr->user_count() > 0) {
ops.emplace_back(instr);
instr = instr->users()[0];
continue;
}
break;
}
if (Match(instr, ConvertToNarrowerType())) {
ops.emplace_back(instr);
break;
}
VLOG(5) << "Unsupported instruction.";
return std::nullopt;
}
if (ops.size() > 2 &&
(Match(
ops.back(),
m::Convert(&subgraph.convert,
m::Clamp(&subgraph.clamp, ScalarBroadcast(m::Constant()),
m::MultiplyAnyOrder(
&subgraph.binary, m::Op(),
ScalarBroadcast(&subgraph.scale_bcast)),
ScalarBroadcast(m::Constant())))) ||
Match(ops.back(),
m::Convert(
&subgraph.convert,
m::Clamp(&subgraph.clamp, ScalarBroadcast(m::Constant()),
m::Divide(&subgraph.binary, m::Op(),
ScalarBroadcast(&subgraph.scale_bcast)),
ScalarBroadcast(m::Constant())))))) {
subgraph.unaries = {ops.begin(), ops.end() - 3};
} else if (ops.size() > 0 &&
Match(ops.back(), m::Convert(&subgraph.convert))) {
subgraph.unaries = {ops.begin(), ops.end() - 1};
} else {
VLOG(5) << "Did not find type conversion or quantization pattern.";
return std::nullopt;
}
for (HloInstruction* unary : subgraph.unaries) {
if (!Match(unary, m::AnyOf<HloInstruction>(m::Bitcast(), m::Copy(),
m::Reshape(), m::Slice()))) {
VLOG(5) << "Unexpected instruction in unary ops.";
return std::nullopt;
}
}
return std::make_optional<ConversionSubgraph>(std::move(subgraph));
}
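// If the operand of collective `instr` is a dequantization with a replicated
// scale, moves the convert (and scaling) to after the collective so the
// collective runs on the narrower quantized type.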
absl::Status MatchDequantization(HloInstruction* instr, bool* changed) {
std::optional<ConversionSubgraph> subgraph =
IsSupportedDequantization(instr->mutable_operand(0));
if (!subgraph.has_value()) {
return absl::OkStatus();
}
if (subgraph->scale_bcast) {
TF_ASSIGN_OR_RETURN(
bool scale_is_replicated,
InstrIsReplicated(instr->parent()->parent(), subgraph->scale_bcast));
if (!scale_is_replicated) {
return absl::OkStatus();
}
}
HloInstruction* new_coll_operand = subgraph->convert->mutable_operand(0);
new_coll_operand = ApplyUnaries(new_coll_operand, subgraph->unaries);
Shape new_coll_shape = ShapeUtil::ChangeElementType(
instr->shape(), new_coll_operand->shape().element_type());
HloInstruction* new_collective = instr->AddInstruction(
instr->CloneWithNewOperands(new_coll_shape, {new_coll_operand}));
Shape new_convert_shape = ShapeUtil::ChangeElementType(
new_collective->shape(), subgraph->convert->shape().element_type());
HloInstruction* new_convert =
instr->AddInstruction(subgraph->convert->CloneWithNewOperands(
new_convert_shape, {new_collective}));
HloInstruction* new_binary;
if (subgraph->binary) {
HloInstruction* new_scale_bcast = instr->AddInstruction(
subgraph->scale_bcast->CloneWithNewShape(new_convert->shape()));
new_binary = instr->AddInstruction(subgraph->binary->CloneWithNewOperands(
new_convert->shape(), {new_convert, new_scale_bcast}));
}
TF_RETURN_IF_ERROR(
instr->ReplaceAllUsesWith(subgraph->binary ? new_binary : new_convert));
*changed = true;
VLOG(5) << "Quantized collective " << new_collective->ToShortString();
return absl::OkStatus();
}
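// If the sole user chain of collective `instr` is a quantization with a
// replicated scale, moves the scaling, clamp, and convert before the
// collective so the collective runs on the narrower quantized type.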
absl::Status MatchQuantization(HloInstruction* instr, bool* changed) {
std::optional<ConversionSubgraph> subgraph;
if (instr->user_count() == 1) {
subgraph = IsSupportedQuantization(instr->users()[0]);
}
if (!subgraph.has_value()) {
return absl::OkStatus();
}
if (subgraph->scale_bcast) {
TF_ASSIGN_OR_RETURN(
bool scale_is_replicated,
InstrIsReplicated(instr->parent()->parent(), subgraph->scale_bcast));
if (!scale_is_replicated) {
return absl::OkStatus();
}
}
HloInstruction* coll_operand = instr->mutable_operand(0);
HloInstruction *new_binary, *new_clamp;
if (subgraph->binary) {
HloInstruction* new_scale_bcast = instr->AddInstruction(
subgraph->scale_bcast->CloneWithNewShape(coll_operand->shape()));
new_binary = instr->AddInstruction(subgraph->binary->CloneWithNewOperands(
coll_operand->shape(), {coll_operand, new_scale_bcast}));
HloInstruction* new_clamp_lower = instr->AddInstruction(
subgraph->clamp->operand(0)->CloneWithNewShape(coll_operand->shape()));
HloInstruction* new_clamp_upper = instr->AddInstruction(
subgraph->clamp->operand(2)->CloneWithNewShape(coll_operand->shape()));
new_clamp = instr->AddInstruction(subgraph->clamp->CloneWithNewOperands(
coll_operand->shape(), {new_clamp_lower, new_binary, new_clamp_upper}));
}
Shape new_convert_shape = ShapeUtil::ChangeElementType(
coll_operand->shape(), subgraph->convert->shape().element_type());
HloInstruction* new_convert =
instr->AddInstruction(subgraph->convert->CloneWithNewOperands(
new_convert_shape, {subgraph->binary ? new_clamp : coll_operand}));
Shape new_collective_shape = ShapeUtil::ChangeElementType(
instr->shape(), subgraph->convert->shape().element_type());
HloInstruction* new_collective = instr->AddInstruction(
instr->CloneWithNewOperands(new_collective_shape, {new_convert}));
new_collective = ApplyUnaries(new_collective, subgraph->unaries);
TF_RETURN_IF_ERROR(subgraph->convert->ReplaceAllUsesWith(new_collective));
*changed = true;
VLOG(5) << "Quantized collective " << new_collective->ToShortString();
return absl::OkStatus();
}
}
absl::StatusOr<bool> CollectiveQuantizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp : module->MakeComputationPostOrder()) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
if (IsSupportedCollective(instr)) {
TF_RETURN_IF_ERROR(MatchDequantization(instr, &changed));
TF_RETURN_IF_ERROR(MatchQuantization(instr, &changed));
}
}
}
return changed;
}
} | #include "xla/service/collective_quantizer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class CollectiveQuantizerTest : public HloTestBase {
public:
absl::StatusOr<bool> RunCollectiveQuantizer(HloModule* module) {
CollectiveQuantizer collective_quantizer;
return collective_quantizer.Run(module, {});
}
};
TEST_F(CollectiveQuantizerTest, AllGatherConvert) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT convert = f8e4m3fn[8,32,8,128] convert(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Convert(op::Parameter())));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllGatherConvertUnary) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
reshape = bf16[8,32,1024] reshape(all-gather)
slice = bf16[8,32,512] slice(reshape), slice={[0:8], [0:32], [256:768]}
ROOT convert = f8e4m3fn[8,32,512] convert(slice)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Slice(op::Reshape(op::AllGather(op::Convert(op::Parameter())))));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllGatherQuantize) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(all-gather, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllToAllQuantize) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,128] parameter(0)
all-to-all = bf16[8,32,8,128] all-to-all(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(all-to-all, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllToAll(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))));
HloInstruction* all_to_all = module->entry_computation()->root_instruction();
EXPECT_THAT(all_to_all->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, CollectiveBroadcastQuantize) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,128] parameter(0)
collective-broadcast = bf16[8,32,8,128] collective-broadcast(param), replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(collective-broadcast, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CollectiveBroadcast(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))));
HloInstruction* collective_broadcast =
module->entry_computation()->root_instruction();
EXPECT_THAT(collective_broadcast->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, CollectivePermuteQuantize) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,128] parameter(0)
collective-permute = bf16[8,32,8,128] collective-permute(param), source_target_pairs={{0,1},{2,3},{4,5},{6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(collective-permute, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CollectivePermute(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))));
HloInstruction* collective_permute =
module->entry_computation()->root_instruction();
EXPECT_THAT(collective_permute->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllGatherQuantizeUnary) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
reshape = bf16[8,32,1024] reshape(all-gather)
slice = bf16[8,32,512] slice(reshape), slice={[0:8], [0:32], [256:768]}
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,512] broadcast(scale), dimensions={}
divide = bf16[8,32,512] divide(slice, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,512] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,512] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,512] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,512] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Slice(op::Reshape(op::AllGather(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))))));
HloInstruction* slice = module->entry_computation()->root_instruction();
EXPECT_THAT(slice->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllGatherQuantizeMultiUser) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(all-gather, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
add = bf16[8,32,8,128] add(divide, clamp)
ROOT convert = f8e4m3fn[8,32,8,128] convert(add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectiveQuantizerTest, AllGatherQuantizeNonReplicatedScale) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1)
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(all-gather, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectiveQuantizerTest, ConvertAllGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,4,8,128] parameter(0)
convert = bf16[8,4,8,128] convert(param)
ROOT all-gather = bf16[8,32,8,128] all-gather(convert), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Convert(op::AllGather(op::Parameter())));
const HloInstruction* all_gather =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, ConvertAllGatherUnary) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,4,8,128] parameter(0)
convert = bf16[8,4,8,128] convert(param)
reshape = bf16[8,4,1024] reshape(convert)
slice = bf16[8,4,512] slice(reshape), slice={[0:8], [0:4], [256:768]}
ROOT all-gather = bf16[8,32,512] all-gather(slice), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Convert(op::AllGather(op::Slice(op::Reshape(op::Parameter())))));
const HloInstruction* all_gather =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeAllGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,4,8,128] parameter(0)
convert = bf16[8,4,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,4,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,4,8,128] multiply(convert, scale_bcast)
ROOT all-gather = bf16[8,32,8,128] all-gather(multiply), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Convert(op::AllGather(op::Parameter())),
op::Broadcast()));
const HloInstruction* all_gather =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeAllToAll) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,32,8,128] parameter(0)
convert = bf16[8,32,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,32,8,128] multiply(convert, scale_bcast)
ROOT all-to-all = bf16[8,32,8,128] all-to-all(multiply), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Convert(op::AllToAll(op::Parameter())),
op::Broadcast()));
const HloInstruction* all_to_all =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(all_to_all->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeCollectiveBroadcast) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,32,8,128] parameter(0)
convert = bf16[8,32,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,32,8,128] multiply(convert, scale_bcast)
ROOT collective-broadcast = bf16[8,32,8,128] collective-broadcast(multiply), replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Multiply(op::Convert(op::CollectiveBroadcast(op::Parameter())),
op::Broadcast()));
const HloInstruction* collective_broadcast =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(collective_broadcast->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,32,8,128] parameter(0)
convert = bf16[8,32,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,32,8,128] multiply(convert, scale_bcast)
ROOT collective-permute = bf16[8,32,8,128] collective-permute(multiply), source_target_pairs={{0,1},{2,3},{4,5},{6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Convert(op::CollectivePermute(op::Parameter())),
op::Broadcast()));
const HloInstruction* collective_permute =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(collective_permute->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeAllGatherUnary) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,4,8,128] parameter(0)
convert = bf16[8,4,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,4,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,4,8,128] multiply(convert, scale_bcast)
reshape = bf16[8,4,1024] reshape(multiply)
slice = bf16[8,4,512] slice(reshape), slice={[0:8], [0:4], [256:768]}
ROOT all-gather = bf16[8,32,512] all-gather(slice), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Multiply(
op::Convert(op::AllGather(op::Slice(op::Reshape(op::Parameter())))),
op::Broadcast()));
HloInstruction* all_gather = module->entry_computation()
->root_instruction()
->mutable_operand(0)
->mutable_operand(0);
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_quantizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_quantizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e2b61ad0-5e16-4782-b7ce-a9f1973adf27 | cpp | google/arolla | struct_field | arolla/util/struct_field.h | arolla/util/struct_field_test.cc | #ifndef AROLLA_UTIL_STRUCT_FIELD_H_
#define AROLLA_UTIL_STRUCT_FIELD_H_
#include <array>
#include <cstddef>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "arolla/util/demangle.h"
namespace arolla {
template <typename T, bool kIsSkipped = false>
struct StructField {
static constexpr bool kIsIncludedToArollaQType = !kIsSkipped;
static_assert(!kIsIncludedToArollaQType || !std::is_array_v<T>,
"array field types are not supported");
using field_type = T;
size_t field_offset;
absl::string_view field_name;
};
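// Returns a pointer to the field within `value` at the stored byte offset.
// No type or bounds checking is performed, hence "Unsafe".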
template <class FieldType>
const FieldType* UnsafeGetStructFieldPtr(const StructField<FieldType>& field,
const void* value) {
return reinterpret_cast<const FieldType*>(static_cast<const char*>(value) +
field.field_offset);
}
namespace struct_field_impl {
template <typename T, typename Enabled = void>
struct StructFieldTraits {
static constexpr auto ArollaStructFields() { return std::tuple(); }
};
template <typename T>
struct StructFieldTraits<
T, std::enable_if_t<std::is_invocable_v<decltype(T::ArollaStructFields)>>> {
static auto ArollaStructFields() { return T::ArollaStructFields(); }
};
template <class T, class FieldTuple, size_t... Is>
absl::Status VerifyArollaStructFields(
ABSL_ATTRIBUTE_UNUSED const FieldTuple& fields,
std::index_sequence<Is...>) {
if constexpr (sizeof...(Is) != 0) {
auto offsets =
std::array<size_t, sizeof...(Is)>{std::get<Is>(fields).field_offset...};
auto alignments = std::array<size_t, sizeof...(Is)>{
alignof(typename std::tuple_element_t<Is, FieldTuple>::field_type)...};
auto sizes = std::array<size_t, sizeof...(Is)>{
sizeof(typename std::tuple_element_t<Is, FieldTuple>::field_type)...};
if (offsets[0] != 0) {
return absl::FailedPreconditionError(
"first struct field defined incorrectly");
}
if (!(((Is == 0) || (offsets[Is] > offsets[Is - 1])) && ...)) {
return absl::FailedPreconditionError("struct fields are out of order");
}
auto align_offset = [](size_t offset, size_t alignment) constexpr {
return offset +
(offset % alignment == 0 ? 0 : alignment - offset % alignment);
};
if (!(((Is == 0) ||
(offsets[Is] <=
align_offset(offsets[Is - 1] + sizes[Is - 1], alignments[Is]))) &&
...)) {
return absl::FailedPreconditionError(
"struct field is missed in the middle");
}
if (align_offset(offsets.back() + sizes.back(), alignof(T)) != sizeof(T)) {
return absl::FailedPreconditionError("struct field is missed at the end");
}
}
return absl::OkStatus();
}
}
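// Returns the tuple of StructFields for T with skipped fields filtered out.
// On first use, verifies once that the declared fields cover T's layout with
// no gaps beyond padding, CHECK-failing otherwise.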
template <class T>
const auto& GetStructFields() {
ABSL_ATTRIBUTE_UNUSED static const bool once = [] {
const auto fields =
struct_field_impl::StructFieldTraits<T>::ArollaStructFields();
constexpr size_t kSize = std::tuple_size_v<decltype(fields)>;
CHECK_OK(struct_field_impl::VerifyArollaStructFields<T>(
fields, std::make_index_sequence<kSize>()))
<< TypeName<T>();
return true;
}();
auto filter_and_convert_to_tuple = [](auto struct_field) {
using StructField = decltype(struct_field);
if constexpr (StructField::kIsIncludedToArollaQType) {
return std::tuple<StructField>{struct_field};
} else {
return std::tuple<>();
}
};
static const auto filtered_fields = std::apply(
[&](auto... struct_fields) {
return std::tuple_cat(filter_and_convert_to_tuple(struct_fields)...);
},
struct_field_impl::StructFieldTraits<T>::ArollaStructFields());
return filtered_fields;
}
template <class T>
constexpr size_t StructFieldCount() {
return std::tuple_size_v<std::decay_t<decltype(GetStructFields<T>())>>;
}
template <class T>
constexpr bool HasStructFields() {
return StructFieldCount<T>() != 0;
}
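// AROLLA_DECLARE_STRUCT_FIELD registers member NAME; AROLLA_SKIP_STRUCT_FIELD
// records its offset for layout verification but excludes it from the Arolla
// QType. Both require a `CppType` alias for the enclosing struct to be in
// scope.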
#define AROLLA_DECLARE_STRUCT_FIELD(NAME) \
::arolla::StructField<decltype(CppType::NAME)> { \
.field_offset = offsetof(CppType, NAME), .field_name = #NAME \
}
#define AROLLA_SKIP_STRUCT_FIELD(NAME) \
::arolla::StructField<decltype(CppType::NAME), true> { \
.field_offset = offsetof(CppType, NAME), .field_name = #NAME \
}
}
#endif | #include "arolla/util/struct_field.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/util/meta.h"
namespace {
using ::absl_testing::StatusIs;
using ::testing::MatchesRegex;
struct Point {
int x;
float y;
constexpr static auto ArollaStructFields() {
using CppType = Point;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
};
}
};
struct Rectangle {
Point upper_left;
Point lower_bound;
std::string name;
constexpr static auto ArollaStructFields() {
using CppType = Rectangle;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(upper_left),
AROLLA_DECLARE_STRUCT_FIELD(lower_bound),
AROLLA_DECLARE_STRUCT_FIELD(name),
};
}
};
template <class A, class B, class C, bool kIsBSkipped = false>
struct Tripple {
A a;
B b;
C c;
constexpr static auto ArollaStructFields() {
using CppType = Tripple;
if constexpr (kIsBSkipped) {
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_SKIP_STRUCT_FIELD(b),
AROLLA_DECLARE_STRUCT_FIELD(c),
};
} else {
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_DECLARE_STRUCT_FIELD(b),
AROLLA_DECLARE_STRUCT_FIELD(c),
};
}
}
};
struct UnsupportedSkippedFields {
struct UnknownType {};
int a;
void* b;
float c;
UnknownType d;
constexpr static auto ArollaStructFields() {
using CppType = UnsupportedSkippedFields;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_SKIP_STRUCT_FIELD(b),
AROLLA_DECLARE_STRUCT_FIELD(c),
AROLLA_SKIP_STRUCT_FIELD(d),
};
}
};
TEST(DeclareMacroTest, MacroInternalTest) {
using CppType = Point;
Point p{5, 7.};
auto field_x = AROLLA_DECLARE_STRUCT_FIELD(x);
static_assert(std::is_same_v<decltype(field_x)::field_type, int>);
EXPECT_EQ(field_x.field_offset, offsetof(Point, x));
EXPECT_EQ(field_x.field_name, "x");
EXPECT_EQ(::arolla::UnsafeGetStructFieldPtr(field_x, &p), &p.x);
auto field_y = AROLLA_DECLARE_STRUCT_FIELD(y);
static_assert(std::is_same_v<decltype(field_y)::field_type, float>);
EXPECT_EQ(field_y.field_offset, offsetof(Point, y));
EXPECT_EQ(field_y.field_name, "y");
EXPECT_EQ(::arolla::UnsafeGetStructFieldPtr(field_y, &p), &p.y);
}
}
namespace arolla {
namespace {
TEST(StructFieldTest, UnsupportedSkippedFields) {
auto t = arolla::GetStructFields<UnsupportedSkippedFields>();
EXPECT_EQ(std::tuple_size_v<decltype(t)>, 2);
EXPECT_EQ(std::get<0>(t).field_name, "a");
EXPECT_EQ(std::get<1>(t).field_name, "c");
}
TEST(StructFieldTest, PaddingVerification) {
meta::foreach_type(
meta::type_list<std::bool_constant<true>, std::bool_constant<false>>(),
[](auto t) {
constexpr bool kIsBSkipped = typename decltype(t)::type();
constexpr size_t kExpectedFieldCount = kIsBSkipped ? 2 : 3;
{
auto t = arolla::GetStructFields<
Tripple<int, char, double, kIsBSkipped>>();
EXPECT_EQ(std::tuple_size_v<decltype(t)>, kExpectedFieldCount);
EXPECT_EQ(std::get<0>(t).field_name, "a");
if constexpr (kIsBSkipped) {
EXPECT_EQ(std::get<1>(t).field_name, "c");
} else {
EXPECT_EQ(std::get<1>(t).field_name, "b");
EXPECT_EQ(std::get<2>(t).field_name, "c");
}
}
{
auto t = ::arolla::GetStructFields<
Tripple<char, char, double, kIsBSkipped>>();
EXPECT_EQ(std::tuple_size_v<decltype(t)>, kExpectedFieldCount);
}
{
auto t = ::arolla::GetStructFields<
Tripple<char, double, char, kIsBSkipped>>();
EXPECT_EQ(std::tuple_size_v<decltype(t)>, kExpectedFieldCount);
}
{
auto t = ::arolla::GetStructFields<
Tripple<double, char, char, kIsBSkipped>>();
EXPECT_EQ(std::tuple_size_v<decltype(t)>, kExpectedFieldCount);
}
{
auto t =
::arolla::GetStructFields<Tripple<int, int, int, kIsBSkipped>>();
EXPECT_EQ(std::tuple_size_v<decltype(t)>, kExpectedFieldCount);
}
{
auto t = ::arolla::GetStructFields<
Tripple<int16_t, char, double, kIsBSkipped>>();
EXPECT_EQ(std::tuple_size_v<decltype(t)>, kExpectedFieldCount);
}
{
auto t = ::arolla::GetStructFields<
Tripple<int, double, int16_t, kIsBSkipped>>();
EXPECT_EQ(std::tuple_size_v<decltype(t)>, kExpectedFieldCount);
}
});
}
TEST(LayoutTest, Point) {
FrameLayout::Builder builder;
auto point_slot = builder.AddSlot<Point>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(point_slot, {5, 7.});
FrameLayout::Slot<int> x_slot = point_slot.GetSubslot<0>();
EXPECT_EQ(frame.Get(x_slot), 5);
FrameLayout::Slot<float> y_slot = point_slot.GetSubslot<1>();
EXPECT_EQ(frame.Get(y_slot), 7.);
}
TEST(LayoutTest, Rectangle) {
FrameLayout::Builder builder;
auto rectangle_slot = builder.AddSlot<Rectangle>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(rectangle_slot, {{-5, -7.}, {5, 7.}, "ABCD"});
FrameLayout::Slot<Point> ul_slot = rectangle_slot.GetSubslot<0>();
FrameLayout::Slot<int> ulx_slot = ul_slot.GetSubslot<0>();
FrameLayout::Slot<float> uly_slot = ul_slot.GetSubslot<1>();
EXPECT_EQ(frame.Get(ulx_slot), -5);
EXPECT_EQ(frame.Get(uly_slot), -7.);
FrameLayout::Slot<Point> lb_slot = rectangle_slot.GetSubslot<1>();
FrameLayout::Slot<int> lbx_slot = lb_slot.GetSubslot<0>();
FrameLayout::Slot<float> lby_slot = lb_slot.GetSubslot<1>();
EXPECT_EQ(frame.Get(lbx_slot), 5);
EXPECT_EQ(frame.Get(lby_slot), 7.);
FrameLayout::Slot<std::string> name_slot = rectangle_slot.GetSubslot<2>();
EXPECT_EQ(frame.Get(name_slot), "ABCD");
}
TEST(VerifyArollaStructFieldsTest, MissedFirst) {
struct MissedFirst {
int a;
int b;
int c;
constexpr static auto ArollaStructFields() {
using CppType = MissedFirst;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(b),
AROLLA_DECLARE_STRUCT_FIELD(c),
};
}
};
EXPECT_THAT(struct_field_impl::VerifyArollaStructFields<MissedFirst>(
MissedFirst::ArollaStructFields(),
std::make_index_sequence<StructFieldCount<MissedFirst>()>()),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*first.*incorrectly.*")));
}
TEST(VerifyArollaStructFieldsTest, MissedMiddle) {
struct MissedMiddle {
int a;
int b;
int c;
constexpr static auto ArollaStructFields() {
using CppType = MissedMiddle;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_DECLARE_STRUCT_FIELD(c),
};
}
};
EXPECT_THAT(struct_field_impl::VerifyArollaStructFields<MissedMiddle>(
MissedMiddle::ArollaStructFields(),
std::make_index_sequence<StructFieldCount<MissedMiddle>()>()),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed.*middle.*")));
}
TEST(VerifyArollaStructFieldsTest, MissedEnd) {
struct MissedEnd {
int a;
int b;
int c;
constexpr static auto ArollaStructFields() {
using CppType = MissedEnd;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_DECLARE_STRUCT_FIELD(b),
};
}
};
EXPECT_THAT(struct_field_impl::VerifyArollaStructFields<MissedEnd>(
MissedEnd::ArollaStructFields(),
std::make_index_sequence<StructFieldCount<MissedEnd>()>()),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed.*end.*")));
}
TEST(VerifyArollaStructFieldsTest, OutOfOrder) {
struct OutOfOrder {
int a;
int b;
int c;
constexpr static auto ArollaStructFields() {
using CppType = OutOfOrder;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_DECLARE_STRUCT_FIELD(c),
AROLLA_DECLARE_STRUCT_FIELD(b),
};
}
};
EXPECT_THAT(struct_field_impl::VerifyArollaStructFields<OutOfOrder>(
OutOfOrder::ArollaStructFields(),
std::make_index_sequence<StructFieldCount<OutOfOrder>()>()),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*out.*order.*")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/struct_field.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/struct_field_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
b1eaa4f7-89a4-4061-89a3-e9e5d277c703 | cpp | tensorflow/tensorflow | jpeg_decompress_buffered_struct | tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h | tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_JPEG_DECOMPRESS_BUFFERED_STRUCT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_JPEG_DECOMPRESS_BUFFERED_STRUCT_H_
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <vector>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
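// Wraps a jpeg_decompress_struct in a malloc'd buffer of at least
// `expected_size` bytes, zero-filling any bytes beyond
// sizeof(jpeg_decompress_struct) -- presumably so the struct can be handed to
// a dynamically loaded libjpeg whose jpeg_decompress_struct is larger than
// the one this code was compiled against.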
class JpegDecompressBufferedStruct {
public:
explicit JpegDecompressBufferedStruct(std::size_t expected_size)
: resized_size_(std::max(sizeof(jpeg_decompress_struct), expected_size)),
buffer_(reinterpret_cast<char*>(malloc(resized_size_))) {
while (--expected_size >= sizeof(jpeg_decompress_struct)) {
buffer_[expected_size] = 0;
}
}
~JpegDecompressBufferedStruct() { std::free(buffer_); }
JpegDecompressBufferedStruct(const JpegDecompressBufferedStruct&) = delete;
JpegDecompressBufferedStruct& operator=(const JpegDecompressBufferedStruct&) =
delete;
jpeg_decompress_struct* get() const {
return reinterpret_cast<jpeg_decompress_struct*>(buffer_);
}
int const size() { return resized_size_; }
const char* buffer() { return buffer_; }
private:
int resized_size_;
char* const buffer_;
};
}
}
}
#endif | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h"
#include <cstddef>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
const int kSizeOfJpegDecompressStruct = sizeof(jpeg_decompress_struct);
TEST(JpegDecompressBufferedStructTest,
ExpectInitializationSizeMatchesStructSize) {
JpegDecompressBufferedStruct buffered_struct(kSizeOfJpegDecompressStruct);
EXPECT_EQ(buffered_struct.size(), kSizeOfJpegDecompressStruct);
}
TEST(JpegDecompressBufferedStructTest,
StructWithSizeGreaterThanCompiledStruct) {
int excess_bytes = 16;
JpegDecompressBufferedStruct buffered_struct(kSizeOfJpegDecompressStruct +
excess_bytes);
EXPECT_EQ(buffered_struct.size(), kSizeOfJpegDecompressStruct + excess_bytes);
const char* buffer = buffered_struct.buffer();
ASSERT_NE(buffer, nullptr);
while (excess_bytes--) {
EXPECT_EQ(
(unsigned char)(buffer[kSizeOfJpegDecompressStruct + excess_bytes]),
'\0');
}
}
TEST(JpegDecompressBufferedStructTest, StructWithSizeLessThanCompiledStruct) {
JpegDecompressBufferedStruct buffered_struct(kSizeOfJpegDecompressStruct -
16);
EXPECT_EQ(buffered_struct.size(), kSizeOfJpegDecompressStruct);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05ca717a-710e-4112-add2-ca73d6c6071f | cpp | google/googletest | sample1 | googletest/samples/sample1.cc | googletest/samples/sample1_unittest.cc | #include "sample1.h"
int Factorial(int n) {
int result = 1;
for (int i = 1; i <= n; i++) {
result *= i;
}
return result;
}
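// Returns true if and only if n is a prime number.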
bool IsPrime(int n) {
if (n <= 1) return false;
if (n % 2 == 0) return n == 2;
for (int i = 3;; i += 2) {
if (i > n / i) break;
if (n % i == 0) return false;
}
return true;
} | #include "sample1.h"
#include <limits.h>
#include "gtest/gtest.h"
namespace {
TEST(FactorialTest, Negative) {
EXPECT_EQ(1, Factorial(-5));
EXPECT_EQ(1, Factorial(-1));
EXPECT_GT(Factorial(-10), 0);
}
TEST(FactorialTest, Zero) { EXPECT_EQ(1, Factorial(0)); }
TEST(FactorialTest, Positive) {
EXPECT_EQ(1, Factorial(1));
EXPECT_EQ(2, Factorial(2));
EXPECT_EQ(6, Factorial(3));
EXPECT_EQ(40320, Factorial(8));
}
TEST(IsPrimeTest, Negative) {
EXPECT_FALSE(IsPrime(-1));
EXPECT_FALSE(IsPrime(-2));
EXPECT_FALSE(IsPrime(INT_MIN));
}
TEST(IsPrimeTest, Trivial) {
EXPECT_FALSE(IsPrime(0));
EXPECT_FALSE(IsPrime(1));
EXPECT_TRUE(IsPrime(2));
EXPECT_TRUE(IsPrime(3));
}
TEST(IsPrimeTest, Positive) {
EXPECT_FALSE(IsPrime(4));
EXPECT_TRUE(IsPrime(5));
EXPECT_FALSE(IsPrime(6));
EXPECT_TRUE(IsPrime(23));
}
} | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googletest/samples/sample1.cc | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googletest/samples/sample1_unittest.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
da8d2664-ab11-4fc3-b0f0-3d232f73b516 | cpp | tensorflow/tensorflow | op_stats_to_pod_stats | tensorflow/core/profiler/convert/op_stats_to_pod_stats.cc | tensorflow/core/profiler/convert/op_stats_to_pod_stats_test.cc | #include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
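// Builds a PodStatsRecord for one core's step: unpacks the generic step
// breakdown, aggregates event types into generic categories in microseconds,
// and records the largest category as the bottleneck.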
PodStatsRecord CreatePodStatsRecord(absl::string_view host_name,
const StepInfoResult& step_info) {
PodStatsRecord record;
GenericStepBreakdown generic;
bool success = step_info.step_breakdown().UnpackTo(&generic);
DCHECK(success);
record.set_host_name(string(host_name));
record.set_step_num(step_info.step_num());
record.set_total_duration_us(
tsl::profiler::PicoToMicro(step_info.duration_ps()));
auto& step_breakdown_map = *record.mutable_step_breakdown_us();
std::vector<std::pair<uint64, absl::string_view>> metrics;
auto add_event = [&](GenericEventType type,
std::initializer_list<EventType> event_list) {
uint64 ps = 0;
for (const auto& event_type : event_list) {
ps += gtl::FindWithDefault(generic.type_ps(), event_type, 0);
}
step_breakdown_map[type] = tsl::profiler::PicoToMicro(ps);
metrics.emplace_back(ps, GetGenericEventTypeStr(type));
};
add_event(kDeviceCompute, {DEVICE_COMPUTE_32, DEVICE_COMPUTE_16});
add_event(kDeviceToDevice, {DEVICE_TO_DEVICE, DEVICE_WAIT_DEVICE});
add_event(kDeviceCollectives, {DEVICE_COLLECTIVES});
add_event(kHostCompute, {HOST_COMPUTE});
add_event(kHostPrepare, {HOST_PREPARE});
add_event(kInput, {HOST_WAIT_INPUT, HOST_TO_DEVICE, DEVICE_WAIT_HOST});
add_event(kOutput, {DEVICE_TO_HOST});
add_event(kCompile, {HOST_COMPILE});
add_event(kAllOthers, {UNKNOWN_TIME});
std::sort(metrics.begin(), metrics.end());
record.set_bottleneck(metrics.back().second.data(),
metrics.back().second.size());
return record;
}
}
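// Converts OpStats into a PodStatsDatabase with one record per (step, core)
// pair; cores missing from core_id_to_details are skipped with a warning.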
PodStatsDatabase ConvertOpStatsToPodStats(const OpStats& op_stats) {
PodStatsDatabase pod_stats_db;
const auto& core_id_map = op_stats.core_id_to_details();
for (int i = GenericEventType::kFirstGenericEventType;
i <= GenericEventType::kLastGenericEventType; i++) {
auto& event = *pod_stats_db.add_step_breakdown_events();
event.set_id(i);
absl::string_view type_str =
GetGenericEventTypeStr(static_cast<GenericEventType>(i));
event.set_name(type_str.data(), type_str.size());
}
for (const auto& step_sequence : op_stats.step_db().step_sequence()) {
for (const auto& entry : step_sequence.step_info_per_core()) {
if (!core_id_map.contains(entry.first)) {
LOG(WARNING) << "core_id_map does not contain " << entry.first;
continue;
}
const CoreDetails& details = core_id_map.at(entry.first);
*pod_stats_db.add_pod_stats_record() =
CreatePodStatsRecord(details.hostname(), entry.second);
}
}
PopulateStepDiagnostics(op_stats, pod_stats_db.mutable_diagnostics());
return pod_stats_db;
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include "google/protobuf/any.pb.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const double kMaxError = 1e-6;
constexpr int kStepNum = 2;
constexpr int kCoreId = 1001;
constexpr int kStepTimePs = 1000;
constexpr int kHostComputePs = 50;
constexpr int kHostCompilePs = 50;
constexpr int kHostToHostPs = 50;
constexpr int kHostToDevicePs = 50;
constexpr int kHostPreparePs = 50;
constexpr int kDeviceCollectivePs = 350;
constexpr int kHostWaitInputPs = 50;
constexpr int kDeviceToDevicePs = 50;
constexpr int kDeviceToHostPs = 50;
constexpr int kDeviceCompute32Ps = 50;
constexpr int kDeviceCompute16Ps = 50;
constexpr int kDeviceWaitDevicePs = 50;
constexpr int kDeviceWaitHostPs = 50;
constexpr int kUnknownTimePs = 50;
static constexpr char kHostname[] = "host:123";
void CreateOpStats(OpStats* op_stats) {
PerCoreStepInfo* info = op_stats->mutable_step_db()->add_step_sequence();
info->set_step_num(kStepNum);
StepInfoResult& step_info = (*info->mutable_step_info_per_core())[kCoreId];
step_info.set_step_num(kStepNum);
step_info.set_duration_ps(kStepTimePs);
GenericStepBreakdown breakdown;
auto& type_ps = *breakdown.mutable_type_ps();
type_ps[HOST_COMPUTE] = kHostComputePs;
type_ps[HOST_COMPILE] = kHostCompilePs;
type_ps[HOST_TO_HOST] = kHostToHostPs;
type_ps[HOST_TO_DEVICE] = kHostToDevicePs;
type_ps[HOST_PREPARE] = kHostPreparePs;
type_ps[DEVICE_COLLECTIVES] = kDeviceCollectivePs;
type_ps[HOST_WAIT_INPUT] = kHostWaitInputPs;
type_ps[DEVICE_TO_DEVICE] = kDeviceToDevicePs;
type_ps[DEVICE_TO_HOST] = kDeviceToHostPs;
type_ps[DEVICE_COMPUTE_32] = kDeviceCompute32Ps;
type_ps[DEVICE_COMPUTE_16] = kDeviceCompute16Ps;
type_ps[DEVICE_WAIT_DEVICE] = kDeviceWaitDevicePs;
type_ps[DEVICE_WAIT_HOST] = kDeviceWaitHostPs;
type_ps[UNKNOWN_TIME] = kUnknownTimePs;
step_info.mutable_step_breakdown()->PackFrom(breakdown);
CoreDetails& details = (*op_stats->mutable_core_id_to_details())[kCoreId];
details.set_hostname(kHostname);
}
TEST(OpStatsToPodStats, GpuPodStats) {
OpStats op_stats;
CreateOpStats(&op_stats);
PodStatsDatabase pod_stats_db = ConvertOpStatsToPodStats(op_stats);
EXPECT_EQ(1, pod_stats_db.pod_stats_record_size());
const PodStatsRecord& record = pod_stats_db.pod_stats_record(0);
EXPECT_EQ(kStepNum, record.step_num());
EXPECT_EQ(kHostname, record.host_name());
EXPECT_NEAR(tsl::profiler::PicoToMicro(kStepTimePs),
record.total_duration_us(), kMaxError);
const auto& breakdown = record.step_breakdown_us();
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceCompute32Ps + kDeviceCompute16Ps),
breakdown.at(kDeviceCompute), kMaxError);
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceToDevicePs + kDeviceWaitDevicePs),
breakdown.at(kDeviceToDevice), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceCollectivePs),
breakdown.at(kDeviceCollectives), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostComputePs),
breakdown.at(kHostCompute), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostPreparePs),
breakdown.at(kHostPrepare), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostWaitInputPs + kHostToDevicePs +
kDeviceWaitHostPs),
breakdown.at(kInput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceToHostPs),
breakdown.at(kOutput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostCompilePs),
breakdown.at(kCompile), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kUnknownTimePs),
breakdown.at(kAllOthers), kMaxError);
EXPECT_EQ(GetGenericEventTypeStr(kDeviceCollectives), record.bottleneck());
}
TEST(OpStatsToPodStats, Diagnostics) {
OpStats op_stats;
op_stats.mutable_step_db()->set_use_incomplete_step(true);
PodStatsDatabase pod_stats_db = ConvertOpStatsToPodStats(op_stats);
EXPECT_EQ(1, pod_stats_db.diagnostics().warnings_size());
EXPECT_EQ(kErrorIncompleteStep, pod_stats_db.diagnostics().warnings(0));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_pod_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_pod_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f7259880-9f62-41fd-9467-e1f62aa32ba3 | cpp | google/tensorstore | estimate_heap_usage | tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h | tensorstore/internal/estimate_heap_usage/estimate_heap_usage_test.cc | #ifndef TENSORSTORE_INTERNAL_ESTIMATE_HEAP_USAGE_ESTIMATE_HEAP_USAGE_H_
#define TENSORSTORE_INTERNAL_ESTIMATE_HEAP_USAGE_ESTIMATE_HEAP_USAGE_H_
#include <stddef.h>
#include <memory>
#include <string>
#include <type_traits>
#include "absl/strings/cord.h"
#include "tensorstore/util/apply_members/apply_members.h"
namespace tensorstore {
namespace internal {
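// HeapUsageEstimator<T> may be specialized to compute the heap memory owned
// by a value of type T; specializations expose EstimateHeapUsage(x, max_depth)
// and may also define a static constexpr MayUseHeapMemory().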
template <typename T, typename SFINAE = void>
struct HeapUsageEstimator;
template <typename T, typename SFINAE = void>
constexpr inline bool MayUseHeapMemory = true;
template <typename T>
constexpr inline bool MayUseHeapMemory<
T, std::enable_if_t<
!std::is_trivially_destructible_v<T>,
std::void_t<decltype(&HeapUsageEstimator<T>::MayUseHeapMemory)>>> =
HeapUsageEstimator<T>::MayUseHeapMemory();
template <typename T>
constexpr inline bool
MayUseHeapMemory<T, std::enable_if_t<std::is_trivially_destructible_v<T>>> =
false;
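// Returns an estimate of the heap bytes owned by `x`, following pointers at
// most `max_depth` levels deep. Returns 0 for types that cannot own heap
// memory.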
template <typename T>
size_t EstimateHeapUsage(const T& x, size_t max_depth = -1) {
if constexpr (!MayUseHeapMemory<T>) {
return 0;
} else {
return HeapUsageEstimator<T>::EstimateHeapUsage(x, max_depth);
}
}
struct MayAnyUseHeapMemory {
template <typename... T>
constexpr auto operator()(const T&... arg) const {
return std::integral_constant<bool, (MayUseHeapMemory<T> || ...)>{};
}
};
template <typename T>
struct HeapUsageEstimator<T, std::enable_if_t<SupportsApplyMembers<T>>> {
static size_t EstimateHeapUsage(const T& v, size_t max_depth) {
return ApplyMembers<T>::Apply(v, [&](auto&&... x) {
return (internal::EstimateHeapUsage(x, max_depth) + ... +
static_cast<size_t>(0));
});
}
static constexpr bool MayUseHeapMemory() {
return decltype(ApplyMembers<T>::Apply(std::declval<const T&>(),
MayAnyUseHeapMemory{}))::value;
}
};
template <>
struct HeapUsageEstimator<std::string> {
static size_t EstimateHeapUsage(const std::string& x, size_t max_depth) {
return x.capacity();
}
};
template <>
struct HeapUsageEstimator<absl::Cord> {
static size_t EstimateHeapUsage(const absl::Cord& x, size_t max_depth) {
return x.size();
}
};
template <typename T>
struct PointerHeapUsageEstimator {
static size_t EstimateHeapUsage(const T& x, size_t max_depth) {
if (!x) return 0;
size_t total = sizeof(*x);
if (max_depth > 0) {
total += internal::EstimateHeapUsage(*x);
}
return total;
}
};
template <typename T>
struct HeapUsageEstimator<std::shared_ptr<T>>
: public PointerHeapUsageEstimator<std::shared_ptr<T>> {};
template <typename T>
struct HeapUsageEstimator<std::unique_ptr<T>>
: public PointerHeapUsageEstimator<std::unique_ptr<T>> {};
template <typename T, typename R>
class IntrusivePtr;
template <typename T, typename R>
struct HeapUsageEstimator<IntrusivePtr<T, R>>
: public PointerHeapUsageEstimator<IntrusivePtr<T, R>> {};
}
}
#endif | #include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include <optional>
#include <tuple>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/estimate_heap_usage/std_optional.h"
#include "tensorstore/internal/estimate_heap_usage/std_variant.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/apply_members/std_tuple.h"
namespace {
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::EstimateHeapUsage;
using ::tensorstore::internal::IntrusivePtr;
TEST(EstimateHeapUsageTest, Trivial) {
EXPECT_EQ(0, EstimateHeapUsage(5));
struct Trivial {};
EXPECT_EQ(0, EstimateHeapUsage(Trivial{}));
}
TEST(EstimateHeapUsageTest, String) {
std::string s(1000, 'x');
EXPECT_EQ(s.capacity(), EstimateHeapUsage(s));
}
TEST(EstimateHeapUsageTest, Cord) {
auto cord = absl::Cord(std::string(1000, 'x'));
EXPECT_EQ(cord.size(), EstimateHeapUsage(cord));
}
TEST(EstimateHeapUsageTest, Optional) {
EXPECT_EQ(0, EstimateHeapUsage(std::optional<int>()));
EXPECT_EQ(0, EstimateHeapUsage(std::optional<int>(42)));
EXPECT_EQ(0, EstimateHeapUsage(std::optional<std::string>()));
auto o = std::optional<std::string>(std::in_place, 1000, 'x');
EXPECT_EQ(o->capacity(), EstimateHeapUsage(o));
}
TEST(EstimateHeapUsageTest, UniquePtr) {
std::unique_ptr<int> ptr;
EXPECT_EQ(0, EstimateHeapUsage(ptr));
ptr.reset(new int);
EXPECT_EQ(sizeof(int), EstimateHeapUsage(ptr));
}
TEST(EstimateHeapUsageTest, SharedPtr) {
std::shared_ptr<int> ptr;
EXPECT_EQ(0, EstimateHeapUsage(ptr));
ptr.reset(new int);
EXPECT_EQ(sizeof(int), EstimateHeapUsage(ptr));
}
struct Foo : public AtomicReferenceCount<Foo> {
int x;
constexpr static auto ApplyMembers = [](auto& x, auto f) { return f(x.x); };
};
TEST(EstimateHeapUsageTest, IntrusivePtr) {
IntrusivePtr<Foo> ptr;
EXPECT_EQ(0, EstimateHeapUsage(ptr));
ptr.reset(new Foo);
EXPECT_EQ(sizeof(Foo), EstimateHeapUsage(ptr));
}
TEST(EstimateHeapUsageTest, Vector) {
std::vector<std::string> v;
v.push_back(std::string(1000, 'x'));
v.push_back(std::string(5000, 'x'));
size_t expected =
v[0].capacity() + v[1].capacity() + v.capacity() * sizeof(std::string);
EXPECT_EQ(expected, EstimateHeapUsage(v));
EXPECT_EQ(v.capacity() * sizeof(std::string), EstimateHeapUsage(v, 0));
}
TEST(EstimateHeapUsageTest, Composite) {
std::variant<std::vector<std::string>, std::vector<int>> v;
v = std::vector<std::string>({"a", "b"});
{
auto& string_vec = std::get<std::vector<std::string>>(v);
EXPECT_EQ(string_vec.capacity() * sizeof(std::string) +
string_vec[0].capacity() + string_vec[1].capacity(),
EstimateHeapUsage(v));
EXPECT_EQ(string_vec.capacity() * sizeof(std::string),
EstimateHeapUsage(v, 0));
}
v = std::vector<int>({1, 2, 3});
{
auto& int_vec = std::get<std::vector<int>>(v);
EXPECT_EQ(int_vec.capacity() * sizeof(int), EstimateHeapUsage(v));
}
}
TEST(EstimateHeapUsageTest, Tuple) {
auto t = std::tuple{std::string(1000, 'x'), std::string(5000, 'x')};
auto& [s0, s1] = t;
EXPECT_EQ(s0.capacity() + s1.capacity(), EstimateHeapUsage(t));
}
TEST(EstimateHeapUsageTest, Variant) {
using Variant = std::variant<int, std::string>;
EXPECT_EQ(0, EstimateHeapUsage(Variant(5)));
std::string s(1000, 'x');
size_t capacity = s.capacity();
EXPECT_EQ(capacity, EstimateHeapUsage(Variant(std::move(s))));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/estimate_heap_usage/estimate_heap_usage_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e49465f7-3466-44da-bff8-071558a62945 | cpp | tensorflow/tensorflow | tf_type_utils | tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.cc | tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.h"
#include "absl/status/status.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
namespace mlir::quant::tensorflow {
bool IsTFQintType(const Type type) {
return mlir::isa<TF::Qint8Type, TF::Qint16Type, TF::Qint32Type,
TF::Quint8Type, TF::Quint16Type>(type);
}
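// Maps a TF quantized storage type to a builtin IntegerType of the same
// width: qintN -> signless iN, quintN -> unsigned iN. Any other type is
// returned unchanged.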
Type GetIntTypeFromTFQint(const Type type) {
return TypeSwitch<Type, Type>(type)
.Case<TF::Qint8Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 8); })
.Case<TF::Qint16Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 16); })
.Case<TF::Qint32Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 32); })
.Case<TF::Quint8Type>([&type](Type) {
return IntegerType::get(type.getContext(), 8,
IntegerType::SignednessSemantics::Unsigned);
})
.Case<TF::Quint16Type>([&type](Type) {
return IntegerType::get(type.getContext(), 16,
IntegerType::SignednessSemantics::Unsigned);
})
.Default([&type](Type) { return type; });
}
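// Demangles a serialized TensorProto, rebuilds it via Tensor::FromProto,
// and re-expresses the data as a DenseElementsAttr over the matching
// integer storage type. Only DT_QINT8 and DT_QINT32 are supported;
// anything else (including DT_QINT16) yields failure().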
FailureOr<mlir::DenseElementsAttr> GetDenseAttrFromTensorProtoAttr(
const llvm::StringRef mangled_tensor_proto, TensorType tensor_type) {
::tensorflow::TensorProto tensor_proto;
absl::Status status = ::tensorflow::mangling_util::DemangleTensor(
mangled_tensor_proto, &tensor_proto);
if (!status.ok()) {
return failure();
}
::tensorflow::Tensor t;
if (!t.FromProto(tensor_proto)) {
return failure();
}
if (t.dtype() == ::tensorflow::DT_QINT8) {
const auto arr = t.flat<::tensorflow::qint8>();
return mlir::DenseElementsAttr::get(
tensor_type.clone(IntegerType::get(tensor_type.getContext(), 8)),
llvm::ArrayRef(arr.data(), arr.size()));
} else if (t.dtype() == ::tensorflow::DT_QINT32) {
const auto arr = t.flat<::tensorflow::qint32>();
return mlir::DenseElementsAttr::get(
tensor_type.clone(IntegerType::get(tensor_type.getContext(), 32)),
llvm::ArrayRef(arr.data(), arr.size()));
} else {
return failure();
}
}
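// Returns true for the TF uniform-quantization ops, i.e. the ops whose
// quantized operands/results are subject to the qint-to-int rewriting above.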
bool IsTFUniformQuantizedOp(Operation *op) {
return llvm::isa<
TF::UniformDequantizeOp,
TF::UniformQuantizeOp,
TF::UniformQuantizedAddOp,
TF::UniformQuantizedClipByValueOp,
TF::UniformQuantizedConvolutionHybridOp,
TF::UniformQuantizedConvolutionOp,
TF::UniformQuantizedDotHybridOp,
TF::UniformQuantizedDotOp,
TF::UniformRequantizeOp
>(op);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/tsl/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir::quant::tensorflow {
namespace {
std::string GetQint8Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT8, {2, 2});
tensor.matrix<tsl::qint8>()(0, 0) = tsl::qint8(1);
tensor.matrix<tsl::qint8>()(0, 1) = tsl::qint8(2);
tensor.matrix<tsl::qint8>()(1, 0) = tsl::qint8(3);
tensor.matrix<tsl::qint8>()(1, 1) = tsl::qint8(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::string GetQint16Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT16, {2, 2});
tensor.matrix<tsl::qint16>()(0, 0) = tsl::qint16(1);
tensor.matrix<tsl::qint16>()(0, 1) = tsl::qint16(2);
tensor.matrix<tsl::qint16>()(1, 0) = tsl::qint16(3);
tensor.matrix<tsl::qint16>()(1, 1) = tsl::qint16(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::string GetQint32Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT32, {2, 2});
tensor.matrix<tsl::qint32>()(0, 0) = tsl::qint32(1);
tensor.matrix<tsl::qint32>()(0, 1) = tsl::qint32(2);
tensor.matrix<tsl::qint32>()(1, 0) = tsl::qint32(3);
tensor.matrix<tsl::qint32>()(1, 1) = tsl::qint32(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::unique_ptr<MLIRContext> CreateContext() {
auto context = std::make_unique<MLIRContext>();
DialectRegistry mlir_registry;
RegisterCommonToolingDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
context->getOrLoadDialect<tf_type::TFTypeDialect>();
context->getOrLoadDialect<quant::QuantDialect>();
context->getOrLoadDialect<mlir::mhlo::MhloDialect>();
context->getOrLoadDialect<sparse_tensor::SparseTensorDialect>();
return context;
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint8ToUQ8Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type = RankedTensorType::get(
{2, 2}, quant::UniformQuantizedType::get(
quant::QuantizationFlags::FlagValue::Signed,
IntegerType::get(context.get(), 8),
FloatType::getF32(context.get()), 3.0, 2, -128, 127));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint8Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int8_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int8_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int8_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int8_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int8_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint8ToInt8Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint8Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int8_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int8_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int8_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int8_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int8_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint32ToUQ32Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type = RankedTensorType::get(
{2, 2},
quant::UniformQuantizedType::get(
quant::QuantizationFlags::FlagValue::Signed,
IntegerType::get(context.get(), 32), FloatType::getF32(context.get()),
3.0, 2, -2147483648, 2147483647));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint32Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int32_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int32_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int32_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int32_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int32_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint32ToInt32Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint32Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int32_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int32_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int32_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int32_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int32_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, UnsupportedQint16Fails) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16));
EXPECT_TRUE(failed(
GetDenseAttrFromTensorProtoAttr(GetQint16Tensor(), result_tensor_type)));
}
TEST(IsTFQintTypeTest, ValidTFQintTypeSucceeds) {
auto context = CreateContext();
EXPECT_TRUE(IsTFQintType(TF::Qint8Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Qint16Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Qint32Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Quint8Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Quint16Type::get(context.get())));
EXPECT_FALSE(IsTFQintType(TF::Int8RefType::get(context.get())));
EXPECT_FALSE(IsTFQintType(TF::Float8E5M2RefType::get(context.get())));
}
TEST(GetIntTypeFromTFQintTest, ChecksIntTypesFromTFQint) {
auto context = CreateContext();
auto type = GetIntTypeFromTFQint(TF::Qint8Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 8);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Qint16Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 16);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Qint32Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 32);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Quint8Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 8);
EXPECT_TRUE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Quint16Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 16);
EXPECT_TRUE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
EXPECT_EQ(GetIntTypeFromTFQint(IntegerType::get(type.getContext(), 32)),
IntegerType::get(type.getContext(), 32));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5d32953b-d4e0-443f-b032-8a137a203bae | cpp | google/cel-cpp | evaluator_stack | eval/eval/evaluator_stack.cc | eval/eval/evaluator_stack_test.cc | #include "eval/eval/evaluator_stack.h"
namespace google::api::expr::runtime {
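// Clear drops both parallel stacks and resets the cached size, keeping
// values and their attribute trails index-aligned (see the StackBalanced
// test below).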
void EvaluatorStack::Clear() {
stack_.clear();
attribute_stack_.clear();
current_size_ = 0;
}
} | #include "eval/eval/evaluator_stack.h"
#include "base/attribute.h"
#include "base/type_provider.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::TypeFactory;
using ::cel::TypeManager;
using ::cel::TypeProvider;
using ::cel::ValueManager;
using ::cel::extensions::ProtoMemoryManagerRef;
TEST(EvaluatorStackTest, StackPushPop) {
google::protobuf::Arena arena;
auto manager = ProtoMemoryManagerRef(&arena);
cel::common_internal::LegacyValueManager value_factory(
manager, TypeProvider::Builtin());
cel::Attribute attribute("name", {});
EvaluatorStack stack(10);
stack.Push(value_factory.CreateIntValue(1));
stack.Push(value_factory.CreateIntValue(2), AttributeTrail());
stack.Push(value_factory.CreateIntValue(3), AttributeTrail("name"));
ASSERT_EQ(stack.Peek().GetInt().NativeValue(), 3);
ASSERT_FALSE(stack.PeekAttribute().empty());
ASSERT_EQ(stack.PeekAttribute().attribute(), attribute);
stack.Pop(1);
ASSERT_EQ(stack.Peek().GetInt().NativeValue(), 2);
ASSERT_TRUE(stack.PeekAttribute().empty());
stack.Pop(1);
ASSERT_EQ(stack.Peek().GetInt().NativeValue(), 1);
ASSERT_TRUE(stack.PeekAttribute().empty());
}
TEST(EvaluatorStackTest, StackBalanced) {
google::protobuf::Arena arena;
auto manager = ProtoMemoryManagerRef(&arena);
cel::common_internal::LegacyValueManager value_factory(
manager, TypeProvider::Builtin());
EvaluatorStack stack(10);
ASSERT_EQ(stack.size(), stack.attribute_size());
stack.Push(value_factory.CreateIntValue(1));
ASSERT_EQ(stack.size(), stack.attribute_size());
stack.Push(value_factory.CreateIntValue(2), AttributeTrail());
stack.Push(value_factory.CreateIntValue(3), AttributeTrail());
ASSERT_EQ(stack.size(), stack.attribute_size());
stack.PopAndPush(value_factory.CreateIntValue(4), AttributeTrail());
ASSERT_EQ(stack.size(), stack.attribute_size());
stack.PopAndPush(value_factory.CreateIntValue(5));
ASSERT_EQ(stack.size(), stack.attribute_size());
stack.Pop(3);
ASSERT_EQ(stack.size(), stack.attribute_size());
}
TEST(EvaluatorStackTest, Clear) {
google::protobuf::Arena arena;
auto manager = ProtoMemoryManagerRef(&arena);
cel::common_internal::LegacyValueManager value_factory(
manager, TypeProvider::Builtin());
EvaluatorStack stack(10);
ASSERT_EQ(stack.size(), stack.attribute_size());
stack.Push(value_factory.CreateIntValue(1));
stack.Push(value_factory.CreateIntValue(2), AttributeTrail());
stack.Push(value_factory.CreateIntValue(3), AttributeTrail());
ASSERT_EQ(stack.size(), 3);
stack.Clear();
ASSERT_EQ(stack.size(), 0);
ASSERT_TRUE(stack.empty());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/evaluator_stack.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/evaluator_stack_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
c610c4bf-ddc0-4b3d-946d-435e6dbf2fa1 | cpp | tensorflow/tensorflow | coordination_service_agent | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent_test.cc | #include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_error_util.h"
#include "xla/tsl/framework/cancellation.h"
#include "xla/tsl/lib/monitoring/gauge.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status.h"
#include "tsl/platform/thread_annotations.h"
namespace tsl {
using tensorflow::CoordinatedTask;
using tensorflow::CoordinatedTaskState;
using tensorflow::CoordinatedTaskStateInfo;
using tensorflow::CoordinationServiceConfig;
using tensorflow::DeviceInfo;
using tensorflow::KeyValueEntry;
namespace {
auto* enabled_usage_metric =
monitoring::Gauge<bool, 0>::New("/coordination_service/agent/enabled",
"Tracks usage of coordination service.");
constexpr absl::Duration kDefaultClusterRegisterTimeout = absl::Hours(1);
constexpr absl::Duration kDefaultHeartbeatTimeout = absl::Seconds(10);
constexpr absl::Duration kDefaultShutdownTimeout = absl::Seconds(10);
constexpr char kHeartbeatThread[] = "CoordinationServiceHeartbeatLoop";
constexpr char kErrorPollingThread[] = "CoordinationServiceErrorPolling";
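// Implementation of the CoordinationServiceAgent interface. A typical
// lifecycle, as a rough sketch (error handling elided; not prescriptive):
//
//   agent->Initialize(env, "worker", /*task_id=*/0, configs,
//                     std::move(leader_client), error_fn);
//   agent->Connect();   // registers with the leader; spawns the heartbeat
//                       // and (optionally) error-polling threads
//   ...key-value store / barrier calls...
//   agent->Shutdown();  // optional shutdown barrier, then thread teardown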
class CoordinationServiceAgentImpl : public CoordinationServiceAgent {
public:
CoordinationServiceAgentImpl() = default;
~CoordinationServiceAgentImpl() override {
absl::Status s = ShutdownInternal();
VLOG(3) << "Coordination agent dtor failed with status: " << s;
}
absl::Status Initialize(Env* env, std::string_view job_name, int task_id,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn) override;
absl::Status Initialize(Env* env, const CoordinatedTask& task,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn) override;
bool IsInitialized() override;
bool IsConnected() override;
bool IsError() override;
absl::Status Connect() override;
absl::Status WaitForAllTasks(const DeviceInfo& local_devices) override;
const DeviceInfo& GetClusterDeviceInfo() override;
absl::StatusOr<CoordinatedTask> GetOwnTask() override;
absl::StatusOr<std::vector<CoordinatedTaskStateInfo>> GetTaskState(
const std::vector<CoordinatedTask>& task) override;
absl::Status ReportError(const absl::Status& error) override;
absl::Status Shutdown() override;
absl::Status Reset() override;
absl::StatusOr<std::string> GetKeyValue(std::string_view key) override;
absl::StatusOr<std::string> GetKeyValue(std::string_view key,
absl::Duration timeout) override;
std::shared_ptr<CallOptions> GetKeyValueAsync(
std::string_view key, StatusOrValueCallback done) override;
absl::StatusOr<std::string> TryGetKeyValue(std::string_view key) override;
absl::StatusOr<std::vector<KeyValueEntry>> GetKeyValueDir(
std::string_view key) override;
void GetKeyValueDirAsync(std::string_view key,
StatusOrValueDirCallback done) override;
absl::Status InsertKeyValue(std::string_view key,
std::string_view value) override;
absl::Status InsertKeyValue(std::string_view key, std::string_view value,
bool allow_overwrite) override;
absl::Status DeleteKeyValue(std::string_view key) override;
absl::Status UpdateKeyValue(std::string_view key,
std::string_view value) override;
absl::Status StartWatchKey(std::string_view key,
ChangedKeyValuesCallback on_change) override;
absl::Status StopWatchKey(std::string_view key) override;
absl::Status WaitAtBarrier(
std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks) override;
void WaitAtBarrierAsync(std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks,
StatusCallback done) override;
absl::Status CancelBarrier(std::string_view barrier_id) override;
void CancelBarrierAsync(std::string_view barrier_id,
StatusCallback done) override;
absl::StatusOr<Env*> GetEnv() override;
protected:
void SetError(const absl::Status& error) override;
absl::Status ActivateWatch(
std::string_view key, const std::map<std::string, std::string>&) override;
absl::Status ValidateRunningAgent(bool allow_disconnected = false);
void StopHeartbeat();
private:
absl::Status ShutdownInternal();
void StartSendingHeartbeats();
absl::Status PollForError();
std::shared_ptr<CallOptions> PollForErrorAsync(StatusCallback done);
void StartPollingForError();
void StopErrorPolling();
void ResetCancellationManager();
Env* env_ = nullptr;
const uint64_t incarnation_id_ = random::New64();
CoordinatedTask task_;
CoordinationServiceConfig configs_;
StatusCallback error_fn_;
mutable absl::Mutex state_mu_;
CoordinatedTaskState state_ TF_GUARDED_BY(state_mu_) =
CoordinatedTaskState::TASKSTATE_UNINITIALIZED;
absl::Status status_ TF_GUARDED_BY(state_mu_) = absl::OkStatus();
absl::flat_hash_set<std::string> used_barrier_ids_ TF_GUARDED_BY(state_mu_);
uint64_t leader_incarnation_ = 0;
DeviceInfo cluster_devices_;
absl::Mutex heartbeat_thread_shutdown_mu_;
absl::CondVar heartbeat_thread_cv_;
bool shutting_down_ TF_GUARDED_BY(heartbeat_thread_shutdown_mu_) = false;
std::unique_ptr<Thread> heartbeat_thread_;
std::unique_ptr<Thread> error_polling_thread_;
CancellationManager cancellation_manager_;
std::unique_ptr<CancellationManager> error_polling_cancellation_manager_ =
std::make_unique<CancellationManager>();
std::unique_ptr<CoordinationClient> leader_client_;
CoordinationServiceAgentImpl(const CoordinationServiceAgentImpl&) = delete;
void operator=(const CoordinationServiceAgentImpl&) = delete;
};
absl::Status CoordinationServiceAgentImpl::Initialize(
Env* env, std::string_view job_name, int task_id,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn) {
CoordinatedTask task;
task.set_job_name(std::string(job_name));
task.set_task_id(task_id);
return Initialize(env, task, configs, std::move(leader_client), error_fn);
}
absl::Status CoordinationServiceAgentImpl::Initialize(
Env* env, const CoordinatedTask& task,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn) {
enabled_usage_metric->GetCell()->Set(true);
absl::MutexLock l(&state_mu_);
if (state_ != CoordinatedTaskState::TASKSTATE_UNINITIALIZED) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent has already been initialized."));
}
env_ = env;
task_ = task;
configs_ = configs;
if (configs_.service_leader().empty()) {
return MakeCoordinationError(absl::InvalidArgumentError(
"CoordinationServiceAgent must be initialized with a valid leader."));
}
leader_client_ = std::move(leader_client);
if (leader_client_ == nullptr) {
return MakeCoordinationError(absl::InvalidArgumentError(
"CoordinationServiceAgent must have a valid leader client."));
}
error_fn_ = error_fn;
state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
return absl::OkStatus();
}
bool CoordinationServiceAgentImpl::IsInitialized() {
absl::MutexLock l(&state_mu_);
return state_ != CoordinatedTaskState::TASKSTATE_UNINITIALIZED;
}
bool CoordinationServiceAgentImpl::IsConnected() {
absl::MutexLock l(&state_mu_);
return state_ == CoordinatedTaskState::TASKSTATE_CONNECTED;
}
bool CoordinationServiceAgentImpl::IsError() {
absl::MutexLock l(&state_mu_);
return state_ == CoordinatedTaskState::TASKSTATE_ERROR;
}
void CoordinationServiceAgentImpl::StopHeartbeat() {
{
absl::MutexLock l(&heartbeat_thread_shutdown_mu_);
shutting_down_ = true;
heartbeat_thread_cv_.SignalAll();
}
heartbeat_thread_ = nullptr;
}
void CoordinationServiceAgentImpl::StopErrorPolling() {
error_polling_cancellation_manager_->StartCancel();
error_polling_thread_ = nullptr;
}
void CoordinationServiceAgentImpl::ResetCancellationManager() {
error_polling_cancellation_manager_ = std::make_unique<CancellationManager>();
}
absl::Status CoordinationServiceAgentImpl::Connect() {
VLOG(3) << "Agent has started trying to Connect().";
{
absl::MutexLock l(&state_mu_);
if (state_ != CoordinatedTaskState::TASKSTATE_DISCONNECTED) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent is not in DISCONNECTED state."));
}
}
absl::Status connect_status =
absl::UnknownError("Connection not attempted yet.");
RegisterTaskRequest request;
*request.mutable_source_task() = task_;
request.set_incarnation(incarnation_id_);
RegisterTaskResponse response;
const int64_t register_timeout =
configs_.cluster_register_timeout_in_ms() > 0
? configs_.cluster_register_timeout_in_ms()
: absl::ToInt64Milliseconds(kDefaultClusterRegisterTimeout);
const absl::Time deadline =
absl::Now() + absl::Milliseconds(register_timeout);
int attempt = 0;
std::default_random_engine generator;
std::uniform_real_distribution<double> distribution(0.0, 1.0);
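  // Retry with capped exponential backoff plus uniform jitter: sleep up to
  // 2^min(14, attempt) milliseconds between attempts, until the registration
  // deadline. Plain transport errors (no coordination payload), Aborted, and
  // Internal are retried; other coordination-service errors end the loop.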
do {
++attempt;
CallOptions call_opts;
call_opts.SetTimeout(absl::ToInt64Milliseconds(deadline - absl::Now()));
absl::Notification n;
leader_client_->RegisterTaskAsync(
&call_opts, &request, &response, [&](absl::Status s) {
if (s.ok()) {
leader_incarnation_ = response.leader_incarnation();
{
absl::MutexLock l(&state_mu_);
state_ = CoordinatedTaskState::TASKSTATE_CONNECTED;
}
}
connect_status = s;
n.Notify();
});
n.WaitForNotification();
if (!connect_status.ok()) {
const int backoff = 1 << std::min(14, attempt);
absl::SleepFor(absl::Milliseconds(backoff * distribution(generator)));
}
} while (!connect_status.ok() && absl::Now() < deadline &&
(connect_status.GetPayload(CoordinationErrorPayloadKey()) ==
std::nullopt ||
absl::IsAborted(connect_status) ||
absl::IsInternal(connect_status)));
if (!connect_status.ok()) {
SetError(connect_status);
return connect_status;
}
LOG(INFO) << "Coordination agent has successfully connected.";
heartbeat_thread_.reset(env_->StartThread(
ThreadOptions(), kHeartbeatThread,
absl::bind_front(&CoordinationServiceAgentImpl::StartSendingHeartbeats,
this)));
if (configs_.poll_for_error_from_service_at_startup()) {
error_polling_thread_.reset(env_->StartThread(
ThreadOptions(), kErrorPollingThread,
absl::bind_front(&CoordinationServiceAgentImpl::StartPollingForError,
this)));
}
return absl::OkStatus();
}
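// Heartbeat loop: pings the leader at half the configured timeout so that
// two heartbeats fit in each timeout window. A mismatched leader
// incarnation means the leader restarted and is surfaced as an error.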
void CoordinationServiceAgentImpl::StartSendingHeartbeats() {
HeartbeatRequest request;
*request.mutable_source_task() = task_;
request.set_incarnation(incarnation_id_);
HeartbeatResponse response;
const int64_t heartbeat_interval_ms =
configs_.heartbeat_timeout_in_ms() > 0
? configs_.heartbeat_timeout_in_ms() / 2
: absl::ToInt64Milliseconds(kDefaultHeartbeatTimeout) / 2;
CallOptions call_opts;
call_opts.SetTimeout(heartbeat_interval_ms);
while (true) {
absl::Status status;
absl::Notification n;
VLOG(10) << "HeartbeatRequest: " << request.DebugString();
leader_client_->HeartbeatAsync(&call_opts, &request, &response,
[&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
VLOG(10) << "HeartbeatResponse: " << status;
if (!status.ok()) {
absl::SleepFor(absl::Seconds(1));
{
absl::MutexLock l(&heartbeat_thread_shutdown_mu_);
if (shutting_down_) {
return;
}
}
SetError(status);
} else if (response.leader_incarnation() != leader_incarnation_) {
SetError(MakeCoordinationError(
absl::AbortedError("Leader incarnation ID mismatch: the "
"coordination leader has restarted.")));
}
{
absl::MutexLock l(&heartbeat_thread_shutdown_mu_);
heartbeat_thread_cv_.WaitWithTimeout(
&heartbeat_thread_shutdown_mu_,
absl::Milliseconds(heartbeat_interval_ms));
if (shutting_down_) {
return;
}
}
}
}
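// Long-polls the service for errors reported by any task. PollForError()
// only ever returns non-OK: either a propagated error, or Cancelled when
// the agent or the service is shutting down.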
void CoordinationServiceAgentImpl::StartPollingForError() {
LOG(INFO) << "Polling for error from coordination service. This thread will "
"run until an error is encountered or the agent is shutdown.";
absl::Status status = PollForError();
CHECK(!status.ok()) << "PollForError returned OK status. Should "
"always return an error.";
if (absl::IsCancelled(status)) {
LOG(INFO) << "Cancelling error polling because the service or the agent is "
"shutting down.";
return;
}
LOG(ERROR) << "An error is returned from coordination service (this can be "
"an error from this or another task).";
SetError(status);
}
absl::Status CoordinationServiceAgentImpl::PollForError() {
absl::Status status = absl::OkStatus();
absl::Notification n;
PollForErrorAsync([&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
CHECK(!status.ok())
<< "PollForError returned OK status. Should always return an error.";
return status;
}
std::shared_ptr<CallOptions> CoordinationServiceAgentImpl::PollForErrorAsync(
StatusCallback done) {
auto call_opts = std::make_shared<CallOptions>();
  absl::Status agent_running_status =
      ValidateRunningAgent(/*allow_disconnected=*/true);
if (!agent_running_status.ok()) {
done(agent_running_status);
return call_opts;
}
auto request = std::make_shared<PollForErrorRequest>();
auto response = std::make_shared<PollForErrorResponse>();
*request->mutable_source_task() = task_;
VLOG(3) << "PollForErrorRequest: " << request->DebugString();
const CancellationToken token =
error_polling_cancellation_manager_->get_cancellation_token();
const bool already_cancelled =
!error_polling_cancellation_manager_->RegisterCallback(
token, [call_opts]() { call_opts->StartCancel(); });
if (already_cancelled) {
done(absl::CancelledError("PollForErrorAsync() was cancelled."));
return call_opts;
}
leader_client_->PollForErrorAsync(
call_opts.get(), request.get(), response.get(),
[call_opts, request, response, done = std::move(done),
&cm = error_polling_cancellation_manager_,
token](const absl::Status& s) {
cm->TryDeregisterCallback(token);
done(s);
});
return call_opts;
}
absl::Status CoordinationServiceAgentImpl::WaitForAllTasks(
const DeviceInfo& local_devices) {
absl::Status agent_running_status = ValidateRunningAgent();
if (!agent_running_status.ok()) {
return agent_running_status;
}
WaitForAllTasksRequest request;
*request.mutable_source_task() = task_;
*request.mutable_device_info() = local_devices;
VLOG(3) << "WaitForAllTasksRequest: " << request.DebugString();
WaitForAllTasksResponse response;
absl::Status status;
absl::Notification n;
leader_client_->WaitForAllTasksAsync(&request, &response,
[&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
if (!status.ok()) {
VLOG(3) << "WaitForAllTasksResponse: " << status;
SetError(status);
return status;
}
VLOG(3) << "WaitForAllTasksResponse: " << response.DebugString();
cluster_devices_ = response.device_info();
return absl::OkStatus();
}
const DeviceInfo& CoordinationServiceAgentImpl::GetClusterDeviceInfo() {
return cluster_devices_;
}
absl::StatusOr<CoordinatedTask> CoordinationServiceAgentImpl::GetOwnTask() {
if (!IsInitialized()) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Agent has not been initialized; we do not "
"know the associated task yet."));
}
return task_;
}
absl::StatusOr<std::vector<CoordinatedTaskStateInfo>>
CoordinationServiceAgentImpl::GetTaskState(
const std::vector<CoordinatedTask>& tasks) {
GetTaskStateRequest request;
*request.mutable_source_task() = {tasks.begin(), tasks.end()};
GetTaskStateResponse response;
absl::Notification n;
absl::StatusOr<std::vector<CoordinatedTaskStateInfo>> result;
leader_client_->GetTaskStateAsync(
&request, &response, [&](const absl::Status& s) {
if (s.ok()) {
result = std::vector<CoordinatedTaskStateInfo>(
std::make_move_iterator(response.task_state().begin()),
std::make_move_iterator(response.task_state().end()));
} else {
result = s;
}
n.Notify();
});
n.WaitForNotification();
return result;
}
absl::Status CoordinationServiceAgentImpl::ReportError(
const absl::Status& error) {
{
absl::MutexLock l(&state_mu_);
if (state_ == CoordinatedTaskState::TASKSTATE_UNINITIALIZED) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent must be initialized first before "
"reporting error."));
} else if (state_ == CoordinatedTaskState::TASKSTATE_ERROR) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent is already in error state."));
}
}
  SetError(MakeCoordinationError(error, task_,
                                 true));  // true: error reported by this task.
LOG(INFO) << "Reporting error to coordination service: " << error;
ReportErrorToServiceRequest request;
request.set_error_code(error.raw_code());
request.set_error_message(std::string(error.message()));
*request.mutable_error_origin() = task_;
VLOG(5) << "ReportErrorToServiceRequest: " << request.DebugString();
ReportErrorToServiceResponse response;
absl::Notification n;
leader_client_->ReportErrorToServiceAsync(
&request, &response, [&](absl::Status s) {
VLOG(5) << "ReportErrorToServiceResponse: " << s;
if (!s.ok()) {
LOG(ERROR)
<< "Encountered another error when reporting error to "
"coordination service: "
<< s
<< "\nThis is usually caused by an earlier error during "
"execution. Check the logs (this task or the leader) for "
"an earlier error to debug further.";
}
n.Notify();
});
n.WaitForNotification();
return absl::OkStatus();
}
absl::Status CoordinationServiceAgentImpl::Shutdown() {
return ShutdownInternal();
}
absl::Status CoordinationServiceAgentImpl::ShutdownInternal() {
absl::Status status = absl::OkStatus();
bool is_connected = false;
{
absl::MutexLock l(&state_mu_);
is_connected = state_ == CoordinatedTaskState::TASKSTATE_CONNECTED;
}
if (!configs_.agent_destruction_without_shutdown() && is_connected) {
LOG(INFO) << "Coordination agent has initiated Shutdown().";
ShutdownTaskRequest request;
*request.mutable_source_task() = task_;
ShutdownTaskResponse response;
CallOptions call_opts;
const int64_t shutdown_timeout =
configs_.shutdown_barrier_timeout_in_ms() > 0
? configs_.shutdown_barrier_timeout_in_ms()
: absl::ToInt64Milliseconds(kDefaultShutdownTimeout);
call_opts.SetTimeout(shutdown_timeout);
absl::Notification n;
leader_client_->ShutdownTaskAsync(&call_opts, &request, &response,
[&status, &n](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
if (status.ok()) {
LOG(INFO) << "Coordination agent has successfully shut down.";
} else {
LOG(ERROR)
<< "Failed to disconnect from coordination service with status: "
<< TrimCoordinationErrorMessage(status)
<< "\nProceeding with agent shutdown anyway. This is usually caused "
"by an earlier error during execution. Check the logs (this task "
"or the leader) for an earlier error to debug further.";
}
}
StopHeartbeat();
StopErrorPolling();
{
absl::MutexLock l(&state_mu_);
if (state_ == CoordinatedTaskState::TASKSTATE_ERROR) {
const std::string status_message = absl::StrCat(
"Shutdown() was called while coordination agent is in error state, "
"implying that distributed execution failed. Note: agent will "
"still shutdown anyway. Agent status: ",
status_.ToString(),
"\nThis is usually caused by an earlier error during execution. "
"Check the logs (this task or the leader) for an earlier error to "
"debug further.");
status =
MakeCoordinationError(absl::FailedPreconditionError(status_message));
LOG(ERROR) << status_message;
}
state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
}
cancellation_manager_.StartCancel();
return status;
}
absl::Status CoordinationServiceAgentImpl::Reset() {
{
absl::MutexLock l(&state_mu_);
if (state_ != CoordinatedTaskState::TASKSTATE_ERROR) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Reset() failed: coordination service agent is not in ERROR state."));
}
}
ResetTaskRequest request;
*request.mutable_source_task() = task_;
VLOG(3) << "ResetTaskRequest: " << request.DebugString();
ResetTaskResponse response;
absl::Status status;
absl::Notification n;
leader_client_->ResetTaskAsync(&request, &response,
[&status, &n](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
VLOG(3) << "ResetTaskResponse: " << status;
if (!status.ok()) {
return status;
}
StopHeartbeat();
StopErrorPolling();
ResetCancellationManager();
{
absl::MutexLock l(&state_mu_);
state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
}
{
absl::MutexLock l(&heartbeat_thread_shutdown_mu_);
shutting_down_ = false;
}
LOG(INFO) << "Coordination agent has been reset.";
return status;
}
absl::StatusOr<std::string> CoordinationServiceAgentImpl::GetKeyValue(
std::string_view key) {
return GetKeyValue(key, absl::InfiniteDuration());
}
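// The notification and result are held via shared_ptr so that a response
// arriving after the timeout still has valid storage to write into (see the
// GetKeyValue_DelayedResponse_TimeoutWithoutMemoryError test).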
absl::StatusOr<std::string> CoordinationServiceAgentImpl::GetKeyValue(
std::string_view key, absl::Duration timeout) {
auto n = std::make_shared<absl::Notification>();
auto result = std::make_shared<absl::StatusOr<std::string>>();
GetKeyValueAsync(
key, [n, result](const absl::StatusOr<std::string>& status_or_value) {
*result = status_or_value;
n->Notify();
});
bool call_completed_before_timeout =
n->WaitForNotificationWithTimeout(timeout);
if (!call_completed_before_timeout) {
VLOG(3) << "GetKeyValue(" << key << ") timed out after " << timeout;
return MakeCoordinationError(absl::DeadlineExceededError(absl::Substitute(
"GetKeyValue() timed out with key: $0 and duration: $1", key,
absl::FormatDuration(timeout))));
}
return *result;
}
std::shared_ptr<CallOptions> CoordinationServiceAgentImpl::GetKeyValueAsync(
std::string_view key, StatusOrValueCallback done) {
auto request = std::make_shared<GetKeyValueRequest>();
request->set_key(key.data(), key.size());
VLOG(3) << "GetKeyValueRequest: " << request->DebugString();
auto response = std::make_shared<GetKeyValueResponse>();
auto call_opts = std::make_shared<CallOptions>();
const CancellationToken token =
cancellation_manager_.get_cancellation_token();
const bool already_cancelled = !cancellation_manager_.RegisterCallback(
token, [call_opts]() { call_opts->StartCancel(); });
if (already_cancelled) {
done(absl::CancelledError("GetKeyValueAsync() was cancelled."));
return call_opts;
}
leader_client_->GetKeyValueAsync(
call_opts.get(), request.get(), response.get(),
[call_opts, request, response, done = std::move(done),
&cm = cancellation_manager_, token](const absl::Status& s) {
cm.TryDeregisterCallback(token);
if (!s.ok()) {
done(s);
VLOG(3) << "GetKeyValueResponse: " << s;
} else {
done(response->kv().value());
VLOG(3) << "GetKeyValueResponse: " << response->DebugString();
}
});
return call_opts;
}
absl::StatusOr<std::string> CoordinationServiceAgentImpl::TryGetKeyValue(
std::string_view key) {
absl::Notification n;
absl::StatusOr<std::string> result;
TryGetKeyValueRequest request;
request.set_key(key.data(), key.size());
VLOG(3) << "TryGetKeyValueRequest: " << request.DebugString();
TryGetKeyValueResponse response;
leader_client_->TryGetKeyValueAsync(
&request, &response, [&](const absl::Status& s) {
if (s.ok()) {
result = response.kv().value();
VLOG(3) << "TryGetKeyValueResponse: " << result.value();
} else {
result = s;
VLOG(3) << "TryGetKeyValueResponse: " << s;
}
n.Notify();
});
n.WaitForNotification();
return result;
}
absl::StatusOr<std::vector<KeyValueEntry>>
CoordinationServiceAgentImpl::GetKeyValueDir(std::string_view key) {
absl::Notification n;
absl::StatusOr<std::vector<KeyValueEntry>> result;
GetKeyValueDirAsync(
key, [&n, &result](
absl::StatusOr<std::vector<KeyValueEntry>> status_or_value) {
result = std::move(status_or_value);
n.Notify();
});
n.WaitForNotification();
return result;
}
void CoordinationServiceAgentImpl::GetKeyValueDirAsync(
std::string_view key, StatusOrValueDirCallback done) {
auto request = std::make_shared<GetKeyValueDirRequest>();
request->set_directory_key(key.data(), key.size());
VLOG(3) << "GetKeyValueDirRequest: " << request->DebugString();
auto response = std::make_shared<GetKeyValueDirResponse>();
leader_client_->GetKeyValueDirAsync(
request.get(), response.get(),
[request, response, done = std::move(done)](const absl::Status& s) {
if (!s.ok()) {
done(s);
VLOG(3) << "GetKeyValueDirResponse: " << s;
} else {
VLOG(3) << "GetKeyValueDirResponse: " << response->DebugString();
std::vector<KeyValueEntry> kv_in_directory = {
std::make_move_iterator(response->kv().begin()),
std::make_move_iterator(response->kv().end())};
done(kv_in_directory);
}
});
}
absl::Status CoordinationServiceAgentImpl::InsertKeyValue(
std::string_view key, std::string_view value) {
return InsertKeyValue(key, value, false);
}
absl::Status CoordinationServiceAgentImpl::InsertKeyValue(
std::string_view key, std::string_view value, bool allow_overwrite) {
InsertKeyValueRequest request;
request.mutable_kv()->set_key(key.data(), key.size());
request.mutable_kv()->set_value(value.data(), value.size());
request.set_allow_overwrite(allow_overwrite);
VLOG(3) << "InsertKeyValueRequest: " << request.DebugString();
InsertKeyValueResponse response;
absl::Status status;
absl::Notification n;
leader_client_->InsertKeyValueAsync(&request, &response, [&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
VLOG(3) << "InsertKeyValueResponse: " << status;
return status;
}
absl::Status CoordinationServiceAgentImpl::DeleteKeyValue(
std::string_view key) {
DeleteKeyValueRequest request;
request.set_key(key.data(), key.size());
request.set_is_directory(true);
VLOG(3) << "DeleteKeyValueRequest: " << request.DebugString();
DeleteKeyValueResponse response;
absl::Status status;
absl::Notification n;
leader_client_->DeleteKeyValueAsync(&request, &response, [&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
VLOG(3) << "DeleteKeyValueResponse " << status;
return absl::OkStatus();
}
absl::Status CoordinationServiceAgentImpl::UpdateKeyValue(
std::string_view key, std::string_view value) {
return MakeCoordinationError(absl::UnimplementedError(
"CoordinationServiceAgent::UpdateKeyValue is not implemented."));
}
absl::Status CoordinationServiceAgentImpl::StartWatchKey(
std::string_view key,
CoordinationServiceAgentImpl::ChangedKeyValuesCallback on_change) {
return MakeCoordinationError(absl::UnimplementedError(
"CoordinationServiceAgent::StartWatchKey is not implemented."));
}
absl::Status CoordinationServiceAgentImpl::StopWatchKey(std::string_view key) {
return MakeCoordinationError(absl::UnimplementedError(
"CoordinationServiceAgent::StopWatchKey is not implemented."));
}
void CoordinationServiceAgentImpl::SetError(const absl::Status& error) {
assert(!error.ok());
absl::MutexLock l(&state_mu_);
if (state_ == CoordinatedTaskState::TASKSTATE_ERROR) return;
absl::Status trimmed_error = TrimCoordinationErrorMessage(error);
LOG(ERROR) << "Coordination agent is set to ERROR: " << trimmed_error;
state_ = CoordinatedTaskState::TASKSTATE_ERROR;
status_ = trimmed_error;
error_fn_(trimmed_error);
}
absl::Status CoordinationServiceAgentImpl::ActivateWatch(
std::string_view key, const std::map<std::string, std::string>& kvs) {
return MakeCoordinationError(absl::UnimplementedError(
"CoordinationServiceAgent::ActivateWatch is not implemented."));
}
absl::Status CoordinationServiceAgentImpl::WaitAtBarrier(
std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks) {
absl::Status status;
absl::Notification n;
WaitAtBarrierAsync(barrier_id, timeout, tasks, [&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
void CoordinationServiceAgentImpl::WaitAtBarrierAsync(
std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks, StatusCallback done) {
  absl::Status agent_running_status =
      ValidateRunningAgent(/*allow_disconnected=*/true);
if (!agent_running_status.ok()) {
done(agent_running_status);
return;
}
{
absl::MutexLock l(&state_mu_);
auto [it, inserted] = used_barrier_ids_.insert(std::string(barrier_id));
if (!inserted) {
done(absl::FailedPreconditionError(absl::StrCat(
"WaitAtBarrier() should not be called with the same id more than "
"once. Barrier id: ",
barrier_id)));
return;
}
}
auto request = std::make_shared<BarrierRequest>();
auto response = std::make_shared<BarrierResponse>();
request->set_barrier_id(std::string(barrier_id));
request->set_barrier_timeout_in_ms(timeout / absl::Milliseconds(1));
*request->mutable_source_task() = task_;
*request->mutable_tasks() = {tasks.begin(), tasks.end()};
VLOG(3) << "WaitAtBarrierRequest: " << request->DebugString();
leader_client_->BarrierAsync(
request.get(), response.get(),
[request, response, done = std::move(done)](const absl::Status& s) {
auto status = TrimCoordinationErrorMessage(s);
done(status);
VLOG(3) << "WaitAtBarrierResponse: " << status;
});
}
absl::Status CoordinationServiceAgentImpl::CancelBarrier(
std::string_view barrier_id) {
absl::Status status;
absl::Notification n;
CancelBarrierAsync(barrier_id, [&](const absl::Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
void CoordinationServiceAgentImpl::CancelBarrierAsync(
std::string_view barrier_id, StatusCallback done) {
  absl::Status agent_running_status =
      ValidateRunningAgent(/*allow_disconnected=*/true);
if (!agent_running_status.ok()) {
done(agent_running_status);
return;
}
auto request = std::make_shared<CancelBarrierRequest>();
auto response = std::make_shared<CancelBarrierResponse>();
request->set_barrier_id(std::string(barrier_id));
*request->mutable_source_task() = task_;
VLOG(3) << "CancelBarrierRequest: " << request->DebugString();
leader_client_->CancelBarrierAsync(
request.get(), response.get(),
[request, response, done = std::move(done)](const absl::Status& s) {
done(s);
VLOG(3) << "CancelBarrierResponse: " << s;
});
}
absl::Status CoordinationServiceAgentImpl::ValidateRunningAgent(
bool allow_disconnected) {
absl::MutexLock l(&state_mu_);
switch (state_) {
case CoordinatedTaskState::TASKSTATE_CONNECTED:
return absl::OkStatus();
case CoordinatedTaskState::TASKSTATE_UNINITIALIZED:
return MakeCoordinationError(absl::FailedPreconditionError(
"Agent must be in CONNECTED state. It is currently UNINITIALIZED."));
case CoordinatedTaskState::TASKSTATE_DISCONNECTED:
if (allow_disconnected) return absl::OkStatus();
return MakeCoordinationError(absl::FailedPreconditionError(
"Agent must be in CONNECTED state. It is currently DISCONNECTED."));
case CoordinatedTaskState::TASKSTATE_ERROR:
return MakeCoordinationError(absl::FailedPreconditionError(
"Agent must be in CONNECTED state. It is currently in ERROR."));
default:
return MakeCoordinationError(absl::FailedPreconditionError(absl::StrCat(
"Agent is not in CONNECTED state. Current state: ", state_)));
}
}
absl::StatusOr<Env*> CoordinationServiceAgentImpl::GetEnv() {
if (!IsInitialized()) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent has not been initialized."));
}
if (env_ == nullptr) {
return MakeCoordinationError(
absl::FailedPreconditionError("Coordination service agent was not "
"initialized with a valid Env* object."));
}
return env_;
}
}
std::unique_ptr<CoordinationServiceAgent> CreateCoordinationServiceAgent() {
return std::make_unique<CoordinationServiceAgentImpl>();
}
} | #include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
using tensorflow::CoordinatedTask;
using tensorflow::CoordinationServiceConfig;
using tensorflow::KeyValueEntry;
using ::testing::_;
using ::testing::DoAll;
using ::testing::InvokeArgument;
using ::testing::SetArgPointee;
using ::testing::UnorderedPointwise;
using ::testing::WithArgs;
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.DebugString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p,
::testing::MatchResultListener*) const {
return p.DebugString() == expected_;
}
void DescribeTo(std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
MATCHER(KvEq, "simple KeyValueEntry matcher") {
const KeyValueEntry& kv0 = std::get<0>(arg);
const KeyValueEntry& kv1 = std::get<1>(arg);
return kv0.key() == kv1.key() && kv0.value() == kv1.value();
}
KeyValueEntry CreateKv(const std::string& key, const std::string& value) {
KeyValueEntry kv;
kv.set_key(key);
kv.set_value(value);
return kv;
}
class TestCoordinationClient : public CoordinationClient {
public:
TestCoordinationClient() = default;
MOCK_METHOD(void, GetKeyValueAsync,
(CallOptions * call_opts, const GetKeyValueRequest*,
GetKeyValueResponse*, StatusCallback),
(override));
MOCK_METHOD(void, TryGetKeyValueAsync,
(const TryGetKeyValueRequest*, TryGetKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, GetKeyValueDirAsync,
(const GetKeyValueDirRequest*, GetKeyValueDirResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, InsertKeyValueAsync,
(const InsertKeyValueRequest*, InsertKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, DeleteKeyValueAsync,
(const DeleteKeyValueRequest*, DeleteKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, RegisterTaskAsync,
(CallOptions*, const RegisterTaskRequest*, RegisterTaskResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, ShutdownTaskAsync,
(CallOptions*, const ShutdownTaskRequest*, ShutdownTaskResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, ResetTaskAsync,
(const ResetTaskRequest*, ResetTaskResponse*, StatusCallback),
(override));
MOCK_METHOD(void, ReportErrorToServiceAsync,
(const ReportErrorToServiceRequest*,
ReportErrorToServiceResponse*, StatusCallback),
(override));
MOCK_METHOD(void, BarrierAsync,
(const BarrierRequest*, BarrierResponse*, StatusCallback),
(override));
MOCK_METHOD(void, GetTaskStateAsync,
(const GetTaskStateRequest*, GetTaskStateResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, HeartbeatAsync,
(CallOptions*, const HeartbeatRequest*, HeartbeatResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, PollForErrorAsync,
(CallOptions * call_opts, const PollForErrorRequest*,
PollForErrorResponse*, StatusCallback),
(override));
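// Stamps out stub overrides that reply with Unimplemented for RPCs these
// tests never exercise.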
#define UNIMPLEMENTED(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
done(absl::UnimplementedError(#method "Async")); \
}
UNIMPLEMENTED(WaitForAllTasks);
UNIMPLEMENTED(CancelBarrier);
#undef UNIMPLEMENTED
void ReportErrorToTaskAsync(CallOptions* call_opts,
const ReportErrorToTaskRequest* request,
ReportErrorToTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ReportErrorToTaskAsync"));
}
};
class CoordinationServiceAgentTest : public ::testing::Test {
public:
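  // SetUp installs OK-by-default replies for every mocked RPC, so each test
  // only overrides the call it is actually exercising.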
void SetUp() override {
ON_CALL(*client_, RegisterTaskAsync(_, _, _, _))
.WillByDefault(InvokeArgument<3>(absl::OkStatus()));
ON_CALL(*client_, HeartbeatAsync(_, _, _, _))
.WillByDefault(InvokeArgument<3>(absl::OkStatus()));
ON_CALL(*client_, ShutdownTaskAsync(_, _, _, _))
.WillByDefault(InvokeArgument<3>(absl::OkStatus()));
ON_CALL(*client_, ReportErrorToServiceAsync(_, _, _))
.WillByDefault(InvokeArgument<2>(absl::OkStatus()));
ON_CALL(*client_, ResetTaskAsync(_, _, _))
.WillByDefault(InvokeArgument<2>(absl::OkStatus()));
ON_CALL(*client_, BarrierAsync(_, _, _))
.WillByDefault(InvokeArgument<2>(absl::OkStatus()));
ON_CALL(*client_, GetTaskStateAsync(_, _, _))
.WillByDefault(InvokeArgument<2>(absl::OkStatus()));
}
void InitializeAgent(CoordinationServiceConfig config = {}) {
config.set_service_leader("test_leader");
TF_ASSERT_OK(agent_->Initialize(
Env::Default(), "test_job",
0, config, std::move(client_),
[](absl::Status s) {
LOG(ERROR) << "Coordination agent is set to error: " << s;
}));
}
TestCoordinationClient* GetClient() {
CHECK(client_ != nullptr)
<< "GetClient() was called after InitializeAgent()";
return client_.get();
}
protected:
std::unique_ptr<CoordinationServiceAgent> agent_ =
CreateCoordinationServiceAgent();
std::unique_ptr<TestCoordinationClient> client_ =
std::make_unique<TestCoordinationClient>();
};
TEST_F(CoordinationServiceAgentTest, GetKeyValue_Simple_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
GetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key);
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CoordinationServiceAgentTest, GetKeyValue_WithTimeout_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
GetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CoordinationServiceAgentTest, GetKeyValue_Timeout_ReturnError) {
const std::string& test_key = "test_key";
StatusCallback owned_done;
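  // Capture the done callback without invoking it so the request outlives the
  // 1s timeout; it is invoked afterwards to check nothing dangles.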
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(WithArgs<3>([&](StatusCallback done) {
owned_done = done;
}));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));
EXPECT_TRUE(absl::IsDeadlineExceeded(result.status()));
owned_done(absl::CancelledError("error"));
}
TEST_F(CoordinationServiceAgentTest,
GetKeyValue_DelayedResponse_TimeoutWithoutMemoryError) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
auto client = std::make_unique<TestCoordinationClient>();
GetKeyValueResponse* owned_response;
StatusCallback owned_done;
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(WithArgs<2, 3>(
[&](GetKeyValueResponse* response, StatusCallback done) {
owned_response = response;
owned_done = done;
}));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));
EXPECT_TRUE(absl::IsDeadlineExceeded(result.status()));
auto kv = owned_response->mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
owned_done(absl::OkStatus());
}
TEST_F(CoordinationServiceAgentTest,
GetKeyValue_DelayedResponseBeforeTimeout_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
auto client = std::make_unique<TestCoordinationClient>();
std::unique_ptr<Thread> async_thread;
GetKeyValueResponse* owned_response;
StatusCallback owned_done;
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(WithArgs<2, 3>(
[&](GetKeyValueResponse* response, StatusCallback done) {
owned_response = response;
owned_done = done;
async_thread = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "async_thread", [&]() {
absl::SleepFor(absl::Seconds(5));
auto kv = owned_response->mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
owned_done(absl::OkStatus());
}));
}));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CoordinationServiceAgentTest, CancelGetKeyValue_Success) {
const std::string test_key = "test_key";
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(
WithArgs<0, 3>([](CallOptions* call_opts, StatusCallback done) {
call_opts->SetCancelCallback([callback = std::move(done)]() {
callback(absl::CancelledError("RPC call cancelled."));
});
}));
InitializeAgent();
absl::Status status;
std::shared_ptr<CallOptions> get_kv_call_opts = agent_->GetKeyValueAsync(
test_key, [&status](const absl::StatusOr<std::string>& result) {
status = result.status();
});
get_kv_call_opts->StartCancel();
EXPECT_TRUE(absl::IsCancelled(status)) << status;
get_kv_call_opts->ClearCancelCallback();
}
TEST_F(CoordinationServiceAgentTest, TryGetKeyValue_Simple_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
TryGetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), TryGetKeyValueAsync(_, _, _))
.WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
InvokeArgument<2>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->TryGetKeyValue(test_key);
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CoordinationServiceAgentTest, GetKeyValueDir_Simple_Success) {
const std::string test_key = "test_key_dir";
std::vector<KeyValueEntry> test_values;
test_values.push_back(CreateKv("test_key_dir/task_0", "0"));
test_values.push_back(CreateKv("test_key_dir/task_1", "1"));
GetKeyValueDirResponse mocked_response;
mocked_response.set_directory_key(test_key);
*mocked_response.mutable_kv() = {test_values.begin(), test_values.end()};
ON_CALL(*GetClient(), GetKeyValueDirAsync(_, _, _))
.WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
InvokeArgument<2>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValueDir(test_key);
TF_ASSERT_OK(result.status());
EXPECT_THAT(*result, UnorderedPointwise(KvEq(), test_values));
}
TEST_F(CoordinationServiceAgentTest, ShutdownInErrorShouldReturnError) {
InitializeAgent();
TF_ASSERT_OK(agent_->Connect());
TF_ASSERT_OK(agent_->ReportError(absl::InternalError("Test Error.")));
absl::Status s = agent_->Shutdown();
EXPECT_TRUE(absl::IsFailedPrecondition(s));
}
TEST_F(CoordinationServiceAgentTest, Reset_ConnectedButNotInError_Fail) {
InitializeAgent();
TF_ASSERT_OK(agent_->Connect());
auto status = agent_->Reset();
EXPECT_TRUE(absl::IsFailedPrecondition(status));
}
TEST_F(CoordinationServiceAgentTest, ConnectAfterResetError) {
InitializeAgent();
TF_ASSERT_OK(agent_->Connect());
TF_ASSERT_OK(agent_->ReportError(absl::InternalError("Test Error.")));
TF_ASSERT_OK(agent_->Reset());
TF_EXPECT_OK(agent_->Connect());
}
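// Annotation (not in the original source): with error polling enabled, each
// Connect() eventually observes the error injected by the mock, and Reset()
// clears the error state so the agent can reconnect and poll again.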
TEST_F(CoordinationServiceAgentTest, ConnectAfterReset_WithErrorPolling) {
PollForErrorResponse mocked_response;
EXPECT_CALL(*GetClient(), PollForErrorAsync(_, _, _, _))
.WillOnce(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::UnavailableError("Test Error."))))
.WillOnce(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::InternalError("Test Error."))));
CoordinationServiceConfig config;
config.set_poll_for_error_from_service_at_startup(true);
InitializeAgent(config);
TF_ASSERT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
ASSERT_TRUE(agent_->IsError());
TF_ASSERT_OK(agent_->Reset());
TF_EXPECT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
EXPECT_TRUE(agent_->IsError());
}
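// Annotation (not in the original source): a cancelled poll-for-error RPC is
// treated as benign and must not move the agent into an error state.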
TEST_F(CoordinationServiceAgentTest, CancelledPollForErrorRequest) {
PollForErrorResponse mocked_response;
EXPECT_CALL(*GetClient(), PollForErrorAsync(_, _, _, _))
.WillOnce(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::CancelledError("Test Error."))));
CoordinationServiceConfig config;
config.set_poll_for_error_from_service_at_startup(true);
InitializeAgent(config);
TF_ASSERT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
ASSERT_FALSE(agent_->IsError());
}
TEST_F(CoordinationServiceAgentTest, InvalidPollForErrorRequest) {
PollForErrorResponse mocked_response;
EXPECT_CALL(*GetClient(), PollForErrorAsync(_, _, _, _))
.WillOnce(
DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::InvalidArgumentError("Test Error."))));
CoordinationServiceConfig config;
config.set_poll_for_error_from_service_at_startup(true);
InitializeAgent(config);
TF_ASSERT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
ASSERT_TRUE(agent_->IsError());
}
TEST_F(CoordinationServiceAgentTest,
PollForErrorRequestWithFailedPrecondition) {
PollForErrorResponse mocked_response;
EXPECT_CALL(*GetClient(), PollForErrorAsync(_, _, _, _))
.WillOnce(DoAll(
SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::FailedPreconditionError("Test Error."))));
CoordinationServiceConfig config;
config.set_poll_for_error_from_service_at_startup(true);
InitializeAgent(config);
TF_ASSERT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
ASSERT_TRUE(agent_->IsError());
}
TEST_F(CoordinationServiceAgentTest, ResetCanBeRetried) {
EXPECT_CALL(*GetClient(), ResetTaskAsync(_, _, _))
.WillOnce(InvokeArgument<2>(absl::InternalError("Reset error")))
.WillOnce(InvokeArgument<2>(absl::OkStatus()));
InitializeAgent();
TF_ASSERT_OK(agent_->Connect());
TF_ASSERT_OK(agent_->ReportError(absl::InternalError("Test Error.")));
absl::Status reset_status = agent_->Reset();
EXPECT_TRUE(absl::IsInternal(reset_status));
TF_ASSERT_OK(agent_->Reset());
TF_EXPECT_OK(agent_->Connect());
}
TEST_F(CoordinationServiceAgentTest, GetOwnTask) {
InitializeAgent();
auto result = agent_->GetOwnTask();
TF_ASSERT_OK(result.status());
CoordinatedTask actual_task = *result;
CoordinatedTask expected_task;
expected_task.set_job_name("test_job");
expected_task.set_task_id(0);
EXPECT_EQ(actual_task.job_name(), expected_task.job_name());
EXPECT_EQ(actual_task.task_id(), expected_task.task_id());
}
TEST_F(CoordinationServiceAgentTest, GetOwnTask_Uninitialized) {
auto result = agent_->GetOwnTask();
EXPECT_TRUE(absl::IsFailedPrecondition(result.status()));
}
TEST_F(CoordinationServiceAgentTest, WaitAtBarrier_SameIdUsedTwice_Fails) {
InitializeAgent();
const std::string barrier_id = "only_use_once";
TF_ASSERT_OK(agent_->Connect());
TF_ASSERT_OK(
agent_->WaitAtBarrier(barrier_id, absl::Seconds(1), {}));
auto result =
agent_->WaitAtBarrier(barrier_id, absl::Seconds(1), {});
EXPECT_TRUE(absl::IsFailedPrecondition(result));
}
TEST_F(CoordinationServiceAgentTest, GetEnv_SucceedsAfterInit) {
EXPECT_TRUE(absl::IsFailedPrecondition(agent_->GetEnv().status()));
InitializeAgent();
absl::StatusOr<Env*> result = agent_->GetEnv();
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, Env::Default());
}
TEST_F(CoordinationServiceAgentTest, Connect_AbortedErrorShouldBeRetried) {
EXPECT_CALL(*GetClient(), RegisterTaskAsync(_, _, _, _))
.WillOnce(
InvokeArgument<3>(absl::AbortedError("DuplicateTaskRegistration")))
.WillOnce(
InvokeArgument<3>(absl::AbortedError("DuplicateTaskRegistration")))
.WillOnce(InvokeArgument<3>(absl::OkStatus()));
InitializeAgent();
TF_EXPECT_OK(agent_->Connect());
}
TEST_F(CoordinationServiceAgentTest, Connect_AbortedErrorShouldFailEventually) {
EXPECT_CALL(*GetClient(), RegisterTaskAsync(_, _, _, _))
.WillRepeatedly(
InvokeArgument<3>(absl::AbortedError("DuplicateTaskRegistration")));
CoordinationServiceConfig config;
config.set_cluster_register_timeout_in_ms(
absl::ToInt64Milliseconds(absl::Seconds(3)));
InitializeAgent(config);
absl::Status s = agent_->Connect();
EXPECT_TRUE(absl::IsAborted(s));
}
TEST_F(CoordinationServiceAgentTest, Connect_InternalErrorShouldBeRetried) {
EXPECT_CALL(*GetClient(), RegisterTaskAsync(_, _, _, _))
.WillOnce(InvokeArgument<3>(
absl::InternalError("Coordination service is not enabled.")))
.WillOnce(InvokeArgument<3>(
absl::InternalError("Coordination service is not enabled.")))
.WillOnce(InvokeArgument<3>(absl::OkStatus()));
InitializeAgent();
TF_EXPECT_OK(agent_->Connect());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7293ea99-22f7-461c-95a5-f309d55d0574 | cpp | abseil/abseil-cpp | time_zone | absl/time/internal/cctz/include/cctz/time_zone.h | absl/time/time_zone_test.cc | #ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_H_
#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_H_
#include <chrono>
#include <cstdint>
#include <limits>
#include <ratio>
#include <string>
#include <utility>
#include "absl/base/config.h"
#include "absl/time/internal/cctz/include/cctz/civil_time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {
template <typename D>
using time_point = std::chrono::time_point<std::chrono::system_clock, D>;
using seconds = std::chrono::duration<std::int_fast64_t>;
using sys_seconds = seconds;  // Deprecated: use cctz::seconds instead.
namespace detail {
template <typename D>
std::pair<time_point<seconds>, D> split_seconds(const time_point<D>& tp);
std::pair<time_point<seconds>, seconds> split_seconds(
const time_point<seconds>& tp);
}  // namespace detail
class time_zone {
public:
time_zone() : time_zone(nullptr) {}
time_zone(const time_zone&) = default;
time_zone& operator=(const time_zone&) = default;
std::string name() const;
  struct absolute_lookup {
    civil_second cs;
    int offset;        // seconds east of UTC
    bool is_dst;       // is the offset non-standard (daylight saving)?
    const char* abbr;  // time-zone abbreviation (e.g., "PST")
  };
absolute_lookup lookup(const time_point<seconds>& tp) const;
template <typename D>
absolute_lookup lookup(const time_point<D>& tp) const {
return lookup(detail::split_seconds(tp).first);
}
struct civil_lookup {
    enum civil_kind {
      UNIQUE,    // the civil time was singular (pre == trans == post)
      SKIPPED,   // the civil time did not exist (pre >= trans > post)
      REPEATED,  // the civil time was ambiguous (pre < trans <= post)
    } kind;
time_point<seconds> pre;
time_point<seconds> trans;
time_point<seconds> post;
};
civil_lookup lookup(const civil_second& cs) const;
struct civil_transition {
civil_second from;
civil_second to;
};
bool next_transition(const time_point<seconds>& tp,
civil_transition* trans) const;
template <typename D>
bool next_transition(const time_point<D>& tp, civil_transition* trans) const {
return next_transition(detail::split_seconds(tp).first, trans);
}
bool prev_transition(const time_point<seconds>& tp,
civil_transition* trans) const;
template <typename D>
bool prev_transition(const time_point<D>& tp, civil_transition* trans) const {
return prev_transition(detail::split_seconds(tp).first, trans);
}
std::string version() const;
std::string description() const;
friend bool operator==(time_zone lhs, time_zone rhs) {
return &lhs.effective_impl() == &rhs.effective_impl();
}
friend bool operator!=(time_zone lhs, time_zone rhs) { return !(lhs == rhs); }
template <typename H>
friend H AbslHashValue(H h, time_zone tz) {
return H::combine(std::move(h), &tz.effective_impl());
}
class Impl;
private:
explicit time_zone(const Impl* impl) : impl_(impl) {}
const Impl& effective_impl() const;
const Impl* impl_;
};
bool load_time_zone(const std::string& name, time_zone* tz);
time_zone utc_time_zone();
time_zone fixed_time_zone(const seconds& offset);
time_zone local_time_zone();
template <typename D>
inline civil_second convert(const time_point<D>& tp, const time_zone& tz) {
return tz.lookup(tp).cs;
}
inline time_point<seconds> convert(const civil_second& cs,
const time_zone& tz) {
const time_zone::civil_lookup cl = tz.lookup(cs);
if (cl.kind == time_zone::civil_lookup::SKIPPED) return cl.trans;
return cl.pre;
}
namespace detail {
using femtoseconds = std::chrono::duration<std::int_fast64_t, std::femto>;
std::string format(const std::string&, const time_point<seconds>&,
const femtoseconds&, const time_zone&);
bool parse(const std::string&, const std::string&, const time_zone&,
time_point<seconds>*, femtoseconds*, std::string* err = nullptr);
template <typename Rep, std::intmax_t Denom>
bool join_seconds(
const time_point<seconds>& sec, const femtoseconds& fs,
time_point<std::chrono::duration<Rep, std::ratio<1, Denom>>>* tpp);
template <typename Rep, std::intmax_t Num>
bool join_seconds(
const time_point<seconds>& sec, const femtoseconds& fs,
time_point<std::chrono::duration<Rep, std::ratio<Num, 1>>>* tpp);
template <typename Rep>
bool join_seconds(
const time_point<seconds>& sec, const femtoseconds& fs,
time_point<std::chrono::duration<Rep, std::ratio<1, 1>>>* tpp);
bool join_seconds(const time_point<seconds>& sec, const femtoseconds&,
time_point<seconds>* tpp);
}  // namespace detail
template <typename D>
inline std::string format(const std::string& fmt, const time_point<D>& tp,
const time_zone& tz) {
const auto p = detail::split_seconds(tp);
const auto n = std::chrono::duration_cast<detail::femtoseconds>(p.second);
return detail::format(fmt, p.first, n, tz);
}
template <typename D>
inline bool parse(const std::string& fmt, const std::string& input,
const time_zone& tz, time_point<D>* tpp) {
time_point<seconds> sec;
detail::femtoseconds fs;
return detail::parse(fmt, input, tz, &sec, &fs) &&
detail::join_seconds(sec, fs, tpp);
}
namespace detail {
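// Annotation (not in the original source): split_seconds() separates a
// time_point into whole seconds plus a nonnegative subsecond remainder.
// time_point_cast truncates toward zero, so for pre-epoch times the
// remainder is re-normalized into [0, 1s) below.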
template <typename D>
std::pair<time_point<seconds>, D> split_seconds(const time_point<D>& tp) {
auto sec = std::chrono::time_point_cast<seconds>(tp);
auto sub = tp - sec;
if (sub.count() < 0) {
sec -= seconds(1);
sub += seconds(1);
}
return {sec, std::chrono::duration_cast<D>(sub)};
}
inline std::pair<time_point<seconds>, seconds> split_seconds(
const time_point<seconds>& tp) {
return {tp, seconds::zero()};
}
template <typename Rep, std::intmax_t Denom>
bool join_seconds(
const time_point<seconds>& sec, const femtoseconds& fs,
time_point<std::chrono::duration<Rep, std::ratio<1, Denom>>>* tpp) {
using D = std::chrono::duration<Rep, std::ratio<1, Denom>>;
*tpp = std::chrono::time_point_cast<D>(sec);
*tpp += std::chrono::duration_cast<D>(fs);
return true;
}
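// Annotation (not in the original source): for durations coarser than one
// second, the seconds count must be rounded toward negative infinity (floor
// division) rather than toward zero, so that pre-epoch times land in the
// correct coarse tick.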
template <typename Rep, std::intmax_t Num>
bool join_seconds(
const time_point<seconds>& sec, const femtoseconds&,
time_point<std::chrono::duration<Rep, std::ratio<Num, 1>>>* tpp) {
using D = std::chrono::duration<Rep, std::ratio<Num, 1>>;
auto count = sec.time_since_epoch().count();
if (count >= 0 || count % Num == 0) {
count /= Num;
} else {
count /= Num;
count -= 1;
}
if (count > (std::numeric_limits<Rep>::max)()) return false;
if (count < (std::numeric_limits<Rep>::min)()) return false;
*tpp = time_point<D>() + D{static_cast<Rep>(count)};
return true;
}
template <typename Rep>
bool join_seconds(
const time_point<seconds>& sec, const femtoseconds&,
time_point<std::chrono::duration<Rep, std::ratio<1, 1>>>* tpp) {
using D = std::chrono::duration<Rep, std::ratio<1, 1>>;
auto count = sec.time_since_epoch().count();
if (count > (std::numeric_limits<Rep>::max)()) return false;
if (count < (std::numeric_limits<Rep>::min)()) return false;
*tpp = time_point<D>() + D{static_cast<Rep>(count)};
return true;
}
inline bool join_seconds(const time_point<seconds>& sec, const femtoseconds&,
time_point<seconds>* tpp) {
*tpp = sec;
return true;
}
}  // namespace detail
}  // namespace cctz
}  // namespace time_internal
ABSL_NAMESPACE_END
}  // namespace absl
#endif | #include "absl/time/internal/cctz/include/cctz/time_zone.h"
#include "gtest/gtest.h"
#include "absl/time/internal/test_util.h"
#include "absl/time/time.h"
namespace cctz = absl::time_internal::cctz;
namespace {
TEST(TimeZone, ValueSemantics) {
absl::TimeZone tz;
absl::TimeZone tz2 = tz;
EXPECT_EQ(tz, tz2);
tz2 = tz;
EXPECT_EQ(tz, tz2);
}
TEST(TimeZone, Equality) {
absl::TimeZone a, b;
EXPECT_EQ(a, b);
EXPECT_EQ(a.name(), b.name());
absl::TimeZone implicit_utc;
absl::TimeZone explicit_utc = absl::UTCTimeZone();
EXPECT_EQ(implicit_utc, explicit_utc);
EXPECT_EQ(implicit_utc.name(), explicit_utc.name());
absl::TimeZone la = absl::time_internal::LoadTimeZone("America/Los_Angeles");
absl::TimeZone nyc = absl::time_internal::LoadTimeZone("America/New_York");
EXPECT_NE(la, nyc);
}
TEST(TimeZone, CCTZConversion) {
const cctz::time_zone cz = cctz::utc_time_zone();
const absl::TimeZone tz(cz);
EXPECT_EQ(cz, cctz::time_zone(tz));
}
TEST(TimeZone, DefaultTimeZones) {
absl::TimeZone tz;
EXPECT_EQ("UTC", absl::TimeZone().name());
EXPECT_EQ("UTC", absl::UTCTimeZone().name());
}
TEST(TimeZone, FixedTimeZone) {
const absl::TimeZone tz = absl::FixedTimeZone(123);
const cctz::time_zone cz = cctz::fixed_time_zone(cctz::seconds(123));
EXPECT_EQ(tz, absl::TimeZone(cz));
}
TEST(TimeZone, LocalTimeZone) {
const absl::TimeZone local_tz = absl::LocalTimeZone();
absl::TimeZone tz = absl::time_internal::LoadTimeZone("localtime");
EXPECT_EQ(tz, local_tz);
}
TEST(TimeZone, NamedTimeZones) {
absl::TimeZone nyc = absl::time_internal::LoadTimeZone("America/New_York");
EXPECT_EQ("America/New_York", nyc.name());
absl::TimeZone syd = absl::time_internal::LoadTimeZone("Australia/Sydney");
EXPECT_EQ("Australia/Sydney", syd.name());
absl::TimeZone fixed = absl::FixedTimeZone((((3 * 60) + 25) * 60) + 45);
EXPECT_EQ("Fixed/UTC+03:25:45", fixed.name());
}
TEST(TimeZone, Failures) {
absl::TimeZone tz = absl::time_internal::LoadTimeZone("America/Los_Angeles");
EXPECT_FALSE(LoadTimeZone("Invalid/TimeZone", &tz));
EXPECT_EQ(absl::UTCTimeZone(), tz);
tz = absl::time_internal::LoadTimeZone("America/Los_Angeles");
EXPECT_FALSE(LoadTimeZone("Invalid/TimeZone", &tz));
EXPECT_EQ(absl::UTCTimeZone(), tz);
tz = absl::time_internal::LoadTimeZone("America/Los_Angeles");
EXPECT_FALSE(LoadTimeZone("", &tz));
EXPECT_EQ(absl::UTCTimeZone(), tz);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/time/internal/cctz/include/cctz/time_zone.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/time/time_zone_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
5253ef1e-f139-4a56-b343-b128d7405c88 | cpp | tensorflow/tensorflow | fingerprint | tensorflow/core/platform/fingerprint.h | third_party/xla/third_party/tsl/tsl/platform/fingerprint_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_FINGERPRINT_H_
#define TENSORFLOW_CORE_PLATFORM_FINGERPRINT_H_
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/fingerprint.h"
namespace tensorflow {
using Fprint128 = tsl::Fprint128;
using Fprint128Hasher = tsl::Fprint128Hasher;
using tsl::Fingerprint128;
using tsl::Fingerprint32;
using tsl::Fingerprint64;
using tsl::FingerprintCat64;
}
#endif | #include "tsl/platform/fingerprint.h"
#include <unordered_set>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
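// Annotation (not in the original source): golden-value tests. Fingerprints
// may be persisted by callers, so these functions must produce identical
// output forever (hence "IsForeverFrozen").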
TEST(Fingerprint64, IsForeverFrozen) {
EXPECT_EQ(15404698994557526151ULL, Fingerprint64("Hello"));
EXPECT_EQ(18308117990299812472ULL, Fingerprint64("World"));
}
TEST(Fingerprint128, IsForeverFrozen) {
{
const Fprint128 fingerprint = Fingerprint128("Hello");
EXPECT_EQ(1163506517679092766ULL, fingerprint.low64);
EXPECT_EQ(10829806600034513965ULL, fingerprint.high64);
}
{
const Fprint128 fingerprint = Fingerprint128("World");
EXPECT_EQ(14404540403896557767ULL, fingerprint.low64);
EXPECT_EQ(4859093245152058524ULL, fingerprint.high64);
}
}
TEST(Fingerprint128, Fprint128Hasher) {
  // Verifies that Fprint128Hasher can serve as the hash functor of a
  // standard unordered container; constructing the set is the whole test.
  const std::unordered_set<Fprint128, Fprint128Hasher> map = {{1, 2}, {3, 4}};
}
TEST(FingerprintCat64, IsForeverFrozen) {
EXPECT_EQ(16877292868973613377ULL,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_EQ(7158413233176775252ULL,
FingerprintCat64(Fingerprint64("World"), Fingerprint64("Hello")));
}
TEST(FingerprintCat64, Idempotence) {
const uint64_t orig =
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World"));
EXPECT_EQ(orig,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_NE(FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("Hi")),
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_EQ(orig,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/fingerprint.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/fingerprint_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
48af6870-55d0-4586-aed0-fbd18837805e | cpp | tensorflow/tensorflow | despecializer | third_party/xla/xla/service/despecializer.cc | third_party/xla/xla/service/despecializer_test.cc | #include "xla/service/despecializer.h"
#include <iterator>
#include <utility>
#include <vector>
#include "xla/service/defuser.h"
#include "xla/service/float_normalization.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/sub_byte_normalization.h"
namespace xla {
Despecializer::Despecializer() : pipeline_("despecializer") {
pipeline_.AddPass<HloDescheduler>();
pipeline_.AddPass<ControlDepRemover>();
pipeline_.AddPass<Defuser>();
pipeline_.AddPass<BFloat16MixedPrecisionRemoval>();
pipeline_.AddPass<SubByteNormalization>(
SubByteNormalization::REMOVE_ELEMENT_SIZE);
}
void Despecializer::AddAssumeGatherIndicesInBoundRewriteToCopy() {
pipeline_.AddPass<AssumeGatherIndicesInBoundRewriteToCopy>();
}
void Despecializer::AddReduceWindowToReduceBroadcastDeconstruct() {
pipeline_.AddPass<DeconstructReduceWindowToReduceBroadcast>();
}
absl::StatusOr<bool> Despecializer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return pipeline_.Run(module, execution_threads);
}
absl::StatusOr<bool> AssumeGatherIndicesInBoundRewriteToCopy::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloInstruction*> candidates;
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall("AssumeGatherIndicesInBound")) {
candidates.push_back(instruction);
}
}
}
for (HloInstruction* gather_indices : candidates) {
auto computation = gather_indices->parent();
auto copy = computation->AddInstruction(
HloInstruction::CreateUnary(gather_indices->shape(), HloOpcode::kCopy,
gather_indices->mutable_operand(0)));
TF_CHECK_OK(computation->ReplaceInstruction(gather_indices, copy));
}
return !candidates.empty();
}
absl::StatusOr<bool> DeconstructReduceWindowToReduceBroadcast::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<std::pair<HloInstruction*, int64_t>> candidate_rw;
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kReduceWindow) {
continue;
}
auto* reduce_window = CastOrNull<HloReduceWindowInstruction>(instruction);
if (reduce_window == nullptr) {
continue;
}
if (reduce_window->operand(0)->shape() != reduce_window->shape()) {
continue;
}
const Window& window = reduce_window->window();
int64_t num_stride_dilations = absl::c_count_if(
window.dimensions(), [](const WindowDimension& win_dim) {
return (
win_dim.stride() != 1 || win_dim.window_reversal() == true ||
win_dim.window_dilation() != 1 || win_dim.base_dilation() != 1);
});
if (num_stride_dilations != 0) {
continue;
}
int64_t num_dimensions_reduced = absl::c_count_if(
window.dimensions(),
[](const WindowDimension& win_dim) { return (win_dim.size() != 1); });
if (num_dimensions_reduced != 1) {
continue;
}
auto reduce_dim = absl::c_find_if(
window.dimensions(),
[](const WindowDimension& win_dim) { return (win_dim.size() != 1); });
if (reduce_dim == window.dimensions().end()) {
continue;
}
int64_t reduce_dim_index =
std::distance(window.dimensions().begin(), reduce_dim);
auto input_dim_size =
reduce_window->operand(0)->shape().dimensions(reduce_dim_index);
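      // Annotation (not in the original source): only a "full" window
      // qualifies. Size 2N-1 with N-1 padding on each side means every
      // output element reduces over the entire dimension, which is exactly
      // reduce-then-broadcast.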
if (reduce_dim->size() != 2 * input_dim_size - 1) {
continue;
}
if (reduce_dim->padding_low() != input_dim_size - 1) {
continue;
}
if (reduce_dim->padding_high() != input_dim_size - 1) {
continue;
}
VLOG(2) << "Adding Candidate ReduceWindow:" << reduce_window->ToString();
candidate_rw.push_back(std::make_pair(reduce_window, reduce_dim_index));
}
}
for (const auto& rw : candidate_rw) {
auto reduce_window = rw.first;
auto reduce_dim_index = rw.second;
if (reduce_window == nullptr || reduce_dim_index < 0 ||
reduce_dim_index >= reduce_window->operand(0)->shape().rank()) {
continue;
}
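    // Annotation (not in the original source): rewrite as a reduce over the
    // matched dimension followed by a broadcast back to the original shape
    // along the remaining (size-1 window) dimensions.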
std::vector<int64_t> reduce_instr_dimensions;
std::vector<int64_t> broadcast_dimensions;
const Window& window = reduce_window->window();
for (int64_t index = 0; index < window.dimensions().size(); ++index) {
const auto& window_dimension = window.dimensions(index);
if (window_dimension.size() == 1) {
reduce_instr_dimensions.push_back(
reduce_window->operand(0)->shape().dimensions(index));
broadcast_dimensions.push_back(index);
}
}
Shape reduce_shape = ShapeUtil::MakeShape(
reduce_window->shape().element_type(), reduce_instr_dimensions);
auto reduce_instr =
reduce_window->AddInstruction(HloInstruction::CreateReduce(
reduce_shape, reduce_window->mutable_operand(0),
reduce_window->mutable_operand(1), {reduce_dim_index},
reduce_window->called_computations()[0]));
auto broadcast_instr =
reduce_window->AddInstruction(HloInstruction::CreateBroadcast(
reduce_window->shape(), reduce_instr, broadcast_dimensions));
VLOG(2) << "reduce_window:" << reduce_window->ToString();
VLOG(2) << "reduce:" << reduce_instr->ToString();
VLOG(2) << "broadcast:" << broadcast_instr->ToString();
TF_CHECK_OK(reduce_window->parent()->ReplaceInstruction(reduce_window,
broadcast_instr));
changed = true;
}
return changed;
}
} | #include "xla/service/despecializer.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class DespecializerTest : public HloTestBase {
protected:
Despecializer despecializer_;
};
TEST_F(DespecializerTest, ValidRW1) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x1x255 pad=0_0x0_0x0_0x127_127}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 3);
EXPECT_EQ(bcast->dimensions().size(), 3);
EXPECT_EQ(bcast->dimensions()[0], 0);
EXPECT_EQ(bcast->dimensions()[1], 1);
EXPECT_EQ(bcast->dimensions()[2], 2);
}
TEST_F(DespecializerTest, ValidRW2) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x15x1 pad=0_0x0_0x7_7x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 2);
EXPECT_EQ(bcast->dimensions().size(), 3);
EXPECT_EQ(bcast->dimensions()[0], 0);
EXPECT_EQ(bcast->dimensions()[1], 1);
EXPECT_EQ(bcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW3) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,128,32,8]{1,3,2,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,128,32,8]{1,3,2,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x255x1x1 pad=0_0x127_127x0_0x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 1);
EXPECT_EQ(bcast->dimensions().size(), 3);
EXPECT_EQ(bcast->dimensions()[0], 0);
EXPECT_EQ(bcast->dimensions()[1], 2);
EXPECT_EQ(bcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW4) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[8,32,32,128]{3,0,1,2} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[8,32,32,128]{3,0,1,2} reduce-window(param_0.938,constant.381.clone.1), window={size=15x1x1x1 pad=7_7x0_0x0_0x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 0);
EXPECT_EQ(bcast->dimensions().size(), 3);
EXPECT_EQ(bcast->dimensions()[0], 1);
EXPECT_EQ(bcast->dimensions()[1], 2);
EXPECT_EQ(bcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW5) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x1x32 pad=0_0x0_0x0_0x0_31}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
EXPECT_EQ(m->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRW6) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32]{1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32]{1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=63x1 pad=31_31x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 0);
EXPECT_EQ(bcast->dimensions().size(), 1);
EXPECT_EQ(bcast->dimensions()[0], 1);
}
TEST_F(DespecializerTest, ValidRWMultiple) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=63x1x1x255 pad=31_31x0_0x0_0x127_127}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
EXPECT_EQ(m->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRWStrideDilation) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=1x1x1x255 pad=0_0x0_0x0_0x127_127 stride=2x1x1x1 lhs_dilate=2x1x1x1}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
EXPECT_EQ(m->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRWShape) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32,2,128]{3,2,1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=1x1x7x1 pad=0_0x0_0x0_0x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
EXPECT_EQ(m->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduceWindow);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/despecializer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/despecializer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3685acb6-6aa3-475c-a394-49e4602f0efb | cpp | tensorflow/tensorflow | base64 | third_party/xla/third_party/tsl/tsl/platform/base64.cc | tensorflow/core/lib/strings/base64_test.cc | #include "tsl/platform/base64.h"
#include <cstring>
#include <memory>
#include "absl/status/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
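// Annotation (not in the original source): maps bytes of the URL-safe base64
// alphabet ('-' and '_' in place of '+' and '/') to their 6-bit values; -1
// marks bytes outside the alphabet.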
constexpr int8 kBase64Bytes[128] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, 0x3E, -1, -1,
0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, -1, -1,
-1, -1, -1, -1, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12,
0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, -1, -1, -1, -1, 0x3F,
-1, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24,
0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, -1, -1, -1, -1, -1};
constexpr char kBase64UrlSafeChars[65] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
constexpr char kPadChar = '=';
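// Annotation (not in the original source): Convert() decodes one base64
// character. ORing in the high bit of `x` makes any non-ASCII input
// sign-extend into a large value that the 0xFF000000 validity check in
// DecodeThreeChars() rejects.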
inline uint32 Convert(char x) {
const int8_t y = kBase64Bytes[x & 0x7F] | (x & 0x80);
const int32_t z = static_cast<int32>(y);
return static_cast<uint32>(z);
}
absl::Status DecodeThreeChars(const char* codes, char* result) {
const uint32 packed = (Convert(codes[0]) << 18) | (Convert(codes[1]) << 12) |
(Convert(codes[2]) << 6) | (Convert(codes[3]));
if (TF_PREDICT_FALSE((packed & 0xFF000000) != 0)) {
return errors::InvalidArgument("Invalid character found in base64.");
}
result[0] = static_cast<char>(packed >> 16);
result[1] = static_cast<char>(packed >> 8);
result[2] = static_cast<char>(packed);
return absl::OkStatus();
}
}
template <typename T>
absl::Status Base64Decode(absl::string_view data, T* decoded) {
if (decoded == nullptr) {
return errors::Internal("'decoded' cannot be nullptr.");
}
if (data.empty()) {
decoded->clear();
return absl::OkStatus();
}
const size_t max_decoded_size = 3 * (data.size() / 4) + 3;
std::unique_ptr<char[]> buffer(new char[max_decoded_size]);
char* current = buffer.get();
if (current == nullptr) {
return errors::ResourceExhausted(
"Failed to allocate buffer for decoded string.");
}
const char* b64 = data.data();
const char* end = data.data() + data.size();
while (end - b64 > 4) {
TF_RETURN_IF_ERROR(DecodeThreeChars(b64, current));
b64 += 4;
current += 3;
}
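  // Annotation (not in the original source): decode the final group by
  // stripping any '=' padding, then running the remaining 2-4 characters
  // through a scratch buffer padded with zero-valued characters.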
if (end - b64 == 4) {
if (b64[2] == kPadChar && b64[3] == kPadChar) {
end -= 2;
}
if (b64[2] != kPadChar && b64[3] == kPadChar) {
end -= 1;
}
}
const int remain = static_cast<int>(end - b64);
if (TF_PREDICT_FALSE(remain == 1)) {
return errors::InvalidArgument(
"Base64 string length cannot be 1 modulo 4.");
}
char tail[4] = {kBase64UrlSafeChars[0], kBase64UrlSafeChars[0],
kBase64UrlSafeChars[0], kBase64UrlSafeChars[0]};
std::memcpy(tail, b64, remain * sizeof(*b64));
TF_RETURN_IF_ERROR(DecodeThreeChars(tail, current));
current += remain - 1;
decoded->assign(buffer.get(), current - buffer.get());
return absl::OkStatus();
}
template <typename T>
absl::Status Base64Encode(absl::string_view source, T* encoded) {
return Base64Encode(source, false, encoded);
}
template <typename T>
absl::Status Base64Encode(absl::string_view source, bool with_padding,
T* encoded) {
const char* const base64_chars = kBase64UrlSafeChars;
if (encoded == nullptr) {
return errors::Internal("'encoded' cannot be nullptr.");
}
const size_t max_encoded_size = 4 * (source.size() / 3) + 4;
std::unique_ptr<char[]> buffer(new char[max_encoded_size]);
char* current = buffer.get();
if (current == nullptr) {
return errors::ResourceExhausted(
"Failed to allocate buffer for encoded string.");
}
const char* data = source.data();
const char* const end = source.data() + source.size();
while (end - data >= 3) {
*current++ = base64_chars[(data[0] >> 2) & 0x3F];
*current++ =
base64_chars[((data[0] & 0x03) << 4) | ((data[1] >> 4) & 0x0F)];
*current++ =
base64_chars[((data[1] & 0x0F) << 2) | ((data[2] >> 6) & 0x03)];
*current++ = base64_chars[data[2] & 0x3F];
data += 3;
}
if (end - data == 2) {
*current++ = base64_chars[(data[0] >> 2) & 0x3F];
*current++ =
base64_chars[((data[0] & 0x03) << 4) | ((data[1] >> 4) & 0x0F)];
*current++ = base64_chars[(data[1] & 0x0F) << 2];
if (with_padding) {
*current++ = kPadChar;
}
} else if (end - data == 1) {
*current++ = base64_chars[(data[0] >> 2) & 0x3F];
*current++ = base64_chars[(data[0] & 0x03) << 4];
if (with_padding) {
*current++ = kPadChar;
*current++ = kPadChar;
}
}
encoded->assign(buffer.get(), current - buffer.get());
return absl::OkStatus();
}
template Status Base64Decode<std::string>(StringPiece data,
std::string* decoded);
template Status Base64Encode<std::string>(StringPiece source,
std::string* encoded);
template Status Base64Encode<std::string>(StringPiece source, bool with_padding,
std::string* encoded);
template Status Base64Decode<tstring>(StringPiece data, tstring* decoded);
template Status Base64Encode<tstring>(StringPiece source, tstring* encoded);
template Status Base64Encode<tstring>(StringPiece source, bool with_padding,
tstring* encoded);
} | #include "tensorflow/core/lib/strings/base64.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(Base64, EncodeDecode) {
const string original = "a simple test message!";
tstring encoded;
TF_EXPECT_OK(Base64Encode(original, &encoded));
EXPECT_EQ("YSBzaW1wbGUgdGVzdCBtZXNzYWdlIQ", encoded);
tstring decoded;
TF_EXPECT_OK(Base64Decode(encoded, &decoded));
EXPECT_EQ(original, decoded);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/base64.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/strings/base64_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a755e12a-a649-46e9-8e77-9ef89b24815e | cpp | google/arolla | simple_qtype | arolla/qtype/simple_qtype.cc | arolla/qtype/simple_qtype_test.cc | #include "arolla/qtype/simple_qtype.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
namespace arolla {
absl::Status SimpleQType::InitNameMap() {
name2index_.reserve(field_names_.size());
for (const auto& field_name : field_names_) {
if (bool inserted =
name2index_.emplace(field_name, name2index_.size()).second;
!inserted) {
return absl::FailedPreconditionError(absl::StrCat(
"duplicated name field for QType ", name(), ": ", field_name));
}
}
return absl::OkStatus();
}
absl::Span<const std::string> SimpleQType::GetFieldNames() const {
return field_names_;
}
std::optional<int64_t> SimpleQType::GetFieldIndexByName(
absl::string_view field_name) const {
if (auto it = name2index_.find(field_name); it != name2index_.end()) {
return it->second;
}
return std::nullopt;
}
} | #include "arolla/qtype/simple_qtype.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/meta.h"
#include "arolla/util/repr.h"
#include "arolla/util/struct_field.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::MatchesRegex;
struct TypeWithRepr {};
struct TypeWithoutRepr {};
struct FullFeaturedType {
int32_t state;
};
struct TypeWithNamedFields {
float x;
double y;
constexpr static auto ArollaStructFields() {
using CppType = TypeWithNamedFields;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
}
AROLLA_DECLARE_QTYPE(TypeWithRepr);
AROLLA_DECLARE_QTYPE(TypeWithoutRepr);
AROLLA_DECLARE_QTYPE(FullFeaturedType);
AROLLA_DECLARE_QTYPE(TypeWithNamedFields);
AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(TypeWithRepr);
void FingerprintHasherTraits<TypeWithRepr>::operator()(
FingerprintHasher*, const TypeWithRepr&) const {}
AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(TypeWithoutRepr);
void FingerprintHasherTraits<TypeWithoutRepr>::operator()(
FingerprintHasher*, const TypeWithoutRepr&) const {}
AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(FullFeaturedType);
void FingerprintHasherTraits<FullFeaturedType>::operator()(
FingerprintHasher* hasher, const FullFeaturedType& value) const {
hasher->Combine(value.state);
}
AROLLA_DECLARE_REPR(TypeWithRepr);
ReprToken ReprTraits<TypeWithRepr>::operator()(const TypeWithRepr&) const {
return ReprToken{"type_with_repr", {10, 50}};
}
AROLLA_DECLARE_REPR(FullFeaturedType);
ReprToken ReprTraits<FullFeaturedType>::operator()(
const FullFeaturedType& value) const {
return ReprToken{absl::StrFormat("FullFeaturedType{%d}", value.state),
{31, 27}};
}
AROLLA_DEFINE_SIMPLE_QTYPE(TYPE_WITH_REPR, TypeWithRepr);
AROLLA_DEFINE_SIMPLE_QTYPE(TYPE_WITHOUT_REPR, TypeWithoutRepr);
AROLLA_DEFINE_SIMPLE_QTYPE(TYPE_WITH_NAMED_FIELDS, TypeWithNamedFields);
QTypePtr QTypeTraits<FullFeaturedType>::type() {
struct FullFeaturedTypeQType final : SimpleQType {
FullFeaturedTypeQType()
: SimpleQType(
meta::type<FullFeaturedType>(), "FullFeaturedType",
GetQType<TypeWithoutRepr>(),
"::arolla::FullFeaturedQType") {}
absl::string_view UnsafePyQValueSpecializationKey(
const void* ) const final {
return "::arolla::FullFeaturedQValue";
}
};
static const absl::NoDestructor<FullFeaturedTypeQType> result;
return result.get();
}
namespace {
TEST(SimpleQType, TypeWithRepr) {
TypeWithRepr x;
EXPECT_THAT(GetQType<TypeWithRepr>()->UnsafeReprToken(&x),
ReprTokenEq("type_with_repr", {10, 50}));
}
TEST(SimpleQType, TypeWithoutRepr) {
TypeWithoutRepr x;
const auto repr_result = GetQType<TypeWithoutRepr>()->UnsafeReprToken(&x);
EXPECT_THAT(repr_result.str,
MatchesRegex("<value of TYPE_WITHOUT_REPR at 0x[0-9a-f]+>"));
EXPECT_THAT(repr_result.precedence.left, -1);
EXPECT_THAT(repr_result.precedence.right, -1);
}
TEST(SimpleQType, FullFeaturedQType) {
auto qtype = GetQType<FullFeaturedType>();
const FullFeaturedType x{4};
EXPECT_EQ(qtype->value_qtype(), GetQType<TypeWithoutRepr>());
EXPECT_EQ(qtype->qtype_specialization_key(), "::arolla::FullFeaturedQType");
EXPECT_THAT(qtype->UnsafeReprToken(&x),
ReprTokenEq("FullFeaturedType{4}", {31, 27}));
EXPECT_EQ(qtype->UnsafePyQValueSpecializationKey(&x),
"::arolla::FullFeaturedQValue");
FingerprintHasher hx("salt");
FingerprintHasher hy("salt");
const FullFeaturedType y{3};
qtype->UnsafeCombineToFingerprintHasher(&x, &hx);
qtype->UnsafeCombineToFingerprintHasher(&y, &hy);
EXPECT_NE(std::move(hx).Finish(), std::move(hy).Finish());
}
TEST(SimpleQType, TypeWithNames) {
QTypePtr qtype = GetQType<TypeWithNamedFields>();
EXPECT_THAT(GetFieldNames(qtype), ElementsAre("x", "y"));
EXPECT_EQ(GetFieldIndexByName(qtype, "x"), 0);
EXPECT_EQ(GetFieldIndexByName(qtype, "y"), 1);
EXPECT_EQ(GetFieldIndexByName(qtype, "z"), std::nullopt);
}
TEST(SimpleQType, TypeWithNamesErrors) {
QTypePtr qtype = GetQType<int>();
EXPECT_THAT(GetFieldNames(qtype), IsEmpty());
EXPECT_EQ(GetFieldIndexByName(qtype, "y"), std::nullopt);
EXPECT_EQ(GetFieldIndexByName(qtype, "x"), std::nullopt);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/simple_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/simple_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
534e3bfc-a9e2-4780-8149-1ee9748e5b1d | cpp | google/arolla | test_util | arolla/decision_forest/testing/test_util.cc | arolla/decision_forest/testing/test_util_test.cc | #include "arolla/decision_forest/testing/test_util.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla {
namespace {
constexpr int kSetOfValuesSize = 10;
template <typename ConditionFactoryFn>
DecisionTree CreateRandomTreeImpl(absl::BitGen* rnd, int num_features,
bool interactions, int num_splits,
ConditionFactoryFn condition_factory) {
DecisionTree tree;
tree.adjustments.resize(num_splits + 1);
for (float& val : tree.adjustments) {
val = absl::Uniform<uint8_t>(*rnd);
}
int single_feature_id = absl::Uniform<int32_t>(*rnd, 0, num_features);
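  // Annotation (not in the original source): the splits form a complete
  // binary tree in array layout: node i has children 2i+1 and 2i+2, and
  // indices past num_splits map to adjustment (leaf) ids.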
for (int i = 0; i < num_splits; ++i) {
auto child1 =
i * 2 + 1 < num_splits
? DecisionTreeNodeId::SplitNodeId(i * 2 + 1)
: DecisionTreeNodeId::AdjustmentId(i * 2 + 1 - num_splits);
auto child2 =
i * 2 + 2 < num_splits
? DecisionTreeNodeId::SplitNodeId(i * 2 + 2)
: DecisionTreeNodeId::AdjustmentId(i * 2 + 2 - num_splits);
int feature_id;
if (interactions) {
feature_id = absl::Uniform<int32_t>(*rnd, 0, num_features);
} else {
feature_id = single_feature_id;
}
tree.split_nodes.push_back({child1, child2, condition_factory(feature_id)});
}
return tree;
}
}
absl::Status FillWithRandomValue(TypedSlot tslot, FramePtr ctx,
absl::BitGen* rnd, double missed_prob) {
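  // Annotation (not in the original source): placeholder slots (see
  // CreateSlotsForForest) are never initialized; skip them.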
if (tslot.byte_offset() == FrameLayout::Slot<float>::kUninitializedOffset) {
return absl::OkStatus();
}
bool missed = (absl::Uniform<float>(*rnd, 0, 1) < missed_prob);
if (tslot.GetType() == GetOptionalQType<float>()) {
auto slot = tslot.ToSlot<OptionalValue<float>>().value();
auto val = OptionalValue<float>(absl::Uniform<float>(*rnd, 0, 1));
ctx.Set(slot, missed ? OptionalValue<float>{} : val);
} else if (tslot.GetType() == GetOptionalQType<int64_t>()) {
auto slot = tslot.ToSlot<OptionalValue<int64_t>>().value();
auto val = OptionalValue<int64_t>(absl::Uniform<int64_t>(*rnd, 0, 1000));
ctx.Set(slot, missed ? OptionalValue<int64_t>{} : val);
} else {
return absl::UnimplementedError(std::string("Unimplemented for type: ") +
std::string(tslot.GetType()->name()));
}
return absl::OkStatus();
}
absl::Status FillArrayWithRandomValues(int64_t size, TypedSlot tslot,
FramePtr ctx, absl::BitGen* rnd,
double missed_prob) {
if (tslot.byte_offset() == FrameLayout::Slot<float>::kUninitializedOffset) {
return absl::OkStatus();
}
if (tslot.GetType() == GetDenseArrayQType<float>()) {
auto slot = tslot.UnsafeToSlot<DenseArray<float>>();
DenseArrayBuilder<float> bldr(size);
for (int64_t i = 0; i < size; ++i) {
bool missed = (absl::Uniform<float>(*rnd, 0, 1) < missed_prob);
if (!missed) {
bldr.Set(i, absl::Uniform<float>(*rnd, 0, 1));
}
}
ctx.Set(slot, std::move(bldr).Build());
} else if (tslot.GetType() == GetDenseArrayQType<int64_t>()) {
auto slot = tslot.UnsafeToSlot<DenseArray<int64_t>>();
DenseArrayBuilder<int64_t> bldr(size);
for (int64_t i = 0; i < size; ++i) {
bool missed = (absl::Uniform<float>(*rnd, 0, 1) < missed_prob);
if (!missed) {
bldr.Set(i, absl::Uniform<int64_t>(*rnd, 0, 1000));
}
}
ctx.Set(slot, std::move(bldr).Build());
} else {
return absl::UnimplementedError(std::string("Unimplemented for type: ") +
std::string(tslot.GetType()->name()));
}
return absl::OkStatus();
}
void CreateSlotsForForest(const DecisionForest& forest,
FrameLayout::Builder* layout_builder,
std::vector<TypedSlot>* slots) {
auto placeholder =
TypedSlot::FromSlot(FrameLayout::Slot<float>::UnsafeUninitializedSlot());
for (auto id_qtype : forest.GetRequiredQTypes()) {
while (slots->size() <= id_qtype.first) {
slots->push_back(placeholder);
}
QTypePtr qtype = id_qtype.second;
(*slots)[id_qtype.first] = AddSlot(qtype, layout_builder);
}
}
absl::Status CreateArraySlotsForForest(const DecisionForest& forest,
FrameLayout::Builder* layout_builder,
std::vector<TypedSlot>* slots) {
auto placeholder =
TypedSlot::FromSlot(FrameLayout::Slot<float>::UnsafeUninitializedSlot());
for (auto id_qtype : forest.GetRequiredQTypes()) {
while (slots->size() <= id_qtype.first) {
slots->push_back(placeholder);
}
QTypePtr qtype = id_qtype.second;
if (qtype == GetOptionalQType<float>()) {
(*slots)[id_qtype.first] =
TypedSlot::FromSlot(layout_builder->AddSlot<DenseArray<float>>());
} else if (qtype == GetOptionalQType<int64_t>()) {
(*slots)[id_qtype.first] =
TypedSlot::FromSlot(layout_builder->AddSlot<DenseArray<int64_t>>());
} else {
return absl::UnimplementedError(std::string("Unimplemented for type: ") +
std::string(qtype->name()));
}
}
if (slots->empty()) {
slots->push_back(
TypedSlot::FromSlot(layout_builder->AddSlot<DenseArray<float>>()));
}
return absl::OkStatus();
}
DecisionTree CreateRandomFloatTree(absl::BitGen* rnd, int num_features,
bool interactions, int num_splits,
double range_split_prob,
double equality_split_prob) {
return CreateRandomTreeImpl(
rnd, num_features, interactions, num_splits, [&](int feature_id) {
float split_type_rnd = absl::Uniform<float>(*rnd, 0, 1);
if (split_type_rnd < range_split_prob + equality_split_prob) {
float sp0 = absl::Uniform<uint8_t>(*rnd) / 256.0;
float sp1 = split_type_rnd < range_split_prob
? absl::Uniform<uint8_t>(*rnd) / 256.0
: sp0;
return IntervalSplit(feature_id, std::min(sp0, sp1),
std::max(sp0, sp1));
} else {
float split_point = absl::Uniform<uint8_t>(*rnd) / 256.0;
if (absl::Bernoulli(*rnd, 0.5)) {
return IntervalSplit(feature_id, -INFINITY, split_point);
} else {
return IntervalSplit(feature_id, split_point, +INFINITY);
}
}
});
}
std::unique_ptr<const DecisionForest> CreateRandomFloatForest(
absl::BitGen* rnd, int num_features, bool interactions, int min_num_splits,
int max_num_splits, int num_trees) {
std::vector<DecisionTree> trees;
trees.reserve(num_trees);
for (int i = 0; i < num_trees; ++i) {
int num_splits =
absl::Uniform<int32_t>(*rnd, min_num_splits, max_num_splits);
trees.push_back(
CreateRandomFloatTree(rnd, num_features, interactions, num_splits));
}
return DecisionForest::FromTrees(std::move(trees)).value();
}
DecisionTree CreateRandomTree(absl::BitGen* rnd, bool interactions,
int num_splits,
std::vector<QTypePtr>* feature_types) {
const float inf = std::numeric_limits<float>::infinity();
return CreateRandomTreeImpl(
rnd, feature_types->size(), interactions, num_splits,
[&](int feature_id) -> std::shared_ptr<SplitCondition> {
QTypePtr& type = (*feature_types)[feature_id];
if (!type) {
type = absl::Bernoulli(*rnd, 0.5) ? GetOptionalQType<float>()
: GetOptionalQType<int64_t>();
}
if (type == GetOptionalQType<float>()) {
float split_point = absl::Uniform<uint8_t>(*rnd) / 256.0;
if (absl::Bernoulli(*rnd, 0.5)) {
return IntervalSplit(feature_id, -inf, split_point);
} else {
return IntervalSplit(feature_id, split_point, +inf);
}
} else {
absl::flat_hash_set<int64_t> values;
for (int i = 0; i < kSetOfValuesSize; ++i) {
values.insert(absl::Uniform<int64_t>(*rnd, 0, 1000));
}
return SetOfValuesSplit<int64_t>(feature_id, values,
absl::Bernoulli(*rnd, 0.5));
}
});
}
DecisionTree CreateRandomObliviousTree(absl::BitGen* rnd, int depth,
std::vector<QTypePtr>* feature_types) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<std::shared_ptr<SplitCondition>> conditions(depth);
for (int i = 0; i < depth; ++i) {
int feature_id = absl::Uniform<int32_t>(*rnd, 0, feature_types->size());
QTypePtr& type = (*feature_types)[feature_id];
if (!type) {
type = absl::Bernoulli(*rnd, 0.5) ? GetOptionalQType<float>()
: GetOptionalQType<int64_t>();
}
if (type == GetOptionalQType<float>()) {
float split_point = absl::Uniform<uint8_t>(*rnd) / 256.0;
if (absl::Bernoulli(*rnd, 0.5)) {
conditions[i] = IntervalSplit(feature_id, -inf, split_point);
} else {
conditions[i] = IntervalSplit(feature_id, split_point, +inf);
}
} else {
absl::flat_hash_set<int64_t> values;
for (int i = 0; i < kSetOfValuesSize; ++i) {
values.insert(absl::Uniform<int64_t>(*rnd, 0, 1000));
}
conditions[i] = SetOfValuesSplit<int64_t>(feature_id, values,
absl::Bernoulli(*rnd, 0.5));
}
}
int cond_id = 0;
int node_id = 0;
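  // Annotation (not in the original source): every split node within one
  // level of an oblivious tree shares the same condition. cond_id advances
  // once per level, at the level's last node (node_id == 2^k - 1, detected
  // below via node_id & (node_id + 1) == 0).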
return CreateRandomTreeImpl(rnd, feature_types->size(), false,
(1 << depth) - 1, [&](int) {
node_id++;
                                bool last_in_the_row =
                                    ((node_id & (node_id + 1)) == 0);
                                if (last_in_the_row) {
                                  return conditions[cond_id++];
                                } else {
                                  return conditions[cond_id];
                                }
});
}
std::unique_ptr<const DecisionForest> CreateRandomForest(
absl::BitGen* rnd, int num_features, bool interactions, int min_num_splits,
int max_num_splits, int num_trees,
absl::Span<const QTypePtr> feature_types) {
std::vector<QTypePtr> types;
for (int feature_id = 0; feature_id < num_features; feature_id++) {
if (feature_id < feature_types.size() && feature_types[feature_id]) {
types.push_back(feature_types[feature_id]);
} else {
types.push_back(nullptr);
}
}
std::vector<DecisionTree> trees;
trees.reserve(num_trees);
for (int i = 0; i < num_trees; ++i) {
int num_splits =
absl::Uniform<int32_t>(*rnd, min_num_splits, max_num_splits);
trees.push_back(CreateRandomTree(rnd, interactions, num_splits, &types));
}
return DecisionForest::FromTrees(std::move(trees)).value();
}
} | #include "arolla/decision_forest/testing/test_util.h"
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/random/random.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla {
namespace {
TEST(TestUtilTest, FillWithRandomValue) {
absl::BitGen rnd;
FrameLayout::Builder bldr;
auto opt_float_slot = bldr.AddSlot<OptionalValue<float>>();
auto opt_int64_slot = bldr.AddSlot<OptionalValue<int64_t>>();
auto layout = std::move(bldr).Build();
RootEvaluationContext ctx(&layout);
ctx.Set(opt_float_slot, OptionalValue<float>(-1.0));
ctx.Set(opt_int64_slot, OptionalValue<int64_t>(-1));
CHECK_OK(FillWithRandomValue(TypedSlot::FromSlot(opt_float_slot), ctx.frame(),
&rnd));
CHECK_OK(FillWithRandomValue(TypedSlot::FromSlot(opt_int64_slot), ctx.frame(),
&rnd));
EXPECT_NE(OptionalValue<float>(-1.0), ctx.Get(opt_float_slot));
EXPECT_NE(OptionalValue<int64_t>(-1), ctx.Get(opt_int64_slot));
}
TEST(TestUtilTest, CreateSlotsForForest) {
absl::BitGen rnd;
auto forest = CreateRandomForest(&rnd, 5, true, 1, 64, 16);
FrameLayout::Builder bldr;
std::vector<TypedSlot> slots;
CreateSlotsForForest(*forest, &bldr, &slots);
EXPECT_OK(forest->ValidateInputSlots(slots));
}
TEST(TestUtilTest, CreateRandomFloatTree) {
absl::BitGen rnd;
for (size_t depth = 0; depth <= 15; ++depth) {
auto tree = CreateRandomFloatTree(&rnd, 5, true, (1 << depth) - 1);
EXPECT_EQ(tree.adjustments.size(), 1 << depth);
EXPECT_EQ(tree.split_nodes.size(), (1 << depth) - 1);
}
}
TEST(TestUtilTest, CreateRandomFloatForest) {
absl::BitGen rnd;
auto forest = CreateRandomFloatForest(&rnd, 5, true, 1, 64, 16);
EXPECT_EQ(forest->GetTrees().size(), 16);
EXPECT_GE(forest->GetRequiredQTypes().size(), 1);
EXPECT_LE(forest->GetRequiredQTypes().size(), 5);
for (const DecisionTree& tree : forest->GetTrees()) {
EXPECT_LE(tree.split_nodes.size(), 64);
}
}
TEST(TestUtilTest, CreateRandomForest) {
absl::BitGen rnd;
auto forest = CreateRandomForest(&rnd, 5, true, 1, 64, 16);
EXPECT_EQ(forest->GetTrees().size(), 16);
EXPECT_GE(forest->GetRequiredQTypes().size(), 1);
EXPECT_LE(forest->GetRequiredQTypes().size(), 5);
for (const DecisionTree& tree : forest->GetTrees()) {
EXPECT_LE(tree.split_nodes.size(), 64);
}
}
TEST(TestUtilTest, CreateRandomObliviousTree) {
absl::BitGen rnd;
std::vector<QTypePtr> types(10);
auto tree = CreateRandomObliviousTree(&rnd, 3, &types);
ASSERT_EQ(tree.split_nodes.size(), 7);
EXPECT_EQ(tree.split_nodes[1].condition, tree.split_nodes[2].condition);
EXPECT_EQ(tree.split_nodes[3].condition, tree.split_nodes[4].condition);
EXPECT_EQ(tree.split_nodes[4].condition, tree.split_nodes[5].condition);
EXPECT_EQ(tree.split_nodes[5].condition, tree.split_nodes[6].condition);
}
TEST(TestUtilTest, CreateRandomForestWithoutInteractions) {
absl::BitGen rnd;
auto forest = CreateRandomForest(&rnd, 5, false, 512, 512, 1);
EXPECT_EQ(forest->GetTrees().size(), 1);
EXPECT_EQ(forest->GetRequiredQTypes().size(), 1);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/testing/test_util.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/testing/test_util_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
6f3f95cf-f163-4ace-87a5-f25a38f093e7 | cpp | google/cel-cpp | list_value | common/values/list_value.cc | common/values/list_value_test.cc | #include <cstddef>
#include <string>
#include <utility>
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/status_macros.h"
namespace cel {
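// ListValue wraps a variant of concrete list representations; every method
// below forwards to the active alternative via absl::visit.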
absl::string_view ListValue::GetTypeName() const {
return absl::visit(
[](const auto& alternative) -> absl::string_view {
return alternative.GetTypeName();
},
variant_);
}
std::string ListValue::DebugString() const {
return absl::visit(
[](const auto& alternative) -> std::string {
return alternative.DebugString();
},
variant_);
}
absl::Status ListValue::SerializeTo(AnyToJsonConverter& converter,
absl::Cord& value) const {
return absl::visit(
[&converter, &value](const auto& alternative) -> absl::Status {
return alternative.SerializeTo(converter, value);
},
variant_);
}
absl::StatusOr<Json> ListValue::ConvertToJson(
AnyToJsonConverter& converter) const {
return absl::visit(
[&converter](const auto& alternative) -> absl::StatusOr<Json> {
return alternative.ConvertToJson(converter);
},
variant_);
}
absl::StatusOr<JsonArray> ListValue::ConvertToJsonArray(
AnyToJsonConverter& converter) const {
return absl::visit(
[&converter](const auto& alternative) -> absl::StatusOr<JsonArray> {
return alternative.ConvertToJsonArray(converter);
},
variant_);
}
bool ListValue::IsZeroValue() const {
return absl::visit(
[](const auto& alternative) -> bool { return alternative.IsZeroValue(); },
variant_);
}
absl::StatusOr<bool> ListValue::IsEmpty() const {
return absl::visit(
[](const auto& alternative) -> bool { return alternative.IsEmpty(); },
variant_);
}
absl::StatusOr<size_t> ListValue::Size() const {
return absl::visit(
[](const auto& alternative) -> size_t { return alternative.Size(); },
variant_);
}
namespace common_internal {
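// Element-wise list equality: sizes must match, and iteration stops early at
// the first pair of elements that compares unequal.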
absl::Status ListValueEqual(ValueManager& value_manager, const ListValue& lhs,
const ListValue& rhs, Value& result) {
if (Is(lhs, rhs)) {
result = BoolValue{true};
return absl::OkStatus();
}
CEL_ASSIGN_OR_RETURN(auto lhs_size, lhs.Size());
CEL_ASSIGN_OR_RETURN(auto rhs_size, rhs.Size());
if (lhs_size != rhs_size) {
result = BoolValue{false};
return absl::OkStatus();
}
CEL_ASSIGN_OR_RETURN(auto lhs_iterator, lhs.NewIterator(value_manager));
CEL_ASSIGN_OR_RETURN(auto rhs_iterator, rhs.NewIterator(value_manager));
Value lhs_element;
Value rhs_element;
for (size_t index = 0; index < lhs_size; ++index) {
ABSL_CHECK(lhs_iterator->HasNext());
ABSL_CHECK(rhs_iterator->HasNext());
CEL_RETURN_IF_ERROR(lhs_iterator->Next(value_manager, lhs_element));
CEL_RETURN_IF_ERROR(rhs_iterator->Next(value_manager, rhs_element));
CEL_RETURN_IF_ERROR(lhs_element.Equal(value_manager, rhs_element, result));
if (auto bool_value = As<BoolValue>(result);
bool_value.has_value() && !bool_value->NativeValue()) {
return absl::OkStatus();
}
}
ABSL_DCHECK(!lhs_iterator->HasNext());
ABSL_DCHECK(!rhs_iterator->HasNext());
result = BoolValue{true};
return absl::OkStatus();
}
absl::Status ListValueEqual(ValueManager& value_manager,
const ParsedListValueInterface& lhs,
const ListValue& rhs, Value& result) {
auto lhs_size = lhs.Size();
CEL_ASSIGN_OR_RETURN(auto rhs_size, rhs.Size());
if (lhs_size != rhs_size) {
result = BoolValue{false};
return absl::OkStatus();
}
CEL_ASSIGN_OR_RETURN(auto lhs_iterator, lhs.NewIterator(value_manager));
CEL_ASSIGN_OR_RETURN(auto rhs_iterator, rhs.NewIterator(value_manager));
Value lhs_element;
Value rhs_element;
for (size_t index = 0; index < lhs_size; ++index) {
ABSL_CHECK(lhs_iterator->HasNext());
ABSL_CHECK(rhs_iterator->HasNext());
CEL_RETURN_IF_ERROR(lhs_iterator->Next(value_manager, lhs_element));
CEL_RETURN_IF_ERROR(rhs_iterator->Next(value_manager, rhs_element));
CEL_RETURN_IF_ERROR(lhs_element.Equal(value_manager, rhs_element, result));
if (auto bool_value = As<BoolValue>(result);
bool_value.has_value() && !bool_value->NativeValue()) {
return absl::OkStatus();
}
}
ABSL_DCHECK(!lhs_iterator->HasNext());
ABSL_DCHECK(!rhs_iterator->HasNext());
result = BoolValue{true};
return absl::OkStatus();
}
}
common_internal::ValueVariant ListValue::ToValueVariant() const& {
return absl::visit(
[](const auto& alternative) -> common_internal::ValueVariant {
return alternative;
},
variant_);
}
common_internal::ValueVariant ListValue::ToValueVariant() && {
return absl::visit(
[](auto&& alternative) -> common_internal::ValueVariant {
return std::move(alternative);
},
std::move(variant_));
}
} | #include <cstdint>
#include <memory>
#include <sstream>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAreArray;
using ::testing::TestParamInfo;
class ListValueTest : public common_internal::ThreadCompatibleValueTest<> {
public:
template <typename... Args>
absl::StatusOr<ListValue> NewIntListValue(Args&&... args) {
CEL_ASSIGN_OR_RETURN(auto builder,
value_manager().NewListValueBuilder(ListType()));
(static_cast<void>(builder->Add(std::forward<Args>(args))), ...);
return std::move(*builder).Build();
}
};
TEST_P(ListValueTest, Default) {
ListValue value;
EXPECT_THAT(value.IsEmpty(), IsOkAndHolds(true));
EXPECT_THAT(value.Size(), IsOkAndHolds(0));
EXPECT_EQ(value.DebugString(), "[]");
}
TEST_P(ListValueTest, Kind) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
EXPECT_EQ(value.kind(), ListValue::kKind);
EXPECT_EQ(Value(value).kind(), ListValue::kKind);
}
TEST_P(ListValueTest, Type) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
}
TEST_P(ListValueTest, DebugString) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
{
std::ostringstream out;
out << value;
EXPECT_EQ(out.str(), "[0, 1, 2]");
}
{
std::ostringstream out;
out << Value(value);
EXPECT_EQ(out.str(), "[0, 1, 2]");
}
}
TEST_P(ListValueTest, IsEmpty) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
EXPECT_THAT(value.IsEmpty(), IsOkAndHolds(false));
}
TEST_P(ListValueTest, Size) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
EXPECT_THAT(value.Size(), IsOkAndHolds(3));
}
TEST_P(ListValueTest, Get) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
ASSERT_OK_AND_ASSIGN(auto element, value.Get(value_manager(), 0));
ASSERT_TRUE(InstanceOf<IntValue>(element));
ASSERT_EQ(Cast<IntValue>(element).NativeValue(), 0);
ASSERT_OK_AND_ASSIGN(element, value.Get(value_manager(), 1));
ASSERT_TRUE(InstanceOf<IntValue>(element));
ASSERT_EQ(Cast<IntValue>(element).NativeValue(), 1);
ASSERT_OK_AND_ASSIGN(element, value.Get(value_manager(), 2));
ASSERT_TRUE(InstanceOf<IntValue>(element));
ASSERT_EQ(Cast<IntValue>(element).NativeValue(), 2);
EXPECT_THAT(value.Get(value_manager(), 3),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_P(ListValueTest, ForEach) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
std::vector<int64_t> elements;
EXPECT_OK(value.ForEach(value_manager(), [&elements](const Value& element) {
elements.push_back(Cast<IntValue>(element).NativeValue());
return true;
}));
EXPECT_THAT(elements, ElementsAreArray({0, 1, 2}));
}
TEST_P(ListValueTest, Contains) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
ASSERT_OK_AND_ASSIGN(auto contained,
value.Contains(value_manager(), IntValue(2)));
ASSERT_TRUE(InstanceOf<BoolValue>(contained));
EXPECT_TRUE(Cast<BoolValue>(contained).NativeValue());
ASSERT_OK_AND_ASSIGN(contained, value.Contains(value_manager(), IntValue(3)));
ASSERT_TRUE(InstanceOf<BoolValue>(contained));
EXPECT_FALSE(Cast<BoolValue>(contained).NativeValue());
}
TEST_P(ListValueTest, NewIterator) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
ASSERT_OK_AND_ASSIGN(auto iterator, value.NewIterator(value_manager()));
std::vector<int64_t> elements;
while (iterator->HasNext()) {
ASSERT_OK_AND_ASSIGN(auto element, iterator->Next(value_manager()));
ASSERT_TRUE(InstanceOf<IntValue>(element));
elements.push_back(Cast<IntValue>(element).NativeValue());
}
EXPECT_EQ(iterator->HasNext(), false);
EXPECT_THAT(iterator->Next(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
EXPECT_THAT(elements, ElementsAreArray({0, 1, 2}));
}
TEST_P(ListValueTest, ConvertToJson) {
ASSERT_OK_AND_ASSIGN(auto value,
NewIntListValue(IntValue(0), IntValue(1), IntValue(2)));
EXPECT_THAT(value.ConvertToJson(value_manager()),
IsOkAndHolds(Json(MakeJsonArray({0.0, 1.0, 2.0}))));
}
INSTANTIATE_TEST_SUITE_P(
ListValueTest, ListValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
ListValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/list_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/list_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3ebcb91f-5f3f-4f11-9ce0-c059d17b31b9 | cpp | google/tensorstore | enum | tensorstore/internal/json_binding/enum.h | tensorstore/internal/json_binding/enum_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_ENUM_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_ENUM_H_
#include <stddef.h>
#include <string>
#include <utility>
#include <variant>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
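// Binds an enum to JSON constants by linear search over (enum value, JSON
// value) pairs; a load that matches no pair reports the full set of accepted
// JSON constants in the error.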
template <typename EnumValue, typename JsonValue, size_t N>
constexpr auto Enum(const std::pair<EnumValue, JsonValue> (&values)[N]) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
for (const auto& p : values) {
if constexpr (is_loading) {
if (internal_json::JsonSame(p.second, *j)) {
*obj = p.first;
return absl::OkStatus();
}
} else {
if (p.first == *obj) {
*j = p.second;
return absl::OkStatus();
}
}
}
if constexpr (is_loading) {
return internal_json::ExpectedError(
*j,
tensorstore::StrCat(
"one of ",
absl::StrJoin(values, ", ", [](std::string* out, const auto& p) {
*out += ::nlohmann::json(p.second).dump();
})));
} else {
ABSL_UNREACHABLE();
}
};
}
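// Like Enum, but allows several JSON spellings per value and falls back to
// `binder` when no pair matches; when saving, the first pair listed for a
// value wins.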
template <typename Binder, typename... Value, typename... JsonValue>
constexpr auto MapValue(Binder binder, std::pair<Value, JsonValue>... pairs) {
constexpr size_t N = sizeof...(pairs);
static_assert(N > 0);
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
if (((internal_json::JsonSame(*j, pairs.second) &&
(static_cast<void>(*obj = pairs.first), true)) ||
...))
return absl::OkStatus();
} else {
if ((((*obj == pairs.first) &&
(static_cast<void>(*j = pairs.second), true)) ||
...))
return absl::OkStatus();
}
return binder(is_loading, options, obj, j);
};
}
}
}
#endif | #include "tensorstore/internal/json_binding/enum.h"
#include <memory>
#include <string_view>
#include <utility>
#include <variant>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::MatchesStatus;
namespace jb = tensorstore::internal_json_binding;
namespace {
TEST(JsonBindingTest, Enum) {
enum class TestEnum { a, b };
const auto binder = jb::Enum<TestEnum, std::string_view>({
{TestEnum::a, "a"},
{TestEnum::b, "b"},
});
tensorstore::TestJsonBinderRoundTrip<TestEnum>(
{
{TestEnum::a, "a"},
{TestEnum::b, "b"},
},
binder);
tensorstore::TestJsonBinderFromJson<TestEnum>(
{
{"c",
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected one of \"a\", \"b\", but received: \"c\"")},
},
binder);
}
TEST(JsonBindingTest, MapValue) {
enum class TestMap { a, b };
const auto binder = jb::MapValue(
[](auto...) { return absl::InvalidArgumentError("missing"); },
std::make_pair(TestMap::a, "a"),
std::make_pair(TestMap::b, "b"),
std::make_pair(TestMap::a, 1),
std::make_pair(TestMap::b, 2));
tensorstore::TestJsonBinderRoundTrip<TestMap>(
{
{TestMap::a, "a"},
{TestMap::b, "b"},
},
binder);
tensorstore::TestJsonBinderFromJson<TestMap>(
{
{"a", ::testing::Eq(TestMap::a)},
{"b", ::testing::Eq(TestMap::b)},
{"c",
MatchesStatus(absl::StatusCode::kInvalidArgument, ".*missing.*")},
{1, ::testing::Eq(TestMap::a)},
{2, ::testing::Eq(TestMap::b)},
{3, MatchesStatus(absl::StatusCode::kInvalidArgument, ".*missing.*")},
},
binder);
}
namespace map_variant_test {
struct A {
[[maybe_unused]] friend bool operator==(const A&, const A&) { return true; }
};
struct B {
[[maybe_unused]] friend bool operator==(const B&, const B&) { return true; }
};
struct C {
[[maybe_unused]] friend bool operator==(const C&, const C&) { return true; }
};
}
TEST(JsonBindingTest, MapValueVariant) {
using map_variant_test::A;
using map_variant_test::B;
using map_variant_test::C;
using T = std::variant<A, B, C>;
const auto binder = jb::MapValue(
[](auto...) { return absl::InvalidArgumentError("missing"); },
std::make_pair(T{A{}}, "a"),
std::make_pair(T{B{}}, "b"),
std::make_pair(T{C{}}, 3));
tensorstore::TestJsonBinderRoundTrip<T>(
{
{A{}, "a"},
{B{}, "b"},
{C{}, 3},
},
binder);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/enum.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/enum_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
58dea728-e29e-444e-823b-bcc5f8ff23b4 | cpp | tensorflow/tensorflow | custom_call | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/custom_call.cc | third_party/xla/xla/service/gpu/custom_call_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/custom_call.h"
#include <optional>
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
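// Converts mhlo.custom_call ops whose target name starts with "custom_call."
// into tfl.custom ops, carrying a string backend config as the custom
// options payload.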
class ConvertCustomCallOp : public OpConversionPattern<mhlo::CustomCallOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::CustomCallOp mhlo_custom_call, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult ConvertCustomCallOp::matchAndRewrite(
mhlo::CustomCallOp mhlo_custom_call, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto call_target_name = mhlo_custom_call.getCallTargetName();
if (!call_target_name.starts_with("custom_call.")) {
return failure();
}
auto tfl_custom = rewriter.create<TFL::CustomOp>(
mhlo_custom_call.getLoc(), mhlo_custom_call.getResultTypes(),
mhlo_custom_call.getInputs());
tfl_custom.setCustomCodeAttr(rewriter.getStringAttr(call_target_name));
if (auto bc = mhlo_custom_call.getBackendConfig()) {
if (auto stringattr = mlir::dyn_cast_or_null<mlir::StringAttr>(*bc)) {
tfl_custom.setCustomOptionAttr(
TFL::ConstBytesAttr::get(rewriter.getContext(), stringattr));
}
} else {
tfl_custom.setCustomOptionAttr(
TFL::ConstBytesAttr::get(rewriter.getContext(), ""));
}
rewriter.replaceOp(mhlo_custom_call, tfl_custom);
return success();
}
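// Erases mhlo.custom_call ops that target "shape_assertion" before the main
// conversion runs (see PopulateCustomCallPreparePatterns below).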
class RemoveCustomCallWithShapeAssertion
: public OpRewritePattern<mhlo::CustomCallOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::CustomCallOp op,
PatternRewriter& rewriter) const final;
};
LogicalResult RemoveCustomCallWithShapeAssertion::matchAndRewrite(
mhlo::CustomCallOp op, PatternRewriter& rewriter) const {
if (op.getCallTargetName() != "shape_assertion") {
return mlir::failure();
}
rewriter.eraseOp(op);
return success();
}
std::optional<bool> IsCustomCallLegal(mhlo::CustomCallOp op) {
auto call_target_name = op.getCallTargetName();
if (call_target_name.starts_with("custom_call.")) {
auto bc = op.getBackendConfig();
if (!bc || mlir::isa<mlir::StringAttr>(*bc)) {
return false;
}
}
return true;
}
void PopulateCustomCallPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<ConvertCustomCallOp>(ctx);
target.addDynamicallyLegalOp<mhlo::CustomCallOp>(IsCustomCallLegal);
}
void PopulateCustomCallPreparePatterns(MLIRContext* ctx,
RewritePatternSet& patterns) {
patterns.add<RemoveCustomCallWithShapeAssertion>(ctx);
}
}
} | #include <cstddef>
#include <cstdint>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <string_view>
#include <vector>
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/gpus/cuda/include/driver_types.h"
#define PLATFORM "CUDA"
#elif TENSORFLOW_USE_ROCM
#include "rocm/include/hip/hip_runtime.h"
#define PLATFORM "ROCM"
#endif
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/ffi/execution_context.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#define gpuSuccess cudaSuccess
#define gpuMemcpyAsync cudaMemcpyAsync
#define gpuMemcpyDeviceToDevice cudaMemcpyDeviceToDevice
#define gpuMemcpy cudaMemcpy
#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
#elif TENSORFLOW_USE_ROCM
#define gpuSuccess hipSuccess
#define gpuMemcpyAsync hipMemcpyAsync
#define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice
#define gpuMemcpy hipMemcpy
#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
#define gpuMemcpyHostToDevice hipMemcpyHostToDevice
#endif
namespace xla {
struct Range {
int64_t lo;
int64_t hi;
};
}
XLA_FFI_REGISTER_STRUCT_ATTR_DECODING(::xla::Range, StructMember<int64_t>("lo"),
StructMember<int64_t>("hi"));
namespace xla {
namespace {
class CustomCallTest : public ClientLibraryTestBase {};
bool is_invoked_called = false;
void Callback_IsInvoked(se::gpu::GpuStreamHandle /*stream*/,
                        void** /*buffers*/, const char* /*opaque*/,
                        size_t /*opaque_len*/) {
is_invoked_called = true;
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_IsInvoked, PLATFORM);
TEST_F(CustomCallTest, IsInvoked) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_IsInvoked", {},
ShapeUtil::MakeShape(F32, {}),
"");
EXPECT_FALSE(is_invoked_called);
TF_ASSERT_OK(Execute(&b, {}).status());
EXPECT_TRUE(is_invoked_called);
}
TEST_F(CustomCallTest, UnknownTarget) {
XlaBuilder b(TestName());
CustomCall(&b, "UnknownTarget", {},
ShapeUtil::MakeShape(F32, {}),
"");
ASSERT_FALSE(Execute(&b, {}).ok());
}
void Callback_Memcpy(se::gpu::GpuStreamHandle stream, void** buffers,
                     const char* /*opaque*/, size_t /*opaque_len*/) {
void* src = buffers[0];
void* dst = buffers[1];
auto err = gpuMemcpyAsync(dst, src, sizeof(float) * 128,
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_Memcpy, PLATFORM);
TEST_F(CustomCallTest, Memcpy) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_Memcpy",
{Broadcast(ConstantR0WithType(&b, F32, 42.0), {128})},
ShapeUtil::MakeShape(F32, {128}), "");
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_THAT(result.data<float>(), ::testing::Each(42));
}
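// Intentionally leaked so the value (which embeds a NUL byte) outlives all
// tests without running a destructor at shutdown.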
std::string& kExpectedOpaque = *new std::string("abc\0def", 7);
void Callback_Opaque(se::gpu::GpuStreamHandle /*stream*/, void** /*buffers*/,
const char* opaque, size_t opaque_len) {
std::string opaque_str(opaque, opaque_len);
ASSERT_EQ(opaque_str, kExpectedOpaque);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_Opaque, PLATFORM);
TEST_F(CustomCallTest, Opaque) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_Opaque", {},
ShapeUtil::MakeShape(F32, {}), kExpectedOpaque);
TF_ASSERT_OK(Execute(&b, {}).status());
}
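// buffers[0..3] are the flattened tuple inputs ({128}, {256}, {1024}, {8}
// floats); buffers[4..7] are the flattened tuple outputs.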
void Callback_SubBuffers(se::gpu::GpuStreamHandle stream, void** buffers,
                         const char* /*opaque*/, size_t /*opaque_len*/) {
auto err = gpuMemcpyAsync(buffers[4], buffers[3], 8 * sizeof(float),
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
err = gpuMemcpyAsync(buffers[5], buffers[0], 128 * sizeof(float),
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
err = gpuMemcpyAsync(buffers[6], buffers[1], 256 * sizeof(float),
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
err = gpuMemcpyAsync(buffers[7], buffers[2], 1024 * sizeof(float),
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_SubBuffers, PLATFORM);
TEST_F(CustomCallTest, SubBuffers) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_SubBuffers",
{
Tuple(&b,
{
Broadcast(ConstantR0WithType(&b, F32, 1), {128}),
Broadcast(ConstantR0WithType(&b, F32, 2), {256}),
}),
Tuple(&b,
{
Broadcast(ConstantR0WithType(&b, F32, 3), {1024}),
Broadcast(ConstantR0WithType(&b, F32, 4), {8}),
}),
},
ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {8}),
ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {128}),
ShapeUtil::MakeShape(F32, {256}),
}),
ShapeUtil::MakeShape(F32, {1024}),
}),
"");
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_THAT(result.data<float>({0}), ::testing::Each(4));
EXPECT_THAT(result.data<float>({1, 0}), ::testing::Each(1));
EXPECT_THAT(result.data<float>({1, 1}), ::testing::Each(2));
EXPECT_THAT(result.data<float>({2}), ::testing::Each(3));
}
struct TokenTestCase {
std::string input;
std::string output;
std::string opaque;
};
std::ostream& operator<<(std::ostream& s, const TokenTestCase& tc) {
s << tc.input << "x" << tc.output << "x" << tc.opaque;
return s;
}
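// The opaque string encodes the expected flattened buffer pattern: 'A' marks
// an array (non-null buffer) and 'T' marks a token (null buffer).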
void Callback_Tokens(se::gpu::GpuStreamHandle stream, void** buffers,
const char* opaque, size_t opaque_len) {
for (int i = 0; i < opaque_len; ++i) {
char c = opaque[i];
ASSERT_TRUE(c == 'A' || c == 'T');
if (c == 'A') {
ASSERT_NE(buffers[i], nullptr);
} else {
ASSERT_EQ(buffers[i], nullptr);
}
}
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_Tokens, PLATFORM);
std::vector<TokenTestCase> GetTokenTestCases() {
return {{"{AT}{AT}", "{A{AT}A}", "ATATAATA"},
{"{A}", "T", "AT"},
{"{{T}}", "A", "TA"},
{"AA", "{TA}", "AATA"},
{"TA{TA{TA}}", "{AA}", "TATATAAA"}};
}
class CustomCallTokensTest
: public ::testing::WithParamInterface<TokenTestCase>,
public ClientLibraryTestBase {
public:
static std::vector<XlaOp> BuildInputs(XlaBuilder& b,
std::istringstream& str) {
std::vector<XlaOp> values;
while (!str.eof()) {
int ch = str.get();
if (ch == 'A') {
values.push_back(Broadcast(ConstantR0WithType(&b, F32, 1), {128}));
} else if (ch == 'T') {
values.push_back(CreateToken(&b));
} else if (ch == '{') {
std::vector<XlaOp> tuple_elements = BuildInputs(b, str);
values.push_back(Tuple(&b, tuple_elements));
} else if (ch == '}') {
break;
}
}
return values;
}
static std::vector<Shape> BuildOutputType(std::istringstream& str) {
std::vector<Shape> shapes;
while (!str.eof()) {
int ch = str.get();
if (ch == 'A') {
shapes.push_back(ShapeUtil::MakeShape(F32, {8}));
} else if (ch == 'T') {
shapes.push_back(ShapeUtil::MakeTokenShape());
} else if (ch == '{') {
std::vector<Shape> tuple_elements = BuildOutputType(str);
shapes.push_back(ShapeUtil::MakeTupleShape(tuple_elements));
} else if (ch == '}') {
break;
}
}
return shapes;
}
};
TEST_P(CustomCallTokensTest, TokensTest) {
const TokenTestCase& tc = GetParam();
XlaBuilder b("CustomCallTokens");
std::istringstream input(tc.input);
std::istringstream output(tc.output);
std::vector<XlaOp> call_inputs = BuildInputs(b, input);
std::vector<Shape> call_output = BuildOutputType(output);
ASSERT_EQ(call_output.size(), 1);
CustomCall(&b, "Callback_Tokens", call_inputs, call_output.front(),
tc.opaque);
TF_ASSERT_OK(Execute(&b, {}).status());
}
INSTANTIATE_TEST_CASE_P(CustomCallTokens, CustomCallTokensTest,
::testing::ValuesIn(GetTokenTestCases()));
void Callback_WithStatusSucceeded(se::gpu::GpuStreamHandle /*stream*/,
                                  void** /*buffers*/, const char* /*opaque*/,
                                  size_t /*opaque_len*/,
                                  XlaCustomCallStatus* status) {
XlaCustomCallStatusSetSuccess(status);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_WithStatusSucceeded, PLATFORM);
TEST_F(CustomCallTest, WithStatusSucceeded) {
XlaBuilder b(TestName());
CustomCall(
&b, "Callback_WithStatusSucceeded", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_STATUS_RETURNING);
TF_ASSERT_OK(Execute(&b, {}).status());
}
void Callback_WithStatusFailed(se::gpu::GpuStreamHandle /*stream*/,
                               void** /*buffers*/, const char* /*opaque*/,
                               size_t /*opaque_len*/,
                               XlaCustomCallStatus* status) {
XlaCustomCallStatusSetFailure(status, "Failed", 6);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_WithStatusFailed, PLATFORM);
TEST_F(CustomCallTest, WithStatusFailed) {
XlaBuilder b(TestName());
CustomCall(
&b, "Callback_WithStatusFailed", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_STATUS_RETURNING);
auto status = Execute(&b, {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), ::testing::HasSubstr("Failed"));
}
static absl::Status AlwaysFail(ffi::Result<ffi::AnyBuffer>, int32_t value) {
return absl::InternalError(absl::StrCat("Uh oh, wrong value: ", value));
}
XLA_FFI_DEFINE_HANDLER(kAlwaysFail, AlwaysFail,
ffi::Ffi::Bind()
.Ret<ffi::AnyBuffer>()
.Attr<int32_t>("value")
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$always_fail",
PLATFORM, kAlwaysFail);
TEST_F(CustomCallTest, RuntimeCustomCallAlwaysFail) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$always_fail", {},
ShapeUtil::MakeShape(F32, {}), "{value = 42 : i32}",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
auto status = Execute(&b, {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), ::testing::HasSubstr("Uh oh, wrong value: 42"));
}
static absl::Status Memcpy(se::Stream* stream, ffi::AnyBuffer src,
ffi::Result<ffi::AnyBuffer> dst) {
se::DeviceMemoryBase dst_mem = dst->device_memory();
se::DeviceMemoryBase src_mem = src.device_memory();
return stream->MemcpyD2D(&dst_mem, src_mem, src_mem.size());
}
XLA_FFI_DEFINE_HANDLER(kMemcpy, Memcpy,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$memcpy", PLATFORM,
kMemcpy);
TEST_F(CustomCallTest, ExportedFfiMemcpy) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$memcpy",
{Broadcast(ConstantR0WithType(&b, F32, 42.0), {128})},
ShapeUtil::MakeShape(F32, {128}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_THAT(result.data<float>(), ::testing::Each(42));
}
static absl::Status HandleUserPointer(ffi::Result<ffi::AnyBuffer>,
const std::string* str) {
return absl::InternalError(*str);
}
XLA_FFI_DEFINE_HANDLER(kHandleUserPointer, HandleUserPointer,
ffi::Ffi::Bind()
.Ret<ffi::AnyBuffer>()
.Attr<ffi::Pointer<std::string>>("message"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$user_data", PLATFORM,
kHandleUserPointer);
TEST_F(CustomCallTest, PassUserPointerWithAttrs) {
std::string message = "User-defined message";
auto ptr = reinterpret_cast<uintptr_t>(&message);
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$user_data", {},
ShapeUtil::MakeShape(F32, {}),
absl::StrFormat("{message = %d : i64}", ptr),
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
auto status = Execute(&b, {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), ::testing::HasSubstr("User-defined message"));
}
bool is_ffi_invoked = false;
static absl::Status IsInvoked(ffi::Result<ffi::AnyBuffer>) {
is_ffi_invoked = true;
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(
kIsInvoked, IsInvoked,
ffi::Ffi::Bind().Ret<ffi::AnyBuffer>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$isinvoked", PLATFORM,
kIsInvoked);
TEST_F(CustomCallTest, ExportedFfiIsInvoked) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$isinvoked", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_TRUE(is_ffi_invoked);
}
TEST_F(CustomCallTest, ExportedFfiUnknownTarget) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$unknown_target", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
auto status = Execute(&b, {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kUnimplemented);
EXPECT_THAT(status.message(),
::testing::HasSubstr("No registered implementation"));
}
static absl::Status Opaque(ffi::Result<ffi::AnyBuffer>,
const std::string* str) {
std::string opaque(*str);
if (opaque != kExpectedOpaque)
return absl::InternalError(absl::StrFormat(
"Opaque string does not match. Expected `%s` but got `%s`",
kExpectedOpaque, opaque));
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kOpaque, Opaque,
ffi::Ffi::Bind()
.Ret<ffi::AnyBuffer>()
.Attr<ffi::Pointer<std::string>>("opaque"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$opaque", PLATFORM,
kOpaque);
TEST_F(CustomCallTest, ExportedFfiOpaque) {
XlaBuilder b(TestName());
const std::string opaque = absl::StrFormat(
"{opaque = %d : i64}", reinterpret_cast<uintptr_t>(&kExpectedOpaque));
CustomCall(&b, "__xla_test$$opaque", {},
ShapeUtil::MakeShape(F32, {}),
opaque,
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
static absl::Status CheckTokens(std::vector<PrimitiveType> args,
std::string_view pattern) {
if (args.size() != pattern.size()) {
return absl::InternalError("Incorrect number of arguments");
}
for (auto i = 0; i < pattern.size(); ++i) {
char c = pattern[i];
bool is_token = args[i] == PrimitiveType::TOKEN;
if (c == 'T') {
if (!is_token) {
return absl::InvalidArgumentError(
absl::StrFormat("Expected token at position %d", i));
}
} else if (c == 'A') {
if (is_token) {
return absl::InvalidArgumentError(
absl::StrFormat("Unexpected token at position %d", i));
}
} else {
return absl::InternalError(
absl::StrFormat("Unexpected character %c at position %d", c, i));
}
}
return absl::OkStatus();
}
static absl::Status FfiTokens(ffi::RemainingArgs inputs,
ffi::RemainingRets outputs,
std::string_view pattern) {
std::vector<PrimitiveType> types;
for (auto i = 0; i < inputs.size(); ++i) {
types.push_back(inputs.get<ffi::AnyBuffer>(i).value().element_type());
}
for (auto i = 0; i < outputs.size(); ++i) {
types.push_back(outputs.get<ffi::AnyBuffer>(i).value()->element_type());
}
return CheckTokens(types, pattern);
}
XLA_FFI_DEFINE_HANDLER(
kFfiTokens, FfiTokens,
ffi::Ffi::Bind().RemainingArgs().RemainingRets().Attr<std::string_view>(
"pattern"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$tokens", PLATFORM,
kFfiTokens);
TEST_P(CustomCallTokensTest, ExportedTokensTest) {
const TokenTestCase& tc = GetParam();
XlaBuilder b(TestName());
std::istringstream input(tc.input);
std::istringstream output(tc.output);
std::vector<XlaOp> call_inputs = BuildInputs(b, input);
std::vector<Shape> call_output = BuildOutputType(output);
ASSERT_GE(call_inputs.size(), 1);
ASSERT_LE(call_inputs.size(), 3);
ASSERT_EQ(call_output.size(), 1);
const std::string custom_call_name = "__xla_test$$tokens";
const std::string opaque = absl::StrFormat("{pattern = \"%s\"}", tc.opaque);
CustomCall(&b, custom_call_name, call_inputs,
call_output.front(),
opaque,
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
INSTANTIATE_TEST_SUITE_P(CustomCallTokensTest, CustomCallTokensTest,
::testing::ValuesIn(GetTokenTestCases()));
static absl::Status AlwaysSucceed(ffi::Result<ffi::AnyBuffer>) {
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kAlwaysSucceed, AlwaysSucceed,
ffi::Ffi::Bind().Ret<ffi::AnyBuffer>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$always_succeed",
PLATFORM, kAlwaysSucceed);
TEST_F(CustomCallTest, ExportedFfiWithStatusSucceeded) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$always_succeed", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
static absl::Status FfiAttributes(ffi::Result<ffi::AnyBuffer>,
absl::Span<const int32_t> i32_arr,
Range range) {
if (i32_arr.size() != 4)
return absl::InternalError("i32_arr size does not match");
if (i32_arr[0] != 1 || i32_arr[1] != 2 || i32_arr[2] != 3 || i32_arr[3] != 4)
return absl::InternalError("i32_arr values do not match");
if (range.lo != 0 || range.hi != 42) {
return absl::InternalError("range values do not match");
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiAttributes, FfiAttributes,
ffi::Ffi::Bind()
.Ret<ffi::AnyBuffer>()
.Attr<absl::Span<const int32_t>>("i32_arr")
.Attr<Range>("range"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "xla.gpu.ffi_attributes",
PLATFORM, kFfiAttributes);
TEST_F(CustomCallTest, FfiAttributes) {
XlaBuilder b(TestName());
CustomCall(&b, "xla.gpu.ffi_attributes", {},
ShapeUtil::MakeShape(F32, {}),
"{ i32_arr = array<i32: 1, 2, 3, 4>,"
" range = { lo = 0 : i64, hi = 42 : i64 } }",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
static absl::Status MemcpyWithCalledComputation(
se::Stream* stream, int32_t device_ordinal,
se::DeviceMemoryAllocator* allocator,
se::OwningScratchAllocator<> scratch_allocator, ffi::AnyBuffer src,
ffi::Result<ffi::AnyBuffer> dst, const HloComputation* called_computation) {
if (called_computation == nullptr)
return absl::InternalError("Called computation is not defined");
if (called_computation->instruction_count() != 1)
return absl::InternalError("Unexpected number of instructions");
if (!DynCast<HloParameterInstruction>(called_computation->root_instruction()))
return absl::InternalError("ROOT must be a paremeter");
auto scratch = scratch_allocator.AllocateBytes(1024);
if (!scratch.ok()) return scratch.status();
return Memcpy(stream, src, dst);
}
XLA_FFI_DEFINE_HANDLER(kMemcpyWithCalledComputation,
MemcpyWithCalledComputation,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
.Ctx<ffi::DeviceOrdinal>()
.Ctx<ffi::Allocator>()
.Ctx<ffi::ScratchAllocator>()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
.Ctx<ffi::CalledComputation>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(),
"xla.gpu.ext.memcpy_with_called_computation", PLATFORM,
kMemcpyWithCalledComputation);
TEST_F(CustomCallTest, WithCalledComputation) {
auto shape = ShapeUtil::MakeShape(F32, {128});
XlaBuilder copy("copy");
auto p0 = Parameter(©, 0, shape, "l_val");
Copy(p0);
auto copy_computation = copy.Build().value();
XlaBuilder b(TestName());
CustomCallWithComputation(
&b, "xla.gpu.ext.memcpy_with_called_computation",
{Broadcast(ConstantR0WithType(&b, F32, 42.0), {128})},
copy_computation, shape, "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_THAT(result.data<float>(), ::testing::Each(42));
}
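// User data attached to the FFI execution context; each execution stage sets
// its flag so the test can verify prepare/initialize/execute all ran.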
struct SomeExtraContext {
explicit SomeExtraContext(int32_t value) : value(value) {}
int32_t value;
bool prepared = false;
bool initialized = false;
bool executed = false;
};
template <ffi::ExecutionStage stage>
static absl::Status ExecutionContext(ffi::Result<ffi::AnyBuffer>,
SomeExtraContext* ctx) {
if (ctx->value != 42) return absl::InternalError("Unexpected value");
if constexpr (stage == ffi::ExecutionStage::kPrepare) {
ctx->prepared = true;
} else if constexpr (stage == ffi::ExecutionStage::kInitialize) {
ctx->initialized = true;
} else if constexpr (stage == ffi::ExecutionStage::kExecute) {
ctx->executed = true;
} else {
return absl::InternalError("Unexpected stage");
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kExecutionContextPrepare,
ExecutionContext<ffi::ExecutionStage::kPrepare>,
ffi::Ffi::Bind<ffi::ExecutionStage::kPrepare>()
.Ret<ffi::AnyBuffer>()
.Ctx<ffi::UserData<SomeExtraContext>>());
XLA_FFI_DEFINE_HANDLER(kExecutionContextInitialize,
ExecutionContext<ffi::ExecutionStage::kInitialize>,
ffi::Ffi::Bind<ffi::ExecutionStage::kInitialize>()
.Ret<ffi::AnyBuffer>()
.Ctx<ffi::UserData<SomeExtraContext>>());
XLA_FFI_DEFINE_HANDLER(kExecutionContextExecute,
ExecutionContext<ffi::ExecutionStage::kExecute>,
ffi::Ffi::Bind<ffi::ExecutionStage::kExecute>()
.Ret<ffi::AnyBuffer>()
.Ctx<ffi::UserData<SomeExtraContext>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "xla.gpu.ffi_execution_context",
PLATFORM,
{
nullptr,
kExecutionContextPrepare,
kExecutionContextInitialize,
kExecutionContextExecute,
});
TEST_F(CustomCallTest, FfiExecutionContext) {
XlaBuilder b(TestName());
CustomCall(&b, "xla.gpu.ffi_execution_context", {},
ShapeUtil::MakeShape(F32, {}),
"",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
ffi::ExecutionContext execution_context;
TF_ASSERT_OK(execution_context.Emplace<SomeExtraContext>(42));
ffi::internal::ScopedExecutionContext scoped_execution_context(
&execution_context);
TF_ASSERT_OK(Execute(&b, {}).status());
TF_ASSERT_OK_AND_ASSIGN(auto* user_context,
execution_context.Lookup<SomeExtraContext>());
EXPECT_TRUE(user_context->prepared);
EXPECT_TRUE(user_context->initialized);
EXPECT_TRUE(user_context->executed);
}
struct SomeState {
explicit SomeState(int32_t value) : value(value) {}
int32_t value = 0;
};
static absl::StatusOr<std::unique_ptr<SomeState>> InstantiateState() {
return std::make_unique<SomeState>(42);
}
static absl::Status GetState(ffi::Result<ffi::AnyBuffer>, SomeState* state) {
if (state->value != 42) {
return absl::InternalError("Unexpected value");
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kInstantiateState, InstantiateState,
ffi::Ffi::BindInstantiate());
XLA_FFI_DEFINE_HANDLER(
kGetState, GetState,
ffi::Ffi::Bind().Ret<ffi::AnyBuffer>().Ctx<ffi::State<SomeState>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "xla.gpu.ffi_execution_state",
PLATFORM,
{
kInstantiateState,
nullptr,
nullptr,
kGetState,
});
TEST_F(CustomCallTest, FfiExecutionState) {
XlaBuilder b(TestName());
CustomCall(&b, "xla.gpu.ffi_execution_state", {},
ShapeUtil::MakeShape(F32, {}),
"",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/custom_call.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/custom_call_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f7a41391-75c0-4b84-9f2d-3a731b52a23c | cpp | google/quiche | header_validator | quiche/http2/adapter/header_validator.cc | quiche/http2/adapter/header_validator_test.cc | #include "quiche/http2/adapter/header_validator.h"
#include <array>
#include <bitset>
#include <string>
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "quiche/http2/adapter/header_validator_base.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace adapter {
namespace {
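// Allowed-character tables for HTTP/2 header validation: token, header
// name/value, status, authority, and path grammars.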
constexpr absl::string_view kHttpTokenChars =
"!#$%&'*+-.^_`|~0123456789"
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
constexpr absl::string_view kHttp2HeaderNameAllowedChars =
"!#$%&'*+-.0123456789"
"^_`abcdefghijklmnopqrstuvwxyz|~";
constexpr absl::string_view kHttp2HeaderValueAllowedChars =
"\t "
"!\"#$%&'()*+,-./"
"0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`"
"abcdefghijklmnopqrstuvwxyz{|}~";
constexpr absl::string_view kHttp2StatusValueAllowedChars = "0123456789";
constexpr absl::string_view kValidAuthorityChars =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._~%!$&'()["
"]*+,;=:";
constexpr absl::string_view kValidPathChars =
"/abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._~%!$&'()"
"*+,;=:@?";
constexpr absl::string_view kValidPathCharsWithFragment =
"/abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._~%!$&'()"
"*+,;=:@?#";
using CharMap = std::array<bool, 256>;
constexpr CharMap BuildValidCharMap(absl::string_view valid_chars) {
CharMap map = {};
for (char c : valid_chars) {
map[static_cast<uint8_t>(c)] = true;
}
return map;
}
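// Extends a value map to additionally accept obs-text (bytes 0x80-0xff).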
constexpr CharMap AllowObsText(CharMap map) {
for (uint8_t c = 0xff; c >= 0x80; --c) {
map[c] = true;
}
return map;
}
bool AllCharsInMap(absl::string_view str, const CharMap& map) {
for (char c : str) {
if (!map[static_cast<uint8_t>(c)]) {
return false;
}
}
return true;
}
bool IsValidStatus(absl::string_view status) {
static constexpr CharMap valid_chars =
BuildValidCharMap(kHttp2StatusValueAllowedChars);
return AllCharsInMap(status, valid_chars);
}
bool IsValidMethod(absl::string_view method) {
static constexpr CharMap valid_chars = BuildValidCharMap(kHttpTokenChars);
return AllCharsInMap(method, valid_chars);
}
}
void HeaderValidator::StartHeaderBlock() {
HeaderValidatorBase::StartHeaderBlock();
pseudo_headers_.reset();
pseudo_header_state_.reset();
authority_.clear();
}
void HeaderValidator::RecordPseudoHeader(PseudoHeaderTag tag) {
if (pseudo_headers_[tag]) {
pseudo_headers_[TAG_UNKNOWN_EXTRA] = true;
} else {
pseudo_headers_[tag] = true;
}
}
HeaderValidator::HeaderStatus HeaderValidator::ValidateSingleHeader(
absl::string_view key, absl::string_view value) {
if (key.empty()) {
return HEADER_FIELD_INVALID;
}
if (max_field_size_.has_value() &&
key.size() + value.size() > *max_field_size_) {
QUICHE_VLOG(2) << "Header field size is " << key.size() + value.size()
<< ", exceeds max size of " << *max_field_size_;
return HEADER_FIELD_TOO_LONG;
}
if (key[0] == ':') {
key.remove_prefix(1);
if (key == "status") {
if (value.size() != 3 || !IsValidStatus(value)) {
QUICHE_VLOG(2) << "malformed status value: [" << absl::CEscape(value)
<< "]";
return HEADER_FIELD_INVALID;
}
if (value == "101") {
return HEADER_FIELD_INVALID;
}
status_ = std::string(value);
RecordPseudoHeader(TAG_STATUS);
} else if (key == "method") {
if (value == "OPTIONS") {
pseudo_header_state_[STATE_METHOD_IS_OPTIONS] = true;
} else if (value == "CONNECT") {
pseudo_header_state_[STATE_METHOD_IS_CONNECT] = true;
} else if (!IsValidMethod(value)) {
return HEADER_FIELD_INVALID;
}
RecordPseudoHeader(TAG_METHOD);
} else if (key == "authority") {
if (!ValidateAndSetAuthority(value)) {
return HEADER_FIELD_INVALID;
}
RecordPseudoHeader(TAG_AUTHORITY);
} else if (key == "path") {
if (value == "*") {
pseudo_header_state_[STATE_PATH_IS_STAR] = true;
} else if (value.empty()) {
pseudo_header_state_[STATE_PATH_IS_EMPTY] = true;
return HEADER_FIELD_INVALID;
} else if (validate_path_ &&
!IsValidPath(value, allow_fragment_in_path_)) {
return HEADER_FIELD_INVALID;
}
if (value[0] == '/') {
pseudo_header_state_[STATE_PATH_INITIAL_SLASH] = true;
}
RecordPseudoHeader(TAG_PATH);
} else if (key == "protocol") {
RecordPseudoHeader(TAG_PROTOCOL);
} else if (key == "scheme") {
RecordPseudoHeader(TAG_SCHEME);
} else {
pseudo_headers_[TAG_UNKNOWN_EXTRA] = true;
if (!IsValidHeaderName(key)) {
QUICHE_VLOG(2) << "invalid chars in header name: ["
<< absl::CEscape(key) << "]";
return HEADER_FIELD_INVALID;
}
}
if (!IsValidHeaderValue(value, obs_text_option_)) {
QUICHE_VLOG(2) << "invalid chars in header value: ["
<< absl::CEscape(value) << "]";
return HEADER_FIELD_INVALID;
}
} else {
std::string lowercase_key;
if (allow_uppercase_in_header_names_) {
lowercase_key = absl::AsciiStrToLower(key);
key = lowercase_key;
}
if (!IsValidHeaderName(key)) {
QUICHE_VLOG(2) << "invalid chars in header name: [" << absl::CEscape(key)
<< "]";
return HEADER_FIELD_INVALID;
}
if (!IsValidHeaderValue(value, obs_text_option_)) {
QUICHE_VLOG(2) << "invalid chars in header value: ["
<< absl::CEscape(value) << "]";
return HEADER_FIELD_INVALID;
}
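    // On requests, Host is folded into the authority; responses ignore it.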
if (key == "host") {
if (pseudo_headers_[TAG_STATUS]) {
} else {
if (!ValidateAndSetAuthority(value)) {
return HEADER_FIELD_INVALID;
}
pseudo_headers_[TAG_AUTHORITY] = true;
}
} else if (key == "content-length") {
const ContentLengthStatus status = HandleContentLength(value);
switch (status) {
case CONTENT_LENGTH_ERROR:
return HEADER_FIELD_INVALID;
case CONTENT_LENGTH_SKIP:
return HEADER_SKIP;
case CONTENT_LENGTH_OK:
return HEADER_OK;
default:
return HEADER_FIELD_INVALID;
}
} else if (key == "te" && value != "trailers") {
return HEADER_FIELD_INVALID;
} else if (key == "upgrade" || GetInvalidHttp2HeaderSet().contains(key)) {
return HEADER_FIELD_INVALID;
}
}
return HEADER_OK;
}
bool HeaderValidator::FinishHeaderBlock(HeaderType type) {
switch (type) {
case HeaderType::REQUEST:
return ValidateRequestHeaders(pseudo_headers_, pseudo_header_state_,
allow_extended_connect_);
case HeaderType::REQUEST_TRAILER:
return ValidateRequestTrailers(pseudo_headers_);
case HeaderType::RESPONSE_100:
case HeaderType::RESPONSE:
return ValidateResponseHeaders(pseudo_headers_);
case HeaderType::RESPONSE_TRAILER:
return ValidateResponseTrailers(pseudo_headers_);
}
return false;
}
bool HeaderValidator::IsValidHeaderName(absl::string_view name) {
static constexpr CharMap valid_chars =
BuildValidCharMap(kHttp2HeaderNameAllowedChars);
return AllCharsInMap(name, valid_chars);
}
bool HeaderValidator::IsValidHeaderValue(absl::string_view value,
ObsTextOption option) {
static constexpr CharMap valid_chars =
BuildValidCharMap(kHttp2HeaderValueAllowedChars);
static constexpr CharMap valid_chars_with_obs_text =
AllowObsText(BuildValidCharMap(kHttp2HeaderValueAllowedChars));
return AllCharsInMap(value, option == ObsTextOption::kAllow
? valid_chars_with_obs_text
: valid_chars);
}
bool HeaderValidator::IsValidAuthority(absl::string_view authority) {
static constexpr CharMap valid_chars =
BuildValidCharMap(kValidAuthorityChars);
return AllCharsInMap(authority, valid_chars);
}
bool HeaderValidator::IsValidPath(absl::string_view path, bool allow_fragment) {
static constexpr CharMap valid_chars = BuildValidCharMap(kValidPathChars);
static constexpr CharMap valid_chars_with_fragment =
BuildValidCharMap(kValidPathCharsWithFragment);
if (allow_fragment) {
return AllCharsInMap(path, valid_chars_with_fragment);
} else {
return AllCharsInMap(path, valid_chars);
}
}
HeaderValidator::ContentLengthStatus HeaderValidator::HandleContentLength(
absl::string_view value) {
if (value.empty()) {
return CONTENT_LENGTH_ERROR;
}
if (status_ == "204" && value != "0") {
return CONTENT_LENGTH_ERROR;
}
if (!status_.empty() && status_[0] == '1' && value != "0") {
return CONTENT_LENGTH_ERROR;
}
size_t content_length = 0;
const bool valid = absl::SimpleAtoi(value, &content_length);
if (!valid) {
return CONTENT_LENGTH_ERROR;
}
if (content_length_.has_value()) {
return content_length == *content_length_ ? CONTENT_LENGTH_SKIP
: CONTENT_LENGTH_ERROR;
}
content_length_ = content_length;
return CONTENT_LENGTH_OK;
}
bool HeaderValidator::ValidateAndSetAuthority(absl::string_view authority) {
if (!IsValidAuthority(authority)) {
return false;
}
if (!allow_different_host_and_authority_ && pseudo_headers_[TAG_AUTHORITY] &&
authority != authority_) {
return false;
}
if (!authority.empty()) {
pseudo_header_state_[STATE_AUTHORITY_IS_NONEMPTY] = true;
if (authority_.empty()) {
authority_ = authority;
} else {
absl::StrAppend(&authority_, ", ", authority);
}
}
return true;
}
bool HeaderValidator::ValidateRequestHeaders(
const PseudoHeaderTagSet& pseudo_headers,
const PseudoHeaderStateSet& pseudo_header_state,
bool allow_extended_connect) {
QUICHE_VLOG(2) << "Request pseudo-headers: [" << pseudo_headers
<< "], pseudo_header_state: [" << pseudo_header_state
<< "], allow_extended_connect: " << allow_extended_connect;
if (pseudo_header_state[STATE_METHOD_IS_CONNECT]) {
if (allow_extended_connect) {
static const auto* kExtendedConnectHeaders =
new PseudoHeaderTagSet(0b0011111);
if (pseudo_headers == *kExtendedConnectHeaders) {
return true;
}
}
static const auto* kConnectHeaders = new PseudoHeaderTagSet(0b0000011);
return pseudo_header_state[STATE_AUTHORITY_IS_NONEMPTY] &&
pseudo_headers == *kConnectHeaders;
}
if (pseudo_header_state[STATE_PATH_IS_EMPTY]) {
return false;
}
if (pseudo_header_state[STATE_PATH_IS_STAR]) {
if (!pseudo_header_state[STATE_METHOD_IS_OPTIONS]) {
return false;
}
} else if (!pseudo_header_state[STATE_PATH_INITIAL_SLASH]) {
return false;
}
static const auto* kRequiredHeaders = new PseudoHeaderTagSet(0b0010111);
return pseudo_headers == *kRequiredHeaders;
}
bool HeaderValidator::ValidateRequestTrailers(
const PseudoHeaderTagSet& pseudo_headers) {
return pseudo_headers.none();
}
bool HeaderValidator::ValidateResponseHeaders(
const PseudoHeaderTagSet& pseudo_headers) {
static const auto* kRequiredHeaders = new PseudoHeaderTagSet(0b0100000);
return pseudo_headers == *kRequiredHeaders;
}
bool HeaderValidator::ValidateResponseTrailers(
const PseudoHeaderTagSet& pseudo_headers) {
return pseudo_headers.none();
}
}
} | #include "quiche/http2/adapter/header_validator.h"
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
using ::testing::Optional;
using Header = std::pair<absl::string_view, absl::string_view>;
constexpr Header kSampleRequestPseudoheaders[] = {{":authority", "www.foo.com"},
{":method", "GET"},
{":path", "/foo"},
{":scheme", "https"}};
TEST(HeaderValidatorTest, HeaderNameEmpty) {
HeaderValidator v;
HeaderValidator::HeaderStatus status = v.ValidateSingleHeader("", "value");
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID, status);
}
TEST(HeaderValidatorTest, HeaderValueEmpty) {
HeaderValidator v;
HeaderValidator::HeaderStatus status = v.ValidateSingleHeader("name", "");
EXPECT_EQ(HeaderValidator::HEADER_OK, status);
}
TEST(HeaderValidatorTest, ExceedsMaxSize) {
HeaderValidator v;
v.SetMaxFieldSize(64u);
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader("name", "value");
EXPECT_EQ(HeaderValidator::HEADER_OK, status);
status = v.ValidateSingleHeader(
"name2",
"Antidisestablishmentariansism is supercalifragilisticexpialodocious.");
EXPECT_EQ(HeaderValidator::HEADER_FIELD_TOO_LONG, status);
}
TEST(HeaderValidatorTest, NameHasInvalidChar) {
HeaderValidator v;
for (const bool is_pseudo_header : {true, false}) {
for (const char* c : {"!", "3", "a", "_", "|", "~"}) {
const std::string name = is_pseudo_header ? absl::StrCat(":met", c, "hod")
: absl::StrCat("na", c, "me");
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader(name, "value");
EXPECT_EQ(HeaderValidator::HEADER_OK, status);
}
for (const char* c : {"\\", "<", ";", "[", "=", " ", "\r", "\n", ",", "\"",
"\x1F", "\x91"}) {
const std::string name = is_pseudo_header ? absl::StrCat(":met", c, "hod")
: absl::StrCat("na", c, "me");
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader(name, "value");
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID, status)
<< "with name [" << name << "]";
}
{
const absl::string_view name = is_pseudo_header
? absl::string_view(":met\0hod", 8)
: absl::string_view("na\0me", 5);
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader(name, "value");
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID, status);
}
const std::string uc_name = is_pseudo_header ? ":Method" : "Name";
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader(uc_name, "value");
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID, status);
}
}
TEST(HeaderValidatorTest, ValueHasInvalidChar) {
HeaderValidator v;
for (const char* c :
{"!", "3", "a", "_", "|", "~", "\\", "<", ";", "[", "=", "A", "\t"}) {
const std::string value = absl::StrCat("val", c, "ue");
EXPECT_TRUE(
HeaderValidator::IsValidHeaderValue(value, ObsTextOption::kDisallow));
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader("name", value);
EXPECT_EQ(HeaderValidator::HEADER_OK, status);
}
for (const char* c : {"\r", "\n"}) {
const std::string value = absl::StrCat("val", c, "ue");
EXPECT_FALSE(
HeaderValidator::IsValidHeaderValue(value, ObsTextOption::kDisallow));
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader("name", value);
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID, status);
}
{
const std::string value("val\0ue", 6);
EXPECT_FALSE(
HeaderValidator::IsValidHeaderValue(value, ObsTextOption::kDisallow));
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader("name", value);
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID, status);
}
{
const std::string obs_text_value = "val\xa9ue";
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("name", obs_text_value));
v.SetObsTextOption(ObsTextOption::kDisallow);
EXPECT_FALSE(HeaderValidator::IsValidHeaderValue(obs_text_value,
ObsTextOption::kDisallow));
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("name", obs_text_value));
v.SetObsTextOption(ObsTextOption::kAllow);
EXPECT_TRUE(HeaderValidator::IsValidHeaderValue(obs_text_value,
ObsTextOption::kAllow));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("name", obs_text_value));
}
}
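// A :status value must be exactly three digits; anything else also causes
// FinishHeaderBlock to fail.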
TEST(HeaderValidatorTest, StatusHasInvalidChar) {
HeaderValidator v;
for (HeaderType type : {HeaderType::RESPONSE, HeaderType::RESPONSE_100}) {
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader(":status", "bar"));
EXPECT_FALSE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader(":status", "10"));
EXPECT_FALSE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader(":status", "9000"));
EXPECT_FALSE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "400"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
}
}
TEST(HeaderValidatorTest, AuthorityHasInvalidChar) {
for (absl::string_view key : {":authority", "host"}) {
for (const absl::string_view c : {"1", "-", "!", ":", "+", "=", ","}) {
const std::string value = absl::StrCat("ho", c, "st.example.com");
EXPECT_TRUE(HeaderValidator::IsValidAuthority(value));
HeaderValidator v;
v.StartHeaderBlock();
HeaderValidator::HeaderStatus status = v.ValidateSingleHeader(key, value);
EXPECT_EQ(HeaderValidator::HEADER_OK, status)
<< " with name [" << key << "] and value [" << value << "]";
}
for (const absl::string_view c : {"\r", "\n", "|", "\\", "`"}) {
const std::string value = absl::StrCat("ho", c, "st.example.com");
EXPECT_FALSE(HeaderValidator::IsValidAuthority(value));
HeaderValidator v;
v.StartHeaderBlock();
HeaderValidator::HeaderStatus status = v.ValidateSingleHeader(key, value);
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID, status);
}
{
const std::string value = "123.45.67.89";
EXPECT_TRUE(HeaderValidator::IsValidAuthority(value));
HeaderValidator v;
v.StartHeaderBlock();
HeaderValidator::HeaderStatus status = v.ValidateSingleHeader(key, value);
EXPECT_EQ(HeaderValidator::HEADER_OK, status);
}
{
const std::string value1 = "2001:0db8:85a3:0000:0000:8a2e:0370:7334";
EXPECT_TRUE(HeaderValidator::IsValidAuthority(value1));
HeaderValidator v;
v.StartHeaderBlock();
HeaderValidator::HeaderStatus status =
v.ValidateSingleHeader(key, value1);
EXPECT_EQ(HeaderValidator::HEADER_OK, status);
const std::string value2 = "[::1]:80";
EXPECT_TRUE(HeaderValidator::IsValidAuthority(value2));
HeaderValidator v2;
v2.StartHeaderBlock();
status = v2.ValidateSingleHeader(key, value2);
EXPECT_EQ(HeaderValidator::HEADER_OK, status);
}
{
EXPECT_TRUE(HeaderValidator::IsValidAuthority(""));
HeaderValidator v;
v.StartHeaderBlock();
HeaderValidator::HeaderStatus status = v.ValidateSingleHeader(key, "");
EXPECT_EQ(HeaderValidator::HEADER_OK, status);
}
}
}
TEST(HeaderValidatorTest, RequestHostAndAuthority) {
HeaderValidator v;
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("host", "www.foo.com"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("host", "www.bar.com"));
}
TEST(HeaderValidatorTest, RequestHostAndAuthorityLax) {
HeaderValidator v;
v.SetAllowDifferentHostAndAuthority();
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("host", "www.bar.com"));
}
TEST(HeaderValidatorTest, MethodHasInvalidChar) {
HeaderValidator v;
v.StartHeaderBlock();
std::vector<absl::string_view> bad_methods = {
"In[]valid{}", "co,mma", "spac e", "a@t", "equals=",
"question?mark", "co:lon", "semi;colon", "sla/sh", "back\\slash",
};
std::vector<absl::string_view> good_methods = {
"lowercase", "MiXeDcAsE", "NONCANONICAL", "HASH#",
"under_score", "PI|PE", "Tilde~", "quote'",
};
for (absl::string_view value : bad_methods) {
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader(":method", value));
}
for (absl::string_view value : good_methods) {
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":method", value));
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":method") {
continue;
}
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
}
TEST(HeaderValidatorTest, RequestPseudoHeaders) {
HeaderValidator v;
for (Header to_skip : kSampleRequestPseudoheaders) {
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add != to_skip) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":extra", "blah"));
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
for (Header to_repeat : kSampleRequestPseudoheaders) {
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
if (to_add == to_repeat) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
}
TEST(HeaderValidatorTest, ConnectHeaders) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":authority", "athena.dialup.mit.edu:23"));
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":method", "CONNECT"));
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":authority", "athena.dialup.mit.edu:23"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":method", "CONNECT"));
EXPECT_EQ(HeaderValidator::HEADER_OK, v.ValidateSingleHeader(":path", "/"));
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":authority", ""));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":method", "CONNECT"));
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":authority", "athena.dialup.mit.edu:23"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":method", "CONNECT"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.SetAllowExtendedConnect();
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":authority", "athena.dialup.mit.edu:23"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":method", "CONNECT"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
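// The :protocol pseudo-header (RFC 8441 extended CONNECT, used for
// WebSockets over HTTP/2) is only accepted after SetAllowExtendedConnect(),
// and only on CONNECT requests.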
TEST(HeaderValidatorTest, WebsocketPseudoHeaders) {
HeaderValidator v;
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":protocol", "websocket"));
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.SetAllowExtendedConnect();
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":protocol", "websocket"));
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":method") {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "CONNECT"));
} else {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":protocol", "websocket"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
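// A :path of "*" (asterisk-form) is only meaningful with the OPTIONS method,
// per the HTTP/2 pseudo-header rules.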
TEST(HeaderValidatorTest, AsteriskPathPseudoHeader) {
HeaderValidator v;
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "*"));
} else {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "*"));
} else if (to_add.first == ":method") {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "OPTIONS"));
} else {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
TEST(HeaderValidatorTest, InvalidPathPseudoHeader) {
HeaderValidator v;
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader(to_add.first, ""));
} else {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.SetValidatePath();
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "shawarma"));
} else {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::REQUEST));
for (const absl::string_view c :
{"/", "?", "_", "'", "9", "&", "(", "@", ":"}) {
const std::string value = absl::StrCat("/shawa", c, "rma");
HeaderValidator validator;
validator.SetValidatePath();
validator.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(HeaderValidator::HEADER_OK,
validator.ValidateSingleHeader(to_add.first, value))
<< "Problematic char: [" << c << "]";
} else {
EXPECT_EQ(HeaderValidator::HEADER_OK,
validator.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(validator.FinishHeaderBlock(HeaderType::REQUEST));
}
for (const absl::string_view c : {"[", "<", "}", "`", "\\", " ", "\t", "#"}) {
const std::string value = absl::StrCat("/shawa", c, "rma");
HeaderValidator validator;
validator.SetValidatePath();
validator.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
validator.ValidateSingleHeader(to_add.first, value));
} else {
EXPECT_EQ(HeaderValidator::HEADER_OK,
validator.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_FALSE(validator.FinishHeaderBlock(HeaderType::REQUEST));
}
{
HeaderValidator validator;
validator.SetValidatePath();
validator.SetAllowFragmentInPath();
validator.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(HeaderValidator::HEADER_OK,
validator.ValidateSingleHeader(to_add.first, "/shawa#rma"));
} else {
EXPECT_EQ(HeaderValidator::HEADER_OK,
validator.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(validator.FinishHeaderBlock(HeaderType::REQUEST));
}
}
TEST(HeaderValidatorTest, ResponsePseudoHeaders) {
HeaderValidator v;
for (HeaderType type : {HeaderType::RESPONSE, HeaderType::RESPONSE_100}) {
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK, v.ValidateSingleHeader("foo", "bar"));
EXPECT_FALSE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "199"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
EXPECT_EQ("199", v.status_header());
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "199"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "299"));
EXPECT_FALSE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "199"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":extra", "blorp"));
EXPECT_FALSE(v.FinishHeaderBlock(type));
}
}
TEST(HeaderValidatorTest, ResponseWithHost) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "200"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("host", "myserver.com"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(HeaderValidatorTest, Response204) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "204"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
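// Per RFC 9110, 204 and 1xx responses carry no content, so a nonzero
// content-length is invalid; a repeated identical content-length is
// skipped, while conflicting values are rejected.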
TEST(HeaderValidatorTest, ResponseWithMultipleIdenticalContentLength) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "200"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "13"));
EXPECT_EQ(HeaderValidator::HEADER_SKIP,
v.ValidateSingleHeader("content-length", "13"));
}
TEST(HeaderValidatorTest, ResponseWithMultipleDifferingContentLength) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "200"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "13"));
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("content-length", "17"));
}
TEST(HeaderValidatorTest, Response204WithContentLengthZero) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "204"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "0"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(HeaderValidatorTest, Response204WithContentLength) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "204"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("content-length", "1"));
}
TEST(HeaderValidatorTest, Response100) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "100"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(HeaderValidatorTest, Response100WithContentLengthZero) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "100"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "0"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(HeaderValidatorTest, Response100WithContentLength) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "100"));
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("content-length", "1"));
}
TEST(HeaderValidatorTest, ResponseTrailerPseudoHeaders) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK, v.ValidateSingleHeader("foo", "bar"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE_TRAILER));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "200"));
EXPECT_EQ(HeaderValidator::HEADER_OK, v.ValidateSingleHeader("foo", "bar"));
EXPECT_FALSE(v.FinishHeaderBlock(HeaderType::RESPONSE_TRAILER));
}
TEST(HeaderValidatorTest, ValidContentLength) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "41"));
EXPECT_THAT(v.content_length(), Optional(41));
v.StartHeaderBlock();
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "42"));
EXPECT_THAT(v.content_length(), Optional(42));
}
TEST(HeaderValidatorTest, InvalidContentLength) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("content-length", ""));
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("content-length", "nan"));
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("content-length", "-42"));
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "42"));
EXPECT_THAT(v.content_length(), Optional(42));
}
TEST(HeaderValidatorTest, TeHeader) {
HeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("te", "trailers"));
v.StartHeaderBlock();
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("te", "trailers, deflate"));
}
TEST(HeaderValidatorTest, ConnectionSpecificHeaders) {
const std::vector<Header> connection_headers = {
{"connection", "keep-alive"}, {"proxy-connection", "keep-alive"},
{"keep-alive", "timeout=42"}, {"transfer-encoding", "chunked"},
{"upgrade", "h2c"},
};
for (const auto& [connection_key, connection_value] : connection_headers) {
HeaderValidator v;
v.StartHeaderBlock();
for (const auto& [sample_key, sample_value] : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(sample_key, sample_value));
}
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader(connection_key, connection_value));
}
}
TEST(HeaderValidatorTest, MixedCaseHeaderName) {
HeaderValidator v;
v.SetAllowUppercaseInHeaderNames();
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("MixedCaseName", "value"));
}
TEST(HeaderValidatorTest, MixedCasePseudoHeader) {
HeaderValidator v;
v.SetAllowUppercaseInHeaderNames();
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader(":PATH", "/"));
}
TEST(HeaderValidatorTest, MixedCaseHost) {
HeaderValidator v;
v.SetAllowUppercaseInHeaderNames();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(HeaderValidator::HEADER_FIELD_INVALID,
v.ValidateSingleHeader("Host", "www.bar.com"));
}
TEST(HeaderValidatorTest, MixedCaseContentLength) {
HeaderValidator v;
v.SetAllowUppercaseInHeaderNames();
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(HeaderValidator::HEADER_OK,
v.ValidateSingleHeader("Content-Length", "42"));
EXPECT_THAT(v.content_length(), Optional(42));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/header_validator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/header_validator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
7b33fe13-c650-4a7d-9230-9d844a52e739 | cpp | google/cel-cpp | string_value | common/values/string_value.cc | common/values/string_value_test.cc | #include <cstddef>
#include <string>
#include <utility>
#include "absl/functional/overload.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
#include "internal/strings.h"
#include "internal/utf8.h"
namespace cel {
namespace {
template <typename Bytes>
std::string StringDebugString(const Bytes& value) {
return value.NativeValue(absl::Overload(
[](absl::string_view string) -> std::string {
return internal::FormatStringLiteral(string);
},
[](const absl::Cord& cord) -> std::string {
if (auto flat = cord.TryFlat(); flat.has_value()) {
return internal::FormatStringLiteral(*flat);
}
return internal::FormatStringLiteral(static_cast<std::string>(cord));
}));
}
}
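// Formats the value as a quoted, escaped string literal; a fragmented cord
// is flattened first when possible, otherwise copied.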
std::string StringValue::DebugString() const {
return StringDebugString(*this);
}
absl::Status StringValue::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
return NativeValue([&value](const auto& bytes) -> absl::Status {
return internal::SerializeStringValue(bytes, value);
});
}
absl::StatusOr<Json> StringValue::ConvertToJson(AnyToJsonConverter&) const {
return NativeCord();
}
absl::Status StringValue::Equal(ValueManager&, const Value& other,
Value& result) const {
if (auto other_value = As<StringValue>(other); other_value.has_value()) {
result = NativeValue([other_value](const auto& value) -> BoolValue {
return other_value->NativeValue(
[&value](const auto& other_value) -> BoolValue {
return BoolValue{value == other_value};
});
});
return absl::OkStatus();
}
result = BoolValue{false};
return absl::OkStatus();
}
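// Size() counts Unicode code points rather than bytes, per CEL's size()
// semantics for strings.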
size_t StringValue::Size() const {
return NativeValue([](const auto& alternative) -> size_t {
return internal::Utf8CodePointCount(alternative);
});
}
bool StringValue::IsEmpty() const {
return NativeValue(
[](const auto& alternative) -> bool { return alternative.empty(); });
}
bool StringValue::Equals(absl::string_view string) const {
return NativeValue([string](const auto& alternative) -> bool {
return alternative == string;
});
}
bool StringValue::Equals(const absl::Cord& string) const {
return NativeValue([&string](const auto& alternative) -> bool {
return alternative == string;
});
}
bool StringValue::Equals(const StringValue& string) const {
return string.NativeValue(
[this](const auto& alternative) -> bool { return Equals(alternative); });
}
namespace {
int CompareImpl(absl::string_view lhs, absl::string_view rhs) {
return lhs.compare(rhs);
}
int CompareImpl(absl::string_view lhs, const absl::Cord& rhs) {
return -rhs.Compare(lhs);
}
int CompareImpl(const absl::Cord& lhs, absl::string_view rhs) {
return lhs.Compare(rhs);
}
int CompareImpl(const absl::Cord& lhs, const absl::Cord& rhs) {
return lhs.Compare(rhs);
}
}
int StringValue::Compare(absl::string_view string) const {
return NativeValue([string](const auto& alternative) -> int {
return CompareImpl(alternative, string);
});
}
int StringValue::Compare(const absl::Cord& string) const {
return NativeValue([&string](const auto& alternative) -> int {
return CompareImpl(alternative, string);
});
}
int StringValue::Compare(const StringValue& string) const {
return string.NativeValue(
[this](const auto& alternative) -> int { return Compare(alternative); });
}
} | #include <sstream>
#include <string>
#include "absl/hash/hash.h"
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::An;
using ::testing::Ne;
using StringValueTest = common_internal::ThreadCompatibleValueTest<>;
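// Every test below runs under both pooling and reference-counting memory
// management; see INSTANTIATE_TEST_SUITE_P at the end of the file.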
TEST_P(StringValueTest, Kind) {
EXPECT_EQ(StringValue("foo").kind(), StringValue::kKind);
EXPECT_EQ(Value(StringValue(absl::Cord("foo"))).kind(), StringValue::kKind);
}
TEST_P(StringValueTest, DebugString) {
{
std::ostringstream out;
out << StringValue("foo");
EXPECT_EQ(out.str(), "\"foo\"");
}
{
std::ostringstream out;
out << StringValue(absl::MakeFragmentedCord({"f", "o", "o"}));
EXPECT_EQ(out.str(), "\"foo\"");
}
{
std::ostringstream out;
out << Value(StringValue(absl::Cord("foo")));
EXPECT_EQ(out.str(), "\"foo\"");
}
}
TEST_P(StringValueTest, ConvertToJson) {
EXPECT_THAT(StringValue("foo").ConvertToJson(value_manager()),
IsOkAndHolds(Json(JsonString("foo"))));
}
TEST_P(StringValueTest, NativeValue) {
std::string scratch;
EXPECT_EQ(StringValue("foo").NativeString(), "foo");
EXPECT_EQ(StringValue("foo").NativeString(scratch), "foo");
EXPECT_EQ(StringValue("foo").NativeCord(), "foo");
}
TEST_P(StringValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(StringValue("foo")),
NativeTypeId::For<StringValue>());
EXPECT_EQ(NativeTypeId::Of(Value(StringValue(absl::Cord("foo")))),
NativeTypeId::For<StringValue>());
}
TEST_P(StringValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<StringValue>(StringValue("foo")));
EXPECT_TRUE(InstanceOf<StringValue>(Value(StringValue(absl::Cord("foo")))));
}
TEST_P(StringValueTest, Cast) {
EXPECT_THAT(Cast<StringValue>(StringValue("foo")), An<StringValue>());
EXPECT_THAT(Cast<StringValue>(Value(StringValue(absl::Cord("foo")))),
An<StringValue>());
}
TEST_P(StringValueTest, As) {
EXPECT_THAT(As<StringValue>(Value(StringValue(absl::Cord("foo")))),
Ne(absl::nullopt));
}
TEST_P(StringValueTest, HashValue) {
EXPECT_EQ(absl::HashOf(StringValue("foo")),
absl::HashOf(absl::string_view("foo")));
EXPECT_EQ(absl::HashOf(StringValue(absl::string_view("foo"))),
absl::HashOf(absl::string_view("foo")));
EXPECT_EQ(absl::HashOf(StringValue(absl::Cord("foo"))),
absl::HashOf(absl::string_view("foo")));
}
TEST_P(StringValueTest, Equality) {
EXPECT_NE(StringValue("foo"), "bar");
EXPECT_NE("bar", StringValue("foo"));
EXPECT_NE(StringValue("foo"), StringValue("bar"));
EXPECT_NE(StringValue("foo"), absl::Cord("bar"));
EXPECT_NE(absl::Cord("bar"), StringValue("foo"));
}
TEST_P(StringValueTest, LessThan) {
EXPECT_LT(StringValue("bar"), "foo");
EXPECT_LT("bar", StringValue("foo"));
EXPECT_LT(StringValue("bar"), StringValue("foo"));
EXPECT_LT(StringValue("bar"), absl::Cord("foo"));
EXPECT_LT(absl::Cord("bar"), StringValue("foo"));
}
INSTANTIATE_TEST_SUITE_P(
StringValueTest, StringValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
StringValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/string_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/string_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
a7de02f3-1f9b-4d79-9383-5b28d3fecda7 | cpp | tensorflow/tensorflow | op_def_util | tensorflow/python/framework/op_def_util.cc | tensorflow/core/framework/op_def_util_test.cc | #include "tensorflow/python/framework/op_def_util.h"
#include <map>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/python/lib/core/safe_pyobject_ptr.h"
#include "tensorflow/python/util/util.h"
using ::tensorflow::swig::GetRegisteredPyObject;
#if PY_MAJOR_VERSION < 3
#define PY_STRING_CHECK(x) (PyString_Check(x) || PyUnicode_Check(x))
#define PY_STRING_FROMSTRING(x) (PyString_FromString(x))
#define PY_INT_CHECK(x) (PyInt_Check(x))
#define PY_INT_TYPE PyInt_Type
#define PY_INT_FROM_LONG(x) (PyInt_FromLong(x))
#else
#define PY_STRING_CHECK(x) (PyBytes_Check(x) || PyUnicode_Check(x))
#define PY_STRING_FROMSTRING(x) (PyUnicode_FromString(x))
#define PY_INT_CHECK(x) (PyLong_Check(x))
#define PY_INT_TYPE PyLong_Type
#define PY_INT_FROM_LONG(x) (PyLong_FromLong(x))
#endif
namespace tensorflow {
namespace {
const std::map<std::string, AttributeType>* AttributeTypeNameMap() {
static auto* type_map = new std::map<std::string, AttributeType>(
{{"any", AttributeType::ANY},
{"float", AttributeType::FLOAT},
{"int", AttributeType::INT},
{"string", AttributeType::STRING},
{"bool", AttributeType::BOOL},
{"shape", AttributeType::SHAPE},
{"type", AttributeType::DTYPE},
{"tensor", AttributeType::TENSOR},
{"list(any)", AttributeType::LIST_ANY},
{"list(float)", AttributeType::LIST_FLOAT},
{"list(int)", AttributeType::LIST_INT},
{"list(string)", AttributeType::LIST_STRING},
{"list(bool)", AttributeType::LIST_BOOL},
{"list(type)", AttributeType::LIST_DTYPE},
{"list(shape)", AttributeType::LIST_SHAPE},
{"list(tensor)", AttributeType::LIST_TENSOR}});
return type_map;
}
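// Each Convert*Functor below coerces a PyObject to one attribute type,
// returning an owned reference on success and a null Safe_PyObjectPtr on
// failure (the caller turns that into a TypeError).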
struct ConvertAnyFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Py_INCREF(value);
return Safe_PyObjectPtr(value);
}
};
struct ConvertFloatFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
if (PyFloat_Check(value)) {
Py_INCREF(value);
result.reset(value);
} else if (!PY_STRING_CHECK(value)) {
result.reset(PyObject_CallFunctionObjArgs(
reinterpret_cast<PyObject*>(&PyFloat_Type), value, nullptr));
}
return result;
}
};
struct ConvertIntFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
if (PY_INT_CHECK(value)) {
Py_INCREF(value);
result.reset(value);
} else if (!PY_STRING_CHECK(value)) {
result.reset(PyObject_CallFunctionObjArgs(
reinterpret_cast<PyObject*>(&PY_INT_TYPE), value, nullptr));
}
return result;
}
};
struct ConvertStringFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
if (PY_STRING_CHECK(value)) {
Py_INCREF(value);
result.reset(value);
}
return result;
}
};
struct ConvertBoolFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
if (PyBool_Check(value)) {
Py_INCREF(value);
result.reset(value);
}
return result;
}
};
struct ConvertDTypeFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
static PyObject* dtype = GetRegisteredPyObject("tf.dtypes.DType");
static PyObject* as_dtype = GetRegisteredPyObject("tf.dtypes.as_dtype");
if (reinterpret_cast<PyObject*>(value->ob_type) == dtype) {
Py_INCREF(value);
result.reset(value);
} else {
result.reset(PyObject_CallFunctionObjArgs(as_dtype, value, nullptr));
}
return result;
}
};
struct ConvertTensorShapeFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
static PyObject* shape = GetRegisteredPyObject("tf.TensorShape");
static PyObject* as_shape = GetRegisteredPyObject("tf.as_shape");
if (reinterpret_cast<PyObject*>(value->ob_type) == shape) {
Py_INCREF(value);
result.reset(value);
} else {
result.reset(PyObject_CallFunctionObjArgs(as_shape, value, nullptr));
}
return result;
}
};
struct ConvertTensorProtoFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
static PyObject* tensor_proto = GetRegisteredPyObject("tf.TensorProto");
static PyObject* text_format_parse =
GetRegisteredPyObject("text_format.Parse");
if (reinterpret_cast<PyObject*>(value->ob_type) == tensor_proto) {
Py_INCREF(value);
result.reset(value);
} else if (PY_STRING_CHECK(value)) {
result.reset(PyObject_CallObject(tensor_proto, nullptr));
if (result) {
if (!PyObject_CallFunctionObjArgs(text_format_parse, value,
result.get(), nullptr)) {
return nullptr;
}
}
}
return result;
}
};
template <typename T>
Safe_PyObjectPtr ConvertListAttr(PyObject* value, T convert_functor) {
Safe_PyObjectPtr result(PySequence_List(value));
if (!result) return nullptr;
Py_ssize_t len = PySequence_Fast_GET_SIZE(result.get());
PyObject** items = PySequence_Fast_ITEMS(result.get());
for (Py_ssize_t i = 0; i < len; ++i) {
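    // Note: this guard tests the sequence `value` itself, which is never a
    // float once PySequence_List has succeeded, so every item is converted
    // unconditionally; checking items[i] may have been the intent.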
if (!PyFloat_Check(value)) {
Safe_PyObjectPtr item = convert_functor(items[i]);
if (!item) return nullptr;
PySequence_SetItem(result.get(), i, item.get());
}
}
return result;
}
Safe_PyObjectPtr ConvertAttrOrNull(PyObject* value, AttributeType attr_type) {
switch (attr_type) {
case AttributeType::ANY:
return ConvertAnyFunctor()(value);
case AttributeType::FLOAT:
return ConvertFloatFunctor()(value);
case AttributeType::INT:
return ConvertIntFunctor()(value);
case AttributeType::STRING:
return ConvertStringFunctor()(value);
case AttributeType::BOOL:
return ConvertBoolFunctor()(value);
case AttributeType::DTYPE:
return ConvertDTypeFunctor()(value);
case AttributeType::SHAPE:
return ConvertTensorShapeFunctor()(value);
case AttributeType::TENSOR:
return ConvertTensorProtoFunctor()(value);
case AttributeType::LIST_ANY:
return ConvertListAttr(value, ConvertAnyFunctor());
case AttributeType::LIST_FLOAT:
return ConvertListAttr(value, ConvertFloatFunctor());
case AttributeType::LIST_INT:
return ConvertListAttr(value, ConvertIntFunctor());
case AttributeType::LIST_STRING:
return ConvertListAttr(value, ConvertStringFunctor());
case AttributeType::LIST_BOOL:
return ConvertListAttr(value, ConvertBoolFunctor());
case AttributeType::LIST_DTYPE:
return ConvertListAttr(value, ConvertDTypeFunctor());
case AttributeType::LIST_SHAPE:
return ConvertListAttr(value, ConvertTensorShapeFunctor());
case AttributeType::LIST_TENSOR:
return ConvertListAttr(value, ConvertTensorProtoFunctor());
default:
return nullptr;
}
}
PyObject* PyBool_FromBool(bool b) {
PyObject* result = b ? Py_True : Py_False;
Py_INCREF(result);
return result;
}
Safe_PyObjectPtr AttrValueListToPyObject(AttrValue::ListValue list) {
if (list.s_size()) {
Safe_PyObjectPtr result(PyList_New(list.s_size()));
for (int i = 0; i < list.s_size(); ++i) {
PyList_SET_ITEM(result.get(), i, PY_STRING_FROMSTRING(list.s(i).c_str()));
}
return result;
} else if (list.i_size()) {
Safe_PyObjectPtr result(PyList_New(list.i_size()));
for (int i = 0; i < list.i_size(); ++i) {
PyList_SET_ITEM(result.get(), i, PY_INT_FROM_LONG(list.i(i)));
}
return result;
} else if (list.f_size()) {
Safe_PyObjectPtr result(PyList_New(list.f_size()));
for (int i = 0; i < list.f_size(); ++i) {
PyList_SET_ITEM(result.get(), i, PyFloat_FromDouble(list.f(i)));
}
return result;
} else if (list.b_size()) {
Safe_PyObjectPtr result(PyList_New(list.b_size()));
for (int i = 0; i < list.b_size(); ++i) {
PyList_SET_ITEM(result.get(), i, PyBool_FromBool(list.b(i)));
}
return result;
} else if (list.type_size()) {
Safe_PyObjectPtr result(PyList_New(list.type_size()));
for (int i = 0; i < list.type_size(); ++i) {
Safe_PyObjectPtr item(DataTypeToPyObject(list.type(i)));
Py_INCREF(item.get());
PyList_SET_ITEM(result.get(), i, item.get());
}
return result;
} else if (list.shape_size()) {
Safe_PyObjectPtr result(PyList_New(list.shape_size()));
for (int i = 0; i < list.shape_size(); ++i) {
Safe_PyObjectPtr item(TensorShapeProtoToPyObject(list.shape(i)));
Py_INCREF(item.get());
PyList_SET_ITEM(result.get(), i, item.get());
}
return result;
} else if (list.tensor_size() || list.func_size()) {
PyErr_SetString(PyExc_TypeError, "Unsupported AttrValue type");
return nullptr;
} else {
return Safe_PyObjectPtr(PyList_New(0));
}
}
}
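// Maps a textual attribute type to the AttributeType enum (and back via
// AttributeTypeToName); unknown names yield AttributeType::UNKNOWN.
// A hypothetical use:
//   AttributeType t = AttributeTypeFromName("list(int)");  // LIST_INT
//   std::string name = AttributeTypeToName(t);             // "list(int)"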
AttributeType AttributeTypeFromName(const std::string& type_name) {
const auto* type_map = AttributeTypeNameMap();
auto it = type_map->find(type_name);
return it != type_map->end() ? it->second : AttributeType::UNKNOWN;
}
std::string AttributeTypeToName(AttributeType attr_type) {
for (const auto& pair : *AttributeTypeNameMap()) {
if (pair.second == attr_type) {
return pair.first;
}
}
return "<unknown>";
}
Safe_PyObjectPtr ConvertPyObjectToAttributeType(PyObject* value,
AttributeType type) {
Safe_PyObjectPtr result = ConvertAttrOrNull(value, type);
if (!result) {
auto err = absl::StrCat("Failed to convert value of type '",
value->ob_type->tp_name, "' to type '",
AttributeTypeToName(type), "'.");
PyErr_SetString(PyExc_TypeError, err.c_str());
}
return result;
}
Safe_PyObjectPtr AttrValueToPyObject(const AttrValue& attr_value) {
switch (attr_value.value_case()) {
case tensorflow::AttrValue::kS:
return Safe_PyObjectPtr(PY_STRING_FROMSTRING(attr_value.s().c_str()));
case tensorflow::AttrValue::kI:
return Safe_PyObjectPtr(PY_INT_FROM_LONG(attr_value.i()));
case tensorflow::AttrValue::kF:
return Safe_PyObjectPtr(PyFloat_FromDouble(attr_value.f()));
case tensorflow::AttrValue::kB:
return Safe_PyObjectPtr(PyBool_FromBool(attr_value.b()));
case tensorflow::AttrValue::kType:
return DataTypeToPyObject(attr_value.type());
case tensorflow::AttrValue::kShape:
return TensorShapeProtoToPyObject(attr_value.shape());
case tensorflow::AttrValue::kList:
return AttrValueListToPyObject(attr_value.list());
default:
PyErr_SetString(PyExc_ValueError, "Unsupported AttrValue type");
return nullptr;
}
}
Safe_PyObjectPtr DataTypeToPyObject(const DataType& data_type) {
Safe_PyObjectPtr enum_value(PY_INT_FROM_LONG(data_type));
return ConvertDTypeFunctor()(enum_value.get());
}
Safe_PyObjectPtr TensorShapeProtoToPyObject(
const TensorShapeProto& tensor_shape) {
if (tensor_shape.unknown_rank()) {
return ConvertTensorShapeFunctor()(Py_None);
} else {
Safe_PyObjectPtr dims(PyTuple_New(tensor_shape.dim_size()));
for (int i = 0; i < tensor_shape.dim_size(); ++i) {
PyTuple_SET_ITEM(dims.get(), i,
PY_INT_FROM_LONG(tensor_shape.dim(i).size()));
}
return ConvertTensorShapeFunctor()(dims.get());
}
}
} | #include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
OpDef FromText(const string& text) {
OpDef op_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &op_def));
return op_def;
}
OpDef::AttrDef ADef(const string& text) {
OpDef::AttrDef attr_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &attr_def));
return attr_def;
}
class ValidateOpDefTest : public ::testing::Test {
protected:
Status TestProto(const string& text) { return ValidateOpDef(FromText(text)); }
Status TestBuilder(const OpDefBuilder& builder) {
OpRegistrationData op_reg_data;
Status status = builder.Finalize(&op_reg_data);
TF_EXPECT_OK(status);
if (!status.ok()) {
return status;
} else {
return ValidateOpDef(op_reg_data.op_def);
}
}
};
namespace {
void ExpectFailure(const Status& status, const string& message) {
EXPECT_FALSE(status.ok()) << "Did not see error with: " << message;
if (!status.ok()) {
LOG(INFO) << "message: " << status;
EXPECT_TRUE(absl::StrContains(status.ToString(), message))
<< "Actual: " << status << "\nExpected to contain: " << message;
}
}
}
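// Baseline: well-formed OpDefs, including namespaced op names such as
// "Namespace>X>Y", pass validation.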
TEST_F(ValidateOpDefTest, OpDefValid) {
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Input("a: int32")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Output("a: bool")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("t: type").Input("a: t")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int = 3")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5 = 3")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: numbertype")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("Uppercase")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("Namespace>X").Attr("a: int")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("Namespace>X>Y").Attr("a: int")));
}
TEST_F(ValidateOpDefTest, InvalidName) {
ExpectFailure(TestBuilder(OpDefBuilder("lower").Attr("a: int")),
"Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder("BadSuffix 7%")), "Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder(">OpName").Attr("a: int")),
"Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder("OpName>").Attr("a: int")),
"Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder("OpName>b").Attr("a: int")),
"Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder("OpName>A>>B").Attr("a: int")),
"Invalid name");
}
TEST_F(ValidateOpDefTest, DuplicateName) {
ExpectFailure(
TestBuilder(OpDefBuilder("DupeName").Input("a: int32").Input("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(
OpDefBuilder("DupeName").Input("a: int32").Output("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(
OpDefBuilder("DupeName").Output("a: int32").Output("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(OpDefBuilder("DupeName").Input("a: int32").Attr("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(OpDefBuilder("DupeName").Output("a: int32").Attr("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(OpDefBuilder("DupeName").Attr("a: int").Attr("a: float")),
"Duplicate name: a");
}
TEST_F(ValidateOpDefTest, BadAttrName) {
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude").Attr("int32: int")),
"Attr can't have name int32 that matches a data type");
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude").Attr("float: string")),
"Attr can't have name float that matches a data type");
}
TEST_F(ValidateOpDefTest, BadAttrType) {
ExpectFailure(
TestProto("name: 'BadAttrType' attr { name: 'a' type: 'illegal' }"),
"Unrecognized type");
ExpectFailure(
TestProto("name: 'BadAttrType' attr { name: 'a' type: 'list(illegal)' }"),
"Unrecognized type");
ExpectFailure(
TestProto("name: 'BadAttrType' attr { name: 'a' type: 'int extra' }"),
"Extra ' extra' at the end");
ExpectFailure(
TestProto(
"name: 'BadAttrType' attr { name: 'a' type: 'list(int extra)' }"),
"'list(' is missing ')' in attr");
ExpectFailure(
TestProto(
"name: 'BadAttrType' attr { name: 'a' type: 'list(int) extra' }"),
"Extra ' extra' at the end");
}
TEST_F(ValidateOpDefTest, BadAttrDefault) {
ExpectFailure(
TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'int' default_value { s: 'x' } }"),
"AttrValue had value with type 'string' when 'int' expected\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'int' default_value { f: 0.5 } }"),
"AttrValue had value with type 'float' when 'int' expected\n"
"\t for attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(
TestProto("name: 'BadAttrDef' attr { name: 'a' type: 'int' "
"default_value { i: 5 list { i: [2] } } }"),
"AttrValue had value with type 'list(int)' when 'int' expected\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(
TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'list(int)' default_value { f: 0.5 } }"),
"AttrValue had value with type 'float' when 'list(int)' expected\n\t "
"for attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(
TestProto("name: 'BadAttrDef' attr { name: 'a' type: 'list(int)' "
"default_value { list { i: [5] f: [0.5] } } }"),
"AttrValue had value with type 'list(float)' when 'list(int)' "
"expected\n\t for attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'type' default_value { } }"),
"AttrValue missing value with expected type 'type'\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'shape' default_value { } }"),
"AttrValue missing value with expected type 'shape'\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'tensor' default_value { } }"),
"AttrValue missing value with expected type 'tensor'\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
TF_EXPECT_OK(
TestProto("name: 'GoodAttrDef' attr { name: 'a' "
"type: 'list(int)' default_value { } }"));
TF_EXPECT_OK(
TestProto("name: 'GoodAttrDef' attr { name: 'a' "
"type: 'list(int)' default_value { list { } } }"));
TF_EXPECT_OK(
TestBuilder(OpDefBuilder("GoodAttrDef").Attr("a: list(int) = []")));
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'list(int)' has_minimum: true minimum: 2 "
"default_value { list { } } }"),
"Length for attr 'a' of 0 must be at least minimum 2\n\t in Op "
"'BadAttrDef'");
ExpectFailure(
TestBuilder(OpDefBuilder("GoodAttrDef").Attr("a: list(bool) >=2 = []")),
"Length for attr 'a' of 0 must be at least minimum 2\n\t in Op "
"'GoodAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' type: "
"'list(string)' has_minimum: true minimum: 2 "
"default_value { list { s: ['foo'] } } }"),
"Length for attr 'a' of 1 must be at least minimum 2\n\t in Op "
"'BadAttrDef'");
ExpectFailure(
TestBuilder(
OpDefBuilder("GoodAttrDef").Attr("a: list(type) >=2 = [DT_STRING]")),
"Length for attr 'a' of 1 must be at least minimum 2\n\t in Op "
"'GoodAttrDef'");
}
TEST_F(ValidateOpDefTest, NoRefTypes) {
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrDef").Input("i: float_ref")),
"Illegal use of ref type 'float_ref'. "
"Use 'Ref(type)' instead for input 'i'");
ExpectFailure(
TestBuilder(OpDefBuilder("BadAttrDef").Attr("T: type = DT_INT32_REF")),
"AttrValue must not have reference type value of int32_ref");
ExpectFailure(
TestBuilder(
OpDefBuilder("BadAttrDef").Attr("T: list(type) = [DT_STRING_REF]")),
"AttrValue must not have reference type value of string_ref");
}
TEST_F(ValidateOpDefTest, BadAttrMin) {
ExpectFailure(TestProto("name: 'BadAttrMin' attr { name: 'a' type: 'string' "
"has_minimum: true minimum: 0 }"),
"minimum for unsupported type string");
ExpectFailure(
TestProto("name: 'BadAttrMin' attr { name: 'a' type: 'int' default_value "
"{ i: 2 } has_minimum: true minimum: 7 }"),
"Value for attr 'a' of 2 must be at least minimum 7\n\t in Op "
"'BadAttrMin'");
ExpectFailure(
TestProto("name: 'BadAttrMin' attr { name: 'a' "
"type: 'list(string)' has_minimum: true minimum: -5 }"),
"list type must have a non-negative minimum, not -5");
TF_EXPECT_OK(
TestProto("name: 'GoodAttrMin' attr { name: 'a' type: 'list(string)' "
"has_minimum: true minimum: 1 }"));
ExpectFailure(TestProto("name: 'NoHasMin' attr { name: 'a' "
"type: 'list(string)' minimum: 3 }"),
"Attr 'a' with has_minimum = false but minimum 3 not equal to "
"default of 0");
}
TEST_F(ValidateOpDefTest, BadAttrAllowed) {
TF_EXPECT_OK(TestBuilder(
OpDefBuilder("GoodAttrtude").Attr("x: numbertype = DT_INT32")));
ExpectFailure(
TestBuilder(
OpDefBuilder("BadAttrtude").Attr("x: numbertype = DT_STRING")),
"attr 'x' of string is not in the list of allowed values");
ExpectFailure(
TestBuilder(OpDefBuilder("BadAttrtude")
.Attr("x: list(realnumbertype) = [DT_COMPLEX64]")),
"attr 'x' of complex64 is not in the list of allowed values");
ExpectFailure(
TestBuilder(OpDefBuilder("BadAttrtude")
.Attr("x: list(realnumbertype) = [DT_COMPLEX128]")),
"attr 'x' of complex128 is not in the list of allowed values");
TF_EXPECT_OK(TestBuilder(
OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'")));
ExpectFailure(
TestBuilder(
OpDefBuilder("BadAttrtude").Attr("x: {'foo', 'bar'} = 'baz'")),
"attr 'x' of \"baz\" is not in the list of allowed values");
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude")
.Attr("x: list({'foo', 'bar'}) = ['baz']")),
"attr 'x' of \"baz\" is not in the list of allowed values");
ExpectFailure(TestProto("name: 'BadAttrtude' attr { name: 'a' "
"type: 'string' allowed_values { s: 'not list' } }"),
"with type 'string' when 'list(string)' expected");
ExpectFailure(
TestProto("name: 'BadAttrtude' attr { name: 'a' "
"type: 'string' allowed_values { list { i: [6] } } }"),
"with type 'list(int)' when 'list(string)' expected");
}
TEST_F(ValidateOpDefTest, BadArgType) {
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' "
"type: DT_INT32 } input_arg { name: 'b' }"),
"Missing type for input 'b'");
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' "
"type: DT_INT32 } output_arg { name: 'b' }"),
"Missing type for output 'b'");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' type: "
"DT_INT32 type_attr: 'x' } attr { name: 'x' type: 'type' }"),
"Exactly one of type, type_attr, type_list_attr must be set for input "
"'a'");
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_attr: 'x' } attr { name: 'x' type: 'int' }"),
"Attr 'x' used as type_attr for input 'a' has type int");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_attr: 'x' } attr { name: 'x' type: 'list(type)' }"),
"Attr 'x' used as type_attr for input 'a' has type list(type)");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_list_attr: 'x' } attr { name: 'x' type: 'int' }"),
"Attr 'x' used as type_list_attr for input 'a' has type int");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_list_attr: 'x' } attr { name: 'x' type: 'type' }"),
"Attr 'x' used as type_list_attr for input 'a' has type type");
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_attr: 'x' }"),
"No attr with name 'x' for input 'a'");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' number_attr: 'n' "
"type_attr: 'x' } attr { name: 'x' type: 'list(type)' } "
"attr { name: 'n' type: 'int' has_minimum: true minimum: 1 }"),
"Attr 'x' used as type_attr for input 'a' has type list(type)");
TF_EXPECT_OK(TestProto(
"name: 'Arg' input_arg { name: 'a' type_list_attr: 'x' } "
"attr { name: 'x' type: 'list(type)' } attr { name: 'n' type: 'int' "
"has_minimum: true minimum: 1 }"));
TF_EXPECT_OK(TestProto(
"name: 'Arg' input_arg { name: 'a' type: DT_INT32 number_attr: 'n' } "
"attr { name: 'n' type: 'int' has_minimum: true minimum: 0 }"));
ExpectFailure(TestProto("name: 'Arg' input_arg { name: 'a' type: DT_INT32 "
"number_attr: 'n' }"),
"No attr with name 'n'");
ExpectFailure(
TestProto(
"name: 'Arg' input_arg { name: 'a' type: "
"DT_INT32 number_attr: 'n' } attr { name: 'n' type: 'string' }"),
"Attr 'n' used as length for input 'a' has type string");
ExpectFailure(
TestProto("name: 'Arg' input_arg { name: 'a' type: "
"DT_INT32 number_attr: 'n' } attr { name: 'n' type: 'int' }"),
"Attr 'n' used as length for input 'a' must have minimum;");
ExpectFailure(
TestProto("name: 'Arg' input_arg { name: 'a' type: DT_INT32 number_attr: "
"'n' } attr { name: 'n' type: 'int' has_minimum: true minimum: "
"-5 }"),
"Attr 'n' used as length for input 'a' must have minimum >= 0;");
ExpectFailure(
TestProto("name: 'Arg' input_arg { name: 'a' number_attr: 'n' } attr { "
"name: 'n' type: 'int' has_minimum: true minimum: 2 }"),
"Missing type for input 'a'; in OpDef:");
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' number_attr: "
"'n' type_list_attr: 'x' } attr { name: 'n' type: "
"'int' has_minimum: true minimum: 1 } attr { name: "
"'x' type: 'list(type)' }"),
"Can't have both number_attr and type_list_attr for input 'a'");
}
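// The remaining tests exercise AttrDefEqual/AttrDefHash, the repeated-field
// variants (order-insensitive, as Rep({a1, a2}) == Rep({a2, a1}) shows),
// OpDefEqual/OpDefHash, and OpDefAttrDefaultsUnchanged.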
void ExpectDifferent(const OpDef::AttrDef& a1, const OpDef::AttrDef& a2) {
EXPECT_FALSE(AttrDefEqual(a1, a2));
EXPECT_FALSE(AttrDefEqual(a2, a1));
EXPECT_NE(AttrDefHash(a1), AttrDefHash(a2));
}
TEST(AttrDefUtilTest, EqualAndHash) {
OpDef::AttrDef a = ADef(
"name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }");
EXPECT_TRUE(AttrDefEqual(a, a));
EXPECT_EQ(AttrDefHash(a), AttrDefHash(a));
ExpectDifferent(
a,
ADef("name: 'FOO' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'int32' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'COOL' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: false "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 3 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 3 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 6 }"));
a = ADef(
"name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2");
EXPECT_TRUE(AttrDefEqual(a, a));
EXPECT_EQ(AttrDefHash(a), AttrDefHash(a));
ExpectDifferent(
a,
ADef("name: 'FOO' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'int32' description: 'cool' has_minimum: true "
"minimum: 2"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'COOL' has_minimum: true "
"minimum: 2"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: false "
"minimum: 2"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 3"));
}
protobuf::RepeatedPtrField<OpDef::AttrDef> Rep(
const std::vector<OpDef::AttrDef>& defs) {
protobuf::RepeatedPtrField<OpDef::AttrDef> rep;
for (const OpDef::AttrDef& def : defs) {
rep.Add()->MergeFrom(def);
}
return rep;
}
void ExpectEqual(const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1,
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) {
EXPECT_TRUE(RepeatedAttrDefEqual(a1, a2));
EXPECT_TRUE(RepeatedAttrDefEqual(a2, a1));
EXPECT_EQ(RepeatedAttrDefHash(a1), RepeatedAttrDefHash(a2));
}
void ExpectDifferent(const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1,
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) {
EXPECT_FALSE(RepeatedAttrDefEqual(a1, a2));
EXPECT_FALSE(RepeatedAttrDefEqual(a2, a1));
EXPECT_NE(RepeatedAttrDefHash(a1), RepeatedAttrDefHash(a2));
}
TEST(AttrDefUtilTest, EqualAndHash_Repeated) {
OpDef::AttrDef a1 = ADef(
"name: 'foo1' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }");
OpDef::AttrDef a2 = ADef(
"name: 'foo2' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }");
OpDef::AttrDef a3 = ADef(
"name: 'foo1' type: 'string' description: 'cool' has_minimum: true "
"minimum: 3 default_value { i: 2 } allowed_values { i: 5 }");
OpDef::AttrDef a4 = ADef(
"name: 'foo3' type: 'string' description: 'cool' has_minimum: true "
"minimum: 3 default_value { i: 2 } allowed_values { i: 5 }");
ExpectEqual(Rep({}), Rep({}));
ExpectEqual(Rep({a1}), Rep({a1}));
ExpectEqual(Rep({a1, a2}), Rep({a1, a2}));
ExpectEqual(Rep({a1, a2}), Rep({a2, a1}));
ExpectEqual(Rep({a1, a4}), Rep({a4, a1}));
ExpectDifferent(Rep({a1}), Rep({}));
ExpectDifferent(Rep({a1}), Rep({a2}));
ExpectDifferent(Rep({a1}), Rep({a3}));
ExpectDifferent(Rep({a1}), Rep({a4}));
ExpectDifferent(Rep({a1}), Rep({a1, a2}));
ExpectDifferent(Rep({a1, a2}), Rep({a1, a4}));
ExpectDifferent(Rep({a1, a2}), Rep({a1, a2, a4}));
}
void ExpectEqual(const OpDef& o1, const OpDef& o2) {
EXPECT_TRUE(OpDefEqual(o1, o2));
EXPECT_TRUE(OpDefEqual(o2, o1));
EXPECT_EQ(OpDefHash(o1), OpDefHash(o2));
}
void ExpectDifferent(const OpDef& o1, const OpDef& o2) {
EXPECT_FALSE(OpDefEqual(o1, o2));
EXPECT_FALSE(OpDefEqual(o2, o1));
EXPECT_NE(OpDefHash(o1), OpDefHash(o2));
}
TEST(OpDefEqualityTest, EqualAndHash) {
string a1 = "attr { name: 'a' type: 'string' } ";
string a2 = "attr { name: 'b' type: 'string' } ";
string a3 = "attr { name: 'c' type: 'int32' } ";
OpDef o1 = FromText(strings::StrCat("name: 'MatMul' ", a1));
OpDef o2 = FromText(strings::StrCat("name: 'MatMul' ", a2));
OpDef o3 = FromText(strings::StrCat("name: 'MatMul' ", a1, a2));
OpDef o4 = FromText(strings::StrCat("name: 'MatMul' ", a2, a1));
ExpectEqual(o1, o1);
ExpectEqual(o3, o4);
ExpectDifferent(o1, o2);
ExpectDifferent(o1, o3);
}
TEST(OpDefAttrDefaultsUnchangedTest, Foo) {
const auto& op1 = FromText("name: 'op1' attr { name: 'n' type: 'string'}");
const auto& op2 = FromText(
"name: 'op2' attr { name: 'n' type: 'string' default_value: {s: 'x'}}");
const auto& op3 = FromText(
"name: 'op3' attr { name: 'n' type: 'string' default_value: {s: 'y'}}");
TF_EXPECT_OK(OpDefAttrDefaultsUnchanged(op1, op2));
Status changed_attr = OpDefAttrDefaultsUnchanged(op2, op3);
ExpectFailure(changed_attr,
"Attr 'n' has changed it's default value; from \"x\" to \"y\"");
Status removed_attr = OpDefAttrDefaultsUnchanged(op2, op1);
ExpectFailure(removed_attr,
"Attr 'n' has removed it's default; from \"x\" to no default");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/op_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0dc80c6e-c24a-4fbb-9eae-1afc9c6733d1 | cpp | tensorflow/tensorflow | batch_util | tensorflow/core/util/batch_util.cc | tensorflow/core/framework/batch_util_test.cc | #include "tensorflow/core/util/batch_util.h"
#include <algorithm>
#include <utility>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#define TF_CALL_DATASET_TYPES(m) TF_CALL_ALL_TYPES(m) TF_CALL_QUANTIZED_TYPES(m)
namespace tensorflow {
namespace batch_util {
namespace {
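// Verifies that `element` has exactly as many elements as one slice of
// `parent` along dimension 0, so a flat copy between the two is valid.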
Status ValidateInput(const Tensor& parent, const Tensor& element,
int64_t index) {
DCHECK_NE(parent.dim_size(0), 0);
DCHECK_GE(index, 0);
if (element.NumElements() != (parent.NumElements() / parent.dim_size(0))) {
TensorShape chip_shape = parent.shape();
chip_shape.RemoveDim(0);
    return errors::Internal(
        "ValidateInput Cannot perform copy: number of elements does not "
        "match. Shapes are: [element]: ",
element.shape().DebugString(),
", [parent slice]: ", chip_shape.DebugString());
}
return absl::OkStatus();
}
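// Copies `element` into the `index`-th slice of `parent` along dimension 0.
// For simple types this is a flat memcpy; the specializations below move
// strings and variants instead of copying when `element` holds the only
// reference to its buffer.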
template <typename T>
Status HandleElementToSlice(const Tensor& /*element*/, T* src, T* dest,
int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<tstring>(const Tensor& element, tstring* src,
tstring* dest, int64_t num_values) {
if (element.RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
*dest++ = std::move(*src++);
}
} else {
std::copy_n(src, num_values, dest);
}
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<Variant>(const Tensor& element, Variant* src,
Variant* dest, int64_t num_values) {
if (element.RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
*dest++ = std::move(*src++);
}
} else {
std::copy_n(src, num_values, dest);
}
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<ResourceHandle>(const Tensor& /*element*/,
ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<Eigen::half>(const Tensor& /*element*/,
Eigen::half* src, Eigen::half* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
return absl::OkStatus();
}
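// Copies `num_values` entries from a slice of a const parent tensor into an
// element tensor; specialized to use element-wise copies for types that
// cannot be memcpy'd.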
template <typename T>
void HandleSliceToElement(const T* src, T* dest, int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
}
template <>
void HandleSliceToElement<tstring>(const tstring* src, tstring* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Variant>(const Variant* src, Variant* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<ResourceHandle>(const ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Eigen::half>(const Eigen::half* src,
Eigen::half* dest, int64_t num_values) {
std::copy_n(src, num_values, dest);
}
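// Overloads that take the mutable parent tensor so that strings and variants
// can be moved out of it when the parent holds the only reference to its
// buffer.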
template <typename T>
void HandleSliceToElement(Tensor* parent, T* src, T* dest, int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
}
template <>
void HandleSliceToElement<tstring>(Tensor* parent, tstring* src, tstring* dest,
int64_t num_values) {
if (parent->RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
dest[i] = std::move(src[i]);
}
} else {
std::copy_n(src, num_values, dest);
}
}
template <>
void HandleSliceToElement<Variant>(Tensor* parent, Variant* src, Variant* dest,
int64_t num_values) {
if (parent->RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
dest[i] = std::move(src[i]);
}
} else {
std::copy_n(src, num_values, dest);
}
}
template <>
void HandleSliceToElement<ResourceHandle>(Tensor* parent, ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Eigen::half>(Tensor* parent, Eigen::half* src,
Eigen::half* dest, int64_t num_values) {
std::copy_n(src, num_values, dest);
}
}
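// Copies `element` into the `index`-th slice of `parent`; `element` is taken
// by value so its contents can be moved from when it holds the only
// reference.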
Status CopyElementToSlice(Tensor element, Tensor* parent, int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(*parent, element, index));
const int64_t num_values = element.NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src = element.base<T>(); \
T* dest = parent->base<T>() + (num_values * index); \
return HandleElementToSlice<T>(element, src, dest, num_values); \
}
switch (element.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopyElementToSlice Unhandled data type: ",
element.dtype());
}
}
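// Copies the `index`-th slice of `parent` into `element` without modifying
// the parent tensor.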
Status CopySliceToElement(const Tensor& parent, Tensor* element,
int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(parent, *element, index));
const int64_t num_values = element->NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
const T* src = parent.base<T>() + (num_values * index); \
T* dest = element->base<T>(); \
HandleSliceToElement<T>(src, dest, num_values); \
return OkStatus(); \
}
switch (parent.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopySliceToElement Unhandled data type: ",
element->dtype());
}
}
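// Moves (or, when the source buffer is shared, copies) `num_slices`
// contiguous slices from `src` starting at `src_offset` into `dst` starting
// at `dst_offset`, after validating dtypes, ranks, per-slice sizes, and
// bounds.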
Status MaybeMoveContiguousSlices(Tensor& src, int64_t src_offset,
int64_t dst_offset, int64_t num_slices,
Tensor* dst) {
if (src.dtype() != dst->dtype()) {
    return absl::FailedPreconditionError(absl::StrCat(
        "MaybeMoveContiguousSlices cannot perform copy: src and dst have "
        "different dtypes. Source dtype: ",
        src.dtype(), ", destination dtype: ", dst->dtype(), "."));
}
if (src.dims() < 1) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: src has to be a tensor "
"with "
"rank >= 1. Source shape: ",
src.shape().DebugString()));
}
if (dst->dims() < 1) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: dst has to be a tensor "
"with rank >= 1. Dest shape: ",
dst->shape().DebugString()));
}
const int64_t src_dim0 = src.dim_size(0);
const int64_t dst_dim0 = dst->dim_size(0);
int64_t src_chip_size = 1;
int64_t dst_chip_size = 1;
for (int i = 1; i < src.dims(); ++i) {
src_chip_size *= src.dim_size(i);
}
for (int i = 1; i < dst->dims(); ++i) {
dst_chip_size *= dst->dim_size(i);
}
if (src_chip_size != dst_chip_size) {
    return absl::FailedPreconditionError(absl::StrCat(
        "MaybeMoveContiguousSlices cannot perform copy: source and dst shapes "
        "are not compatible. Source shape: ",
src.shape().DebugString(),
", dst shape: ", dst->shape().DebugString()));
}
if (src_chip_size == 0 && dst_chip_size == 0) {
return absl::OkStatus();
}
if (src_offset < 0 || src_offset + num_slices > src_dim0 || dst_offset < 0 ||
dst_offset + num_slices > dst_dim0) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: index out of range. "
"src_offset: ",
src_offset, ", num_slices: ", num_slices, ", src_dim0: ", src_dim0,
", dst_offset: ", dst_offset, ", dst_dim0: ", dst_dim0, "."));
}
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src_p = src.base<T>() + (src_chip_size * src_offset); \
T* dst_p = dst->base<T>() + (dst_chip_size * dst_offset); \
HandleSliceToElement<T>(&src, src_p, dst_p, src_chip_size * num_slices); \
return OkStatus(); \
}
switch (src.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices unhandled data type: ", src.dtype()));
}
}
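// Same as MaybeMoveContiguousSlices, but always copies and leaves `src`
// untouched.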
Status CopyContiguousSlices(const Tensor& src, int64_t src_offset,
int64_t dst_offset, int64_t num_slices,
Tensor* dst) {
if (src.dtype() != dst->dtype()) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: src and dst have different "
"dtypes. Source dtype: ",
        src.dtype(), ", destination dtype: ", dst->dtype(), ".");
}
if (src.dims() < 1) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: src has to be a tensor with "
"rank >= 1. Source shape: ",
src.shape().DebugString());
}
if (dst->dims() < 1) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: dst has to be a tensor "
"with rank >= 1. Dest shape: ",
dst->shape().DebugString());
}
const int64_t src_dim0 = src.dim_size(0);
const int64_t dst_dim0 = dst->dim_size(0);
int64_t src_chip_size = 1;
int64_t dst_chip_size = 1;
for (int i = 1; i < src.dims(); ++i) {
src_chip_size *= src.dim_size(i);
}
for (int i = 1; i < dst->dims(); ++i) {
dst_chip_size *= dst->dim_size(i);
}
if (src_chip_size != dst_chip_size) {
    return errors::FailedPrecondition(
        "CopyContiguousSlices cannot perform copy: source and dst shapes are "
        "not compatible. Source shape: ",
src.shape().DebugString(), ", dst shape: ", dst->shape().DebugString());
}
if (src_chip_size == 0 && dst_chip_size == 0) {
return absl::OkStatus();
}
if (src_offset < 0 || src_offset + num_slices > src_dim0 || dst_offset < 0 ||
dst_offset + num_slices > dst_dim0) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: index out of range. "
"src_offset: ",
src_offset, ", num_slices: ", num_slices, ", src_dim0: ", src_dim0,
", dst_offset: ", dst_offset, ", dst_dim0: ", dst_dim0, ".");
}
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
const T* src_p = src.base<T>() + (src_chip_size * src_offset); \
T* dst_p = dst->base<T>() + (dst_chip_size * dst_offset); \
HandleSliceToElement<T>(src_p, dst_p, src_chip_size * num_slices); \
return OkStatus(); \
}
switch (src.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopyContiguousSlices unhandled data type: ",
src.dtype());
}
}
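// Moves the `index`-th slice out of `parent` into `element` when `parent`
// holds the only reference to its buffer; otherwise falls back to copying.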
Status MaybeMoveSliceToElement(Tensor* parent, Tensor* element, int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(*parent, *element, index));
const int64_t num_values = element->NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src = parent->base<T>() + (num_values * index); \
T* dest = element->base<T>(); \
HandleSliceToElement<T>(parent, src, dest, num_values); \
return OkStatus(); \
}
switch (parent->dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented(
"MaybeMoveSliceToElement Unhandled data type: ", element->dtype());
}
}
Status ValidateElementToLargerSlice(const Tensor& element, Tensor* parent) {
DCHECK_NE(parent->dim_size(0), 0);
if (element.NumElements() > (parent->NumElements() / parent->dim_size(0))) {
TensorShape chip_shape = parent->shape();
chip_shape.RemoveDim(0);
return errors::Internal(
"HandleElementToLargerSlice Cannot copy slice: number of entries in "
"element is greater than number of elements in parent slice. ",
"Shapes are: [element]: ", element.shape().DebugString(),
", [parent slice]: ", chip_shape.DebugString());
}
return absl::OkStatus();
}
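// Copies `element` into the `index`-th slice of `parent`, where the slice may
// be larger than the element (e.g. a padded batch); the copy fills the
// leading entries of the slice via an Eigen slice assignment.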
template <typename T, int NDIMS>
Status HandleElementToLargerSlice(const Tensor& element, Tensor* parent,
int index) {
TF_RETURN_IF_ERROR(ValidateElementToLargerSlice(element, parent));
if (element.NumElements() == 0) {
return absl::OkStatus();
}
auto element_t = element.tensor<T, NDIMS>();
auto parent_t = parent->tensor<T, NDIMS + 1>();
Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_indices;
slice_indices[0] = index;
Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_size;
slice_size[0] = 1;
for (size_t i = 1; i < slice_size.size(); ++i) {
slice_size[i] = element_t.dimension(i - 1);
}
parent_t.slice(slice_indices, slice_size) = element_t.reshape(slice_size);
return absl::OkStatus();
}
template <int NDIMS>
Status HandleElementToLargerSliceWithRank(const Tensor& element, Tensor* parent,
int index) {
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
return HandleElementToLargerSlice<T, NDIMS>(element, parent, index); \
}
switch (element.dtype()) {
TF_CALL_DATASET_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented(
"HandleElementToLargerSliceWithRank Unhandled data type: ",
element.dtype());
}
}
Status CopyElementToLargerSlice(const Tensor& element, Tensor* parent,
int index) {
if (parent->dims() != element.dims() + 1) {
return errors::Internal(
"Mismatched ranks. Element's rank is: ", element.dims(),
" but element is meant to be a slice in output Tensor having rank: ",
parent->dims(), " (should be: ", element.dims() + 1, ")");
}
#define HANDLE_DIMS(NDIMS) \
case NDIMS: { \
TF_RETURN_IF_ERROR( \
HandleElementToLargerSliceWithRank<NDIMS>(element, parent, index)); \
return OkStatus(); \
}
switch (element.dims()) {
HANDLE_DIMS(0);
HANDLE_DIMS(1);
HANDLE_DIMS(2);
HANDLE_DIMS(3);
HANDLE_DIMS(4);
HANDLE_DIMS(5);
#undef HANDLE_DIMS
default:
return errors::Unimplemented("CopyElementToLargerSlice Unhandled rank: ",
element.dims());
}
}
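// Fills `element` with the scalar `padding` value (despite the name, any
// padding value works, not just zero).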
Status SetElementZero(Tensor* element, const Tensor& padding) {
#define HANDLE_TYPE(T) \
if (element->dtype() == DataTypeToEnum<T>::value) { \
element->flat<T>().setConstant(padding.scalar<T>()()); \
return OkStatus(); \
}
TF_CALL_DATASET_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
return errors::Unimplemented("SetElementZero Unhandled data type: ",
element->dtype());
}
}
} | #include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(CopyContiguousSlicesTest, CompatibleShape) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
      src, /*src_offset=*/2, /*dst_offset=*/0, /*num_slices=*/5, &dst);
ASSERT_EQ(error::OK, s.code());
}
TEST(CopyContiguousSlicesTest, SourceOffsetOutOfRange) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
      src, /*src_offset=*/7, /*dst_offset=*/0, /*num_slices=*/5, &dst);
ASSERT_EQ(error::FAILED_PRECONDITION, s.code());
}
TEST(CopyContiguousSlicesTest, DstOffsetOutOfRange) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
      src, /*src_offset=*/0, /*dst_offset=*/0, /*num_slices=*/8, &dst);
ASSERT_EQ(error::FAILED_PRECONDITION, s.code());
}
TEST(CopyContiguousSlicesTest, CheckDstWithExpectedValues) {
auto src = test::AsTensor<float>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
TensorShape({5, 2}));
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
      src, /*src_offset=*/1, /*dst_offset=*/5, /*num_slices=*/3, &dst);
ASSERT_EQ(error::OK, s.code());
test::ExpectTensorEqual<float>(
test::AsTensor<float>({2, 3, 4, 5, 6, 7}, TensorShape({3, 2, 1})),
dst.Slice(5, 8));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/batch_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/batch_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4ecfefe6-3b23-43e8-a12b-076143182fdb | cpp | google/leveldb | coding | util/coding.cc | util/coding_test.cc | #include "util/coding.h"
namespace leveldb {
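// PutFixed32/PutFixed64 append the value to *dst in fixed-width
// little-endian byte order (see the EncodingOutput test in coding_test.cc).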
void PutFixed32(std::string* dst, uint32_t value) {
char buf[sizeof(value)];
EncodeFixed32(buf, value);
dst->append(buf, sizeof(buf));
}
void PutFixed64(std::string* dst, uint64_t value) {
char buf[sizeof(value)];
EncodeFixed64(buf, value);
dst->append(buf, sizeof(buf));
}
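// Varint encoding stores 7 bits per byte, least-significant group first, and
// sets the high bit (B = 128) on every byte except the last as a continuation
// marker. For example, 300 (0b100101100) encodes as the two bytes 0xAC 0x02.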
char* EncodeVarint32(char* dst, uint32_t v) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(dst);
static const int B = 128;
if (v < (1 << 7)) {
*(ptr++) = v;
} else if (v < (1 << 14)) {
*(ptr++) = v | B;
*(ptr++) = v >> 7;
} else if (v < (1 << 21)) {
*(ptr++) = v | B;
*(ptr++) = (v >> 7) | B;
*(ptr++) = v >> 14;
} else if (v < (1 << 28)) {
*(ptr++) = v | B;
*(ptr++) = (v >> 7) | B;
*(ptr++) = (v >> 14) | B;
*(ptr++) = v >> 21;
} else {
*(ptr++) = v | B;
*(ptr++) = (v >> 7) | B;
*(ptr++) = (v >> 14) | B;
*(ptr++) = (v >> 21) | B;
*(ptr++) = v >> 28;
}
return reinterpret_cast<char*>(ptr);
}
void PutVarint32(std::string* dst, uint32_t v) {
char buf[5];
char* ptr = EncodeVarint32(buf, v);
dst->append(buf, ptr - buf);
}
char* EncodeVarint64(char* dst, uint64_t v) {
static const int B = 128;
uint8_t* ptr = reinterpret_cast<uint8_t*>(dst);
while (v >= B) {
*(ptr++) = v | B;
v >>= 7;
}
*(ptr++) = static_cast<uint8_t>(v);
return reinterpret_cast<char*>(ptr);
}
void PutVarint64(std::string* dst, uint64_t v) {
char buf[10];
char* ptr = EncodeVarint64(buf, v);
dst->append(buf, ptr - buf);
}
void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
PutVarint32(dst, value.size());
dst->append(value.data(), value.size());
}
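// Number of bytes in the varint encoding of v: one byte per 7 bits, i.e.
// between 1 and 10 bytes for a uint64_t.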
int VarintLength(uint64_t v) {
int len = 1;
while (v >= 128) {
v >>= 7;
len++;
}
return len;
}
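// Decodes a varint32 from [p, limit), accumulating 7 bits per byte until a
// byte without the continuation bit is seen; returns nullptr on truncated or
// overlong (more than 5 bytes) input.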
const char* GetVarint32PtrFallback(const char* p, const char* limit,
uint32_t* value) {
uint32_t result = 0;
for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
uint32_t byte = *(reinterpret_cast<const uint8_t*>(p));
p++;
if (byte & 128) {
result |= ((byte & 127) << shift);
} else {
result |= (byte << shift);
*value = result;
return reinterpret_cast<const char*>(p);
}
}
return nullptr;
}
bool GetVarint32(Slice* input, uint32_t* value) {
const char* p = input->data();
const char* limit = p + input->size();
const char* q = GetVarint32Ptr(p, limit, value);
if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
return true;
}
}
const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
uint64_t result = 0;
for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) {
uint64_t byte = *(reinterpret_cast<const uint8_t*>(p));
p++;
if (byte & 128) {
result |= ((byte & 127) << shift);
} else {
result |= (byte << shift);
*value = result;
return reinterpret_cast<const char*>(p);
}
}
return nullptr;
}
bool GetVarint64(Slice* input, uint64_t* value) {
const char* p = input->data();
const char* limit = p + input->size();
const char* q = GetVarint64Ptr(p, limit, value);
if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
return true;
}
}
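// Parses a varint32 length followed by that many bytes; on success, `result`
// aliases the input data and the parsed prefix is consumed from `input`.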
bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
uint32_t len;
if (GetVarint32(input, &len) && input->size() >= len) {
*result = Slice(input->data(), len);
input->remove_prefix(len);
return true;
} else {
return false;
}
}
} | #include "util/coding.h"
#include <vector>
#include "gtest/gtest.h"
namespace leveldb {
TEST(Coding, Fixed32) {
std::string s;
for (uint32_t v = 0; v < 100000; v++) {
PutFixed32(&s, v);
}
const char* p = s.data();
for (uint32_t v = 0; v < 100000; v++) {
uint32_t actual = DecodeFixed32(p);
ASSERT_EQ(v, actual);
p += sizeof(uint32_t);
}
}
TEST(Coding, Fixed64) {
std::string s;
for (int power = 0; power <= 63; power++) {
uint64_t v = static_cast<uint64_t>(1) << power;
PutFixed64(&s, v - 1);
PutFixed64(&s, v + 0);
PutFixed64(&s, v + 1);
}
const char* p = s.data();
for (int power = 0; power <= 63; power++) {
uint64_t v = static_cast<uint64_t>(1) << power;
uint64_t actual;
actual = DecodeFixed64(p);
ASSERT_EQ(v - 1, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
ASSERT_EQ(v + 0, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
ASSERT_EQ(v + 1, actual);
p += sizeof(uint64_t);
}
}
TEST(Coding, EncodingOutput) {
std::string dst;
PutFixed32(&dst, 0x04030201);
ASSERT_EQ(4, dst.size());
ASSERT_EQ(0x01, static_cast<int>(dst[0]));
ASSERT_EQ(0x02, static_cast<int>(dst[1]));
ASSERT_EQ(0x03, static_cast<int>(dst[2]));
ASSERT_EQ(0x04, static_cast<int>(dst[3]));
dst.clear();
PutFixed64(&dst, 0x0807060504030201ull);
ASSERT_EQ(8, dst.size());
ASSERT_EQ(0x01, static_cast<int>(dst[0]));
ASSERT_EQ(0x02, static_cast<int>(dst[1]));
ASSERT_EQ(0x03, static_cast<int>(dst[2]));
ASSERT_EQ(0x04, static_cast<int>(dst[3]));
ASSERT_EQ(0x05, static_cast<int>(dst[4]));
ASSERT_EQ(0x06, static_cast<int>(dst[5]));
ASSERT_EQ(0x07, static_cast<int>(dst[6]));
ASSERT_EQ(0x08, static_cast<int>(dst[7]));
}
TEST(Coding, Varint32) {
std::string s;
for (uint32_t i = 0; i < (32 * 32); i++) {
uint32_t v = (i / 32) << (i % 32);
PutVarint32(&s, v);
}
const char* p = s.data();
const char* limit = p + s.size();
for (uint32_t i = 0; i < (32 * 32); i++) {
uint32_t expected = (i / 32) << (i % 32);
uint32_t actual;
const char* start = p;
p = GetVarint32Ptr(p, limit, &actual);
ASSERT_TRUE(p != nullptr);
ASSERT_EQ(expected, actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
ASSERT_EQ(p, s.data() + s.size());
}
TEST(Coding, Varint64) {
std::vector<uint64_t> values;
values.push_back(0);
values.push_back(100);
values.push_back(~static_cast<uint64_t>(0));
values.push_back(~static_cast<uint64_t>(0) - 1);
for (uint32_t k = 0; k < 64; k++) {
const uint64_t power = 1ull << k;
values.push_back(power);
values.push_back(power - 1);
values.push_back(power + 1);
}
std::string s;
for (size_t i = 0; i < values.size(); i++) {
PutVarint64(&s, values[i]);
}
const char* p = s.data();
const char* limit = p + s.size();
for (size_t i = 0; i < values.size(); i++) {
ASSERT_TRUE(p < limit);
uint64_t actual;
const char* start = p;
p = GetVarint64Ptr(p, limit, &actual);
ASSERT_TRUE(p != nullptr);
ASSERT_EQ(values[i], actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
ASSERT_EQ(p, limit);
}
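// The first five input bytes all have the continuation bit set, exceeding the
// five-byte maximum of a varint32, so decoding must return nullptr.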
TEST(Coding, Varint32Overflow) {
uint32_t result;
std::string input("\x81\x82\x83\x84\x85\x11");
ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
&result) == nullptr);
}
TEST(Coding, Varint32Truncation) {
uint32_t large_value = (1u << 31) + 100;
std::string s;
PutVarint32(&s, large_value);
uint32_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
}
ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
nullptr);
ASSERT_EQ(large_value, result);
}
TEST(Coding, Varint64Overflow) {
uint64_t result;
std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
&result) == nullptr);
}
TEST(Coding, Varint64Truncation) {
uint64_t large_value = (1ull << 63) + 100ull;
std::string s;
PutVarint64(&s, large_value);
uint64_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
}
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
nullptr);
ASSERT_EQ(large_value, result);
}
TEST(Coding, Strings) {
std::string s;
PutLengthPrefixedSlice(&s, Slice(""));
PutLengthPrefixedSlice(&s, Slice("foo"));
PutLengthPrefixedSlice(&s, Slice("bar"));
PutLengthPrefixedSlice(&s, Slice(std::string(200, 'x')));
Slice input(s);
Slice v;
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
ASSERT_EQ("", v.ToString());
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
ASSERT_EQ("foo", v.ToString());
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
ASSERT_EQ("bar", v.ToString());
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
ASSERT_EQ(std::string(200, 'x'), v.ToString());
ASSERT_EQ("", input.ToString());
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/coding.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/coding_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
1f1b2c1e-3bb9-4fc4-ae64-7e71e3f290d5 | cpp | tensorflow/tensorflow | encode_wav_op | tensorflow/core/kernels/encode_wav_op.cc | tensorflow/core/kernels/encode_wav_op_test.cc | #include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/wav/wav_io.h"
namespace tensorflow {
class EncodeWavOp : public OpKernel {
public:
explicit EncodeWavOp(OpKernelConstruction* context) : OpKernel(context) {}
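  // Validates the [samples, channels] float input and the scalar sample
  // rate, then encodes the audio as 16-bit little-endian PCM WAV data into a
  // scalar string output.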
void Compute(OpKernelContext* context) override {
const Tensor& audio = context->input(0);
OP_REQUIRES(context, audio.dims() == 2,
                errors::InvalidArgument("audio must be 2-dimensional: ",
audio.shape().DebugString()));
const Tensor& sample_rate_tensor = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(sample_rate_tensor.shape()),
errors::InvalidArgument(
"Input sample_rate should be a scalar tensor, got ",
sample_rate_tensor.shape().DebugString(), " instead."));
const int32_t sample_rate = sample_rate_tensor.scalar<int32>()();
OP_REQUIRES(
context,
FastBoundsCheck(audio.NumElements(), std::numeric_limits<int32>::max()),
errors::InvalidArgument(
"Cannot encode audio with >= max int32 elements"));
const int32_t channel_count = static_cast<int32>(audio.dim_size(1));
const int32_t sample_count = static_cast<int32>(audio.dim_size(0));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({}), &output));
OP_REQUIRES_OK(context,
wav::EncodeAudioAsS16LEWav(
audio.flat<float>().data(), sample_rate, channel_count,
sample_count, &output->scalar<tstring>()()));
}
};
REGISTER_KERNEL_BUILDER(Name("EncodeWav").Device(DEVICE_CPU), EncodeWavOp);
} | #define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/audio_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace ops {
namespace {
TEST(EncodeWavOpTest, EncodeWavTest) {
Scope root = Scope::DisabledShapeInferenceScope();
Tensor audio_tensor(DT_FLOAT, {4, 2});
test::FillValues<float>(
&audio_tensor, {0.0f, 0.5f, 1.0f, -1.0f, 0.25f, 0.75f, 1.25f, -0.5f});
Output audio_op =
Const(root.WithOpName("audio_op"), Input::Initializer(audio_tensor));
Output sample_rate_op = Const(root.WithOpName("sample_rate_op"), 44100);
EncodeWav encode_wav_op =
EncodeWav(root.WithOpName("encode_wav_op"), audio_op, sample_rate_op);
DecodeWav decode_wav_op =
DecodeWav(root.WithOpName("decode_wav_op"), encode_wav_op);
TF_ASSERT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(ClientSession::FeedType(),
{decode_wav_op.audio, decode_wav_op.sample_rate},
&outputs));
const Tensor& audio = outputs[0];
const int sample_rate = outputs[1].flat<int32>()(0);
EXPECT_EQ(2, audio.dims());
EXPECT_EQ(2, audio.dim_size(1));
EXPECT_EQ(4, audio.dim_size(0));
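  // EncodeWav emits 16-bit PCM, so samples outside [-1.0, 1.0] saturate: the
  // 1.25f input sample round-trips as 1.0f below.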
EXPECT_NEAR(0.0f, audio.flat<float>()(0), 1e-4f);
EXPECT_NEAR(0.5f, audio.flat<float>()(1), 1e-4f);
EXPECT_NEAR(1.0f, audio.flat<float>()(2), 1e-4f);
EXPECT_NEAR(-1.0f, audio.flat<float>()(3), 1e-4f);
EXPECT_NEAR(0.25f, audio.flat<float>()(4), 1e-4f);
EXPECT_NEAR(0.75f, audio.flat<float>()(5), 1e-4f);
EXPECT_NEAR(1.0f, audio.flat<float>()(6), 1e-4f);
EXPECT_NEAR(-0.5f, audio.flat<float>()(7), 1e-4f);
EXPECT_EQ(44100, sample_rate);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/encode_wav_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/encode_wav_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8eb513de-403b-4f62-8099-cef65c43e9f9 | cpp | tensorflow/tensorflow | scoped_allocator_mgr | tensorflow/core/common_runtime/scoped_allocator_mgr.cc | tensorflow/core/common_runtime/scoped_allocator_mgr_test.cc | #include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/framework/allocator.h"
namespace tensorflow {
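// Registers a ScopedAllocator backed by `backing_tensor` under `scope_id`,
// plus one ScopedAllocatorInstance per field; fails if any of the requested
// scope ids is already in use for this step.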
Status ScopedAllocatorContainer::AddScopedAllocator(
const Tensor& backing_tensor, int32_t scope_id, const string& scope_name,
const absl::Span<const ScopedAllocator::Field>& fields,
int32_t expected_call_count) {
VLOG(1) << "AddScopedAllocator " << mgr_->device_name()
<< " step_id_=" << step_id_ << " scope_id=" << scope_id;
mutex_lock l(mu_);
auto it = allocators_.find(scope_id);
if (it != allocators_.end()) {
return errors::Internal("Cannot create ScopedAllocator because scope_id ",
scope_id, " for name ", scope_name,
" already exists");
}
for (auto& f : fields) {
if (allocators_.find(f.scope_id) != allocators_.end()) {
return errors::Internal(
"Cannot create ScopedAllocator because field scope_id ", f.scope_id,
" for name ", scope_name, " already exists");
}
}
VLOG(2) << " container " << this << " step_id " << step_id_;
ScopedAllocator* sa = new ScopedAllocator(
backing_tensor, scope_id, scope_name, fields, expected_call_count, this);
allocators_[scope_id] =
ScopedAllocatorContainer::SAField(ScopedAllocator::kBackingIndex, sa);
VLOG(2) << "#fields " << fields.size();
for (int i = 0; i < fields.size(); ++i) {
const ScopedAllocator::Field& f = fields[i];
VLOG(2) << "Adding instance with for " << mgr_->device_name()
<< " scope_id=" << f.scope_id;
allocators_[f.scope_id] = ScopedAllocatorContainer::SAField(
i, new ScopedAllocatorInstance(sa, i));
}
return absl::OkStatus();
}
ScopedAllocator* ScopedAllocatorContainer::GetAllocator(int32_t scope_id) {
mutex_lock l(mu_);
auto it = allocators_.find(scope_id);
if (it != allocators_.end()) {
CHECK_EQ(ScopedAllocator::kBackingIndex, it->second.field_index);
return it->second.scoped_allocator;
} else {
LOG(ERROR) << "Failed to find ScopedAllocator for " << scope_id
<< " in container for step " << step_id_ << " on "
<< mgr_->device_name();
return nullptr;
}
}
ScopedAllocatorInstance* ScopedAllocatorContainer::GetInstance(
int32_t scope_id) {
VLOG(2) << "GetInstance " << scope_id << " step " << step_id_ << " on "
<< mgr_->device_name();
mutex_lock l(mu_);
auto it = allocators_.find(scope_id);
if (it != allocators_.end()) {
return it->second.instance;
}
LOG(FATAL) << "Failed to find instance " << scope_id << " in container "
<< step_id_ << " on " << mgr_->device_name();
return nullptr;
}
void ScopedAllocatorContainer::Drop(int32_t scope_id, ScopedAllocator* sa) {
VLOG(2) << "Drop " << scope_id << " from container " << this << " step "
<< step_id_ << " on " << mgr_->device_name();
mutex_lock l(mu_);
auto it = allocators_.find(scope_id);
if (it != allocators_.end()) {
if (it->second.field_index != ScopedAllocator::kBackingIndex) {
it->second.instance->DropFromTable();
}
allocators_.erase(it);
}
}
ScopedAllocatorContainer::~ScopedAllocatorContainer() {
VLOG(2) << "~ScopedAllocatorContainer " << this << " step " << step_id_
<< " on " << mgr_->device_name();
mutex_lock l(mu_);
for (auto& it : allocators_) {
if (it.second.field_index == ScopedAllocator::kBackingIndex) {
delete it.second.scoped_allocator;
} else {
it.second.instance->DropFromTable();
}
}
}
ScopedAllocatorMgr::~ScopedAllocatorMgr() {
mutex_lock l(mu_);
for (auto it : per_step_map_) {
while (!it.second->Unref()) {
}
}
}
void ScopedAllocatorMgr::Cleanup(int64_t step_id) {
mutex_lock l(mu_);
auto it = per_step_map_.find(step_id);
if (it != per_step_map_.end()) {
it->second->Unref();
per_step_map_.erase(it);
}
}
ScopedAllocatorContainer* ScopedAllocatorMgr::GetContainer(int64_t step_id) {
VLOG(2) << "GetContainer " << step_id << " on " << device_name();
ScopedAllocatorContainer* sac = nullptr;
mutex_lock l(mu_);
auto it = per_step_map_.find(step_id);
if (it == per_step_map_.end()) {
sac = new ScopedAllocatorContainer(this, step_id);
per_step_map_[step_id] = sac;
} else {
sac = it->second;
}
return sac;
}
Status ScopedAllocatorMgr::AddScopedAllocator(
const Tensor& backing_tensor, int64_t step_id, int32_t scope_id,
const string& scope_name,
const absl::Span<const ScopedAllocator::Field>& fields,
int32_t expected_call_count) {
ScopedAllocatorContainer* sac = GetContainer(step_id);
return sac->AddScopedAllocator(backing_tensor, scope_id, scope_name, fields,
expected_call_count);
}
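// Computes each field's offset and byte counts, padding every field's
// allocation up to Allocator::kAllocatorAlignment, and returns the total
// number of bytes the backing tensor must provide.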
size_t ScopedAllocatorMgr::PopulateFields(
int32_t scope_id, const absl::Span<const TensorShape>& shapes,
const DataType dtype, std::vector<ScopedAllocator::Field>* fields) {
const int32_t num_fields = static_cast<int32>(shapes.size());
fields->resize(num_fields);
size_t offset = 0;
for (int32_t i = 0; i < num_fields; ++i) {
size_t bytes_requested = shapes[i].num_elements() * DataTypeSize(dtype);
auto* field = &((*fields)[i]);
field->scope_id = scope_id + 1 + i;
field->bytes_requested = bytes_requested;
field->offset = offset;
offset += bytes_requested;
size_t bytes_allocated = bytes_requested;
size_t overshoot = offset % Allocator::kAllocatorAlignment;
if (overshoot > 0) {
size_t alignment_bytes = Allocator::kAllocatorAlignment - overshoot;
bytes_allocated += alignment_bytes;
offset += alignment_bytes;
}
field->bytes_allocated = bytes_allocated;
VLOG(1) << "field=" << i << " scope_id=" << field->scope_id
<< " bytes_requested=" << field->bytes_requested
<< " offset=" << field->offset
<< " bytes_allocated=" << field->bytes_allocated;
}
return offset;
}
} | #include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class ScopedAllocatorMgrTest : public ::testing::Test {
public:
ScopedAllocatorMgrTest() : sam_("CPU0") {}
void InitTensor() {
backing_tensor_ = Tensor(cpu_allocator(), DT_FLOAT, backing_tensor_shape_);
}
void PopulateFields() {
ScopedAllocatorMgr::PopulateFields(scope_id_, fields_shapes_, DT_FLOAT,
&fields_);
}
Status AddScopedAllocator(int expected_use_count, int scope_id) {
VLOG(2) << "Adding ScopedAllocator step_id " << step_id_ << " scope_id "
<< scope_id_ << " #fields " << fields_.size()
<< " expected_use_count " << expected_use_count;
return sam_.AddScopedAllocator(backing_tensor_, step_id_, scope_id,
"tensor_shape_599", fields_,
expected_use_count);
}
Status PrepScopedAllocatorMgr(int expected_use_count) {
InitTensor();
PopulateFields();
return AddScopedAllocator(expected_use_count, scope_id_);
}
void SaveInstances(int num_instances) {
sa_instances_.clear();
sa_instances_.resize(num_instances);
ScopedAllocatorContainer* sac = sam_.GetContainer(step_id_);
for (int i = 0; i < num_instances; i++) {
sa_instances_[i] = sac->GetInstance(scope_id_ + 1 + i);
}
}
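  // Bytes of padding inserted after the first two fields (512 + 9 floats) to
  // align the third field to Allocator::kAllocatorAlignment.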
int AlignmentPadding() {
int alignment_padding =
(Allocator::kAllocatorAlignment -
(521 * sizeof(float)) % Allocator::kAllocatorAlignment) %
Allocator::kAllocatorAlignment;
return alignment_padding;
}
void PrintShapes() {
VLOG(2) << "tensor_shape=" << backing_tensor_shape_.DebugString();
for (int i = 0; i < fields_shapes_.size(); i++) {
VLOG(2) << "fields_shapes[" << i
<< "]=" << fields_shapes_[i].DebugString();
}
}
protected:
TensorShape backing_tensor_shape_;
Tensor backing_tensor_;
std::vector<TensorShape> fields_shapes_;
std::vector<ScopedAllocator::Field> fields_;
ScopedAllocatorMgr sam_;
const int step_id_ = 101;
const int scope_id_ = 599;
std::vector<ScopedAllocatorInstance*> sa_instances_;
};
TEST_F(ScopedAllocatorMgrTest, ContainerAllocation) {
ScopedAllocatorContainer* sac_101 = sam_.GetContainer(101);
EXPECT_TRUE(sac_101 != nullptr);
ScopedAllocatorContainer* sac_201 = sam_.GetContainer(201);
EXPECT_TRUE(sac_201 != nullptr);
EXPECT_NE(sac_101, sac_201);
ScopedAllocatorContainer* also_sac_101 = sam_.GetContainer(101);
EXPECT_EQ(sac_101, also_sac_101);
sam_.Cleanup(101);
}
TEST_F(ScopedAllocatorMgrTest, PopulateFields) {
backing_tensor_shape_ = TensorShape({512 + 9 + 512 + 16});
fields_shapes_ = std::vector<TensorShape>({{512}, {3, 3}, {2, 256}});
InitTensor();
PopulateFields();
EXPECT_EQ(0, fields_[0].offset);
EXPECT_EQ(512 * sizeof(float), fields_[0].bytes_requested);
EXPECT_EQ(scope_id_ + 1, fields_[0].scope_id);
EXPECT_EQ(512 * sizeof(float), fields_[1].offset);
EXPECT_EQ(9 * sizeof(float), fields_[1].bytes_requested);
EXPECT_EQ(scope_id_ + 2, fields_[1].scope_id);
EXPECT_EQ(521 * sizeof(float) + AlignmentPadding(), fields_[2].offset);
EXPECT_EQ(512 * sizeof(float), fields_[2].bytes_requested);
EXPECT_EQ(scope_id_ + 3, fields_[2].scope_id);
}
TEST_F(ScopedAllocatorMgrTest, ContainerAddAllocator) {
backing_tensor_shape_ = TensorShape({1024});
fields_shapes_ = std::vector<TensorShape>({{512}, {512}});
Status s = PrepScopedAllocatorMgr(2);
EXPECT_TRUE(s.ok());
SaveInstances(fields_shapes_.size());
s = AddScopedAllocator(2, scope_id_);
EXPECT_FALSE(s.ok());
fields_[0].scope_id = scope_id_ + 1;
s = AddScopedAllocator(2, scope_id_ + 3);
EXPECT_FALSE(s.ok());
void* ptr0 =
      sa_instances_[0]->AllocateRaw(0 /* alignment */, 512 * sizeof(float));
void* ptr1 =
      sa_instances_[1]->AllocateRaw(0 /* alignment */, 512 * sizeof(float));
sa_instances_[0]->DeallocateRaw(ptr0);
sa_instances_[1]->DeallocateRaw(ptr1);
}
TEST_F(ScopedAllocatorMgrTest, AllocatorSuccess) {
ScopedAllocatorContainer* sac = sam_.GetContainer(step_id_);
ScopedAllocator* other = sac->GetAllocator(scope_id_);
EXPECT_EQ(other, nullptr);
backing_tensor_shape_ = TensorShape({512 + 9 + 512 + 16});
fields_shapes_ = std::vector<TensorShape>({{512}, {3, 3}, {2, 256}});
Status s = PrepScopedAllocatorMgr(3);
other = sac->GetAllocator(scope_id_);
ScopedAllocatorInstance* inst0 = sac->GetInstance(scope_id_ + 1);
char* ptr0 = static_cast<char*>(inst0->AllocateRaw(0, 512 * sizeof(float)));
const char* base =
static_cast<const char*>(DMAHelper::base(&backing_tensor_));
EXPECT_EQ(ptr0, base);
ScopedAllocatorInstance* inst1 = sac->GetInstance(scope_id_ + 2);
char* ptr1 = static_cast<char*>(inst1->AllocateRaw(0, 9 * sizeof(float)));
EXPECT_EQ(ptr1, ptr0 + (512 * sizeof(float)));
ScopedAllocatorInstance* inst2 = sac->GetInstance(scope_id_ + 3);
char* ptr2 = static_cast<char*>(inst2->AllocateRaw(0, 512 * sizeof(float)));
EXPECT_EQ(ptr2, ptr1 + AlignmentPadding() + (9 * sizeof(float)));
EXPECT_EQ(nullptr, sac->GetAllocator(scope_id_));
inst0->DeallocateRaw(ptr0);
inst1->DeallocateRaw(ptr1);
inst2->DeallocateRaw(ptr2);
}
TEST_F(ScopedAllocatorMgrTest, AllocatorInitFail) {
backing_tensor_shape_ = TensorShape({8});
InitTensor();
fields_.resize(1);
fields_[0].scope_id = scope_id_ + 1;
fields_[0].offset = 0;
fields_[0].bytes_requested =
backing_tensor_shape_.num_elements() * 2 * sizeof(float);
EXPECT_DEATH(Status s = AddScopedAllocator(1, scope_id_), "");
}
TEST_F(ScopedAllocatorMgrTest, AllocatorFail) {
backing_tensor_shape_ = TensorShape({1024});
fields_shapes_ = std::vector<TensorShape>({{512}, {512}});
Status s = PrepScopedAllocatorMgr(2);
EXPECT_TRUE(s.ok());
SaveInstances(fields_shapes_.size());
char* ptr0 =
static_cast<char*>(sa_instances_[0]->AllocateRaw(0, 512 * sizeof(float)));
VLOG(2) << "Should fail because we deallocate ptr="
<< static_cast<void*>(ptr0 + 8) << " which we never allocated.";
EXPECT_DEATH(sa_instances_[0]->DeallocateRaw(ptr0 + 8), "");
VLOG(2) << "Should fail because we allocate smaller than the size of the "
<< "field.";
EXPECT_EQ(nullptr, sa_instances_[1]->AllocateRaw(0, 256 * sizeof(float)));
VLOG(2) << "Should fail because we allocate larger than the size of the "
<< "field.";
EXPECT_EQ(nullptr, sa_instances_[1]->AllocateRaw(0, 1024 * sizeof(float)));
void* ptr1 = sa_instances_[1]->AllocateRaw(0, 512 * sizeof(float));
VLOG(2) << "Should fail because we exceed expected_use_count.";
EXPECT_EQ(nullptr, sa_instances_[0]->AllocateRaw(0, 512 * sizeof(float)));
sa_instances_[0]->DeallocateRaw(ptr0);
sa_instances_[1]->DeallocateRaw(ptr1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/scoped_allocator_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/scoped_allocator_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
591c41b1-f54a-486f-8805-bb5cd8bfa084 | cpp | tensorflow/tensorflow | triton_support_legacy | third_party/xla/xla/service/gpu/fusions/triton/triton_support_legacy.cc | third_party/xla/xla/service/gpu/fusions/triton/triton_support_legacy_test.cc | #include <cstdint>
#include <iterator>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace xla {
namespace gpu {
namespace legacy_triton {
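// Ops treated as distributive over addition, i.e. op(a + b) == op(a) + op(b):
// multiply, negate, and pure layout/data-movement ops such as bitcast,
// reshape, copy, transpose, convert, broadcast, and slice.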
bool IsDistributiveOverAddition(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kMultiply ||
hlo.opcode() == HloOpcode::kNegate ||
hlo.opcode() == HloOpcode::kBitcast ||
hlo.opcode() == HloOpcode::kReshape || hlo.opcode() == HloOpcode::kCopy ||
hlo.opcode() == HloOpcode::kTranspose ||
hlo.opcode() == HloOpcode::kConvert ||
hlo.opcode() == HloOpcode::kBroadcast ||
hlo.opcode() == HloOpcode::kSlice) {
return true;
}
return false;
}
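// Output types a Triton dot can produce: F8E5M2 needs Ampere+ and F8E4M3FN
// needs Hopper+ on CUDA (neither is supported on ROCm), and BF16 on ROCm
// requires hardware BF16 support.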
bool IsTritonSupportedDotOutputType(
const PrimitiveType t, const se::GpuComputeCapability& gpu_version) {
switch (t) {
case F16:
case F32:
return true;
case F8E5M2:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return cc.IsAtLeastAmpere();
},
[](const se::RocmComputeCapability& cc) {
return false;
}},
gpu_version);
case F8E4M3FN:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return cc.IsAtLeastHopper();
},
[](const se::RocmComputeCapability& cc) {
return false;
}},
gpu_version);
case BF16:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return true;
},
[](const se::RocmComputeCapability& cc) {
return cc.has_bf16_dtype_support();
}},
gpu_version);
default:
return false;
}
}
bool IsTritonSupportedDataType(PrimitiveType type,
const se::GpuComputeCapability& gpu_version) {
if (IsTritonSupportedDotOutputType(type, gpu_version)) {
return true;
}
switch (type) {
case PRED:
case S8:
case S16:
case S32:
return true;
default:
return false;
}
}
CodegenDecision IsInstructionSupportsDataTypes(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
if (!IsTritonSupportedDataType(instr.shape().element_type(), gpu_version)) {
return CodegenDecision::Forbid("Unsupported output data type.");
}
for (const HloInstruction* operand : instr.operands()) {
const auto operand_type = operand->shape().element_type();
switch (instr.opcode()) {
case HloOpcode::kConvert:
if (operand_type == S4) continue;
[[fallthrough]];
default:
if (!IsTritonSupportedDataType(operand_type, gpu_version)) {
return CodegenDecision::Forbid("Unsupported input data type.");
}
}
}
return CodegenDecision::Allow();
}
std::vector<HloOpcode> TritonSupportedUnaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
std::vector<HloOpcode> ret = {HloOpcode::kConvert};
if (element_type == PrimitiveType::PRED) {
ret.push_back(HloOpcode::kNot);
return ret;
}
ret.push_back(HloOpcode::kAbs);
ret.push_back(HloOpcode::kNegate);
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F64) {
absl::c_copy(std::vector<HloOpcode>{HloOpcode::kCos, HloOpcode::kExp,
HloOpcode::kExpm1, HloOpcode::kFloor,
HloOpcode::kCeil, HloOpcode::kLog,
HloOpcode::kLog1p, HloOpcode::kRsqrt,
HloOpcode::kSin, HloOpcode::kSqrt,
HloOpcode::kCbrt, HloOpcode::kTan,
HloOpcode::kTanh, HloOpcode::kErf},
std::back_inserter(ret));
}
return ret;
}
std::vector<HloOpcode> TritonSupportedBinaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
if (element_type == PrimitiveType::PRED) {
return {HloOpcode::kAnd, HloOpcode::kOr, HloOpcode::kXor,
HloOpcode::kCompare};
}
std::vector<HloOpcode> ret = {HloOpcode::kAdd, HloOpcode::kCompare,
HloOpcode::kMaximum, HloOpcode::kMinimum,
HloOpcode::kMultiply, HloOpcode::kSubtract};
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F64) {
ret.push_back(HloOpcode::kAtan2);
ret.push_back(HloOpcode::kDivide);
ret.push_back(HloOpcode::kPower);
}
return ret;
}
std::vector<HloOpcode> TritonSupportedTernaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
return {HloOpcode::kSelect, HloOpcode::kClamp};
}
bool IsTritonSupportedElementwiseUpToFloatNormalization(
HloOpcode opcode, PrimitiveType element_type) {
return absl::c_linear_search(
TritonSupportedUnaryElementwiseUpToFloatNormalization(
element_type),
opcode) ||
absl::c_linear_search(
TritonSupportedBinaryElementwiseUpToFloatNormalization(
element_type),
opcode) ||
absl::c_linear_search(
TritonSupportedTernaryElementwiseUpToFloatNormalization(
element_type),
opcode);
}
CodegenDecision CanTritonHandleElementwise(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
if (auto decision = IsInstructionSupportsDataTypes(instr, gpu_version);
!decision.CanFuse()) {
return decision;
}
if (instr.opcode() == HloOpcode::kConstant) {
return CodegenDecision::Allow();
} else if (!IsTritonSupportedElementwiseUpToFloatNormalization(
instr.opcode(), instr.operand(0)->shape().element_type())) {
return CodegenDecision::Forbid("Unsupported elementwise operation.");
}
return CodegenDecision::Allow();
}
bool IsDotAlgorithmSupportedByTriton(
PrecisionConfig::Algorithm algorithm,
const se::GpuComputeCapability& gpu_version) {
auto cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
auto rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
switch (algorithm) {
case PrecisionConfig::ALG_DOT_TF32_TF32_F32_X3:
case PrecisionConfig::ALG_DOT_TF32_TF32_F32:
if (cuda_compute_capability) {
return true;
}
return false;
case PrecisionConfig::ALG_DOT_BF16_BF16_F32:
case PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3:
case PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6:
if (cuda_compute_capability) {
return true;
}
if (rocm_compute_capability) {
return rocm_compute_capability->has_bf16_dtype_support();
}
return false;
case PrecisionConfig::ALG_DOT_F16_F16_F32:
case PrecisionConfig::ALG_DOT_F32_F32_F32:
default:
return false;
}
}
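// Checks the dot's algorithm/precision settings (an unset algorithm requires
// TensorFloat-32 enabled and default operand precisions), its operand and
// output types, and its batch dimension count (at most one).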
CodegenDecision CanTritonHandleGEMM(
const HloDotInstruction& dot, const se::GpuComputeCapability& gpu_version) {
auto cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
auto rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
CHECK(cuda_compute_capability || rocm_compute_capability);
if (dot.precision_config().algorithm() == PrecisionConfig::ALG_UNSET) {
if (!tsl::tensor_float_32_execution_enabled() ||
absl::c_any_of(dot.precision_config().operand_precision(),
[](int x) { return x != PrecisionConfig::DEFAULT; })) {
return CodegenDecision::Forbid(
"Having non-default operand precisions or TensorFloat-32 disabled "
"for Dot op with unset algorithm.");
}
} else {
if (!IsDotAlgorithmSupportedByTriton(dot.precision_config().algorithm(),
gpu_version)) {
return CodegenDecision::Forbid(absl::StrFormat(
"Unsupported algorithm on the current device(s): %s",
PrecisionConfig::Algorithm_Name(dot.precision_config().algorithm())));
}
}
if (!IsTritonSupportedDotOutputType(dot.shape().element_type(),
gpu_version)) {
return CodegenDecision::Forbid("Unsupported output data type for Dot op.");
}
if (!IsTritonSupportedDataType(dot.operand(0)->shape().element_type(),
gpu_version) ||
!IsTritonSupportedDataType(dot.operand(1)->shape().element_type(),
gpu_version)) {
return CodegenDecision::Forbid("Unsupported input data type for Dot op.");
}
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
if (dim_numbers.lhs_batch_dimensions().size() > 1) {
return CodegenDecision::Forbid("Multiple batch dimensions.");
}
return CodegenDecision::Allow();
}
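// True if either dot operand consists solely of batch and contracting
// dimensions, i.e. no non-contracting dimension remains.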
bool NoNonContractingDimension(const HloDotInstruction& dot) {
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
if (dim_numbers.lhs_batch_dimensions().size() +
dim_numbers.lhs_contracting_dimensions().size() ==
dot.operand(0)->shape().rank() ||
dim_numbers.rhs_batch_dimensions().size() +
dim_numbers.rhs_contracting_dimensions().size() ==
dot.operand(1)->shape().rank()) {
return true;
}
return false;
}
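// Dynamic slice is supported only with S8/S16/S32 indices, and only when
// every dimension other than the major-most one keeps its full extent (only
// the major-most dimension may actually be sliced).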
CodegenDecision IsTritonSupportedDynamicSlice(
const HloDynamicSliceInstruction& instr) {
for (const HloInstruction* index_operand : instr.index_operands()) {
switch (index_operand->shape().element_type()) {
case S8:
case S16:
case S32:
break;
default:
return CodegenDecision::Forbid(
"Dynamic slice is only supported with S8, S16, or S32 indices.");
}
}
const HloInstruction* input = instr.operand(0);
Layout in_layout = input->shape().layout();
int64_t majormost_dim_id =
in_layout.minor_to_major(in_layout.minor_to_major_size() - 1);
for (int i = 0; i < input->shape().dimensions_size(); ++i) {
if (i == majormost_dim_id) {
continue;
} else if (input->shape().dimensions(i) != instr.slice_sizes(i)) {
return CodegenDecision::Forbid(
"Unsupported dynamic slice on non-major-most dimension.");
}
}
return CodegenDecision::Allow();
}
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
if (instr.IsElementwise()) {
return CanTritonHandleElementwise(instr, gpu_version);
}
switch (instr.opcode()) {
case HloOpcode::kDot: {
auto* dot = Cast<HloDotInstruction>(&instr);
if (NoNonContractingDimension(*dot)) {
return CodegenDecision::Forbid("No non-contracting dimensions.");
}
return CanTritonHandleGEMM(*dot, gpu_version);
}
case HloOpcode::kTuple: {
if (instr.IsRoot()) {
return CodegenDecision::Allow();
}
return CodegenDecision::Forbid("Only supports root tuples.");
}
case HloOpcode::kDynamicSlice: {
return IsTritonSupportedDynamicSlice(
*Cast<HloDynamicSliceInstruction>(&instr));
}
case HloOpcode::kBitcast:
case HloOpcode::kTranspose:
case HloOpcode::kSlice:
case HloOpcode::kReshape:
case HloOpcode::kPad:
case HloOpcode::kConcatenate:
case HloOpcode::kParameter:
case HloOpcode::kBroadcast:
return CodegenDecision::Allow();
default:
break;
}
return CodegenDecision::Forbid("Unsupported opcode.");
}
}
}
} | #include "xla/service/gpu/fusions/triton/triton_support_legacy.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/fusions/triton/triton_fusion_emitter.h"
#include "xla/service/gpu/fusions/triton/triton_test_utils.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
se::GpuComputeCapability GetComputeCapability() {
return se::CudaComputeCapability::Ampere();
}
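// On pre-Hopper CUDA devices, dots involving F8E4M3FN are known to crash
// Triton rather than fail gracefully, so those combinations are skipped
// instead of being compiled.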
bool CombinationCrashesTriton(PrimitiveType lhs_type, PrimitiveType rhs_type,
PrimitiveType output_type,
se::GpuComputeCapability gpu_compute_capability) {
if (std::holds_alternative<se::CudaComputeCapability>(
gpu_compute_capability)) {
auto cuda_compute_capability =
std::get<se::CudaComputeCapability>(gpu_compute_capability);
if (!cuda_compute_capability.IsAtLeastHopper() &&
(lhs_type == F8E4M3FN || rhs_type == F8E4M3FN ||
output_type == F8E4M3FN)) {
return true;
}
}
return false;
}
class DotTest : public TritonSupportTestBaseWithParam {
protected:
void TestDotWithTypes(PrimitiveType lhs_type, PrimitiveType rhs_type,
PrimitiveType output_type) {
if (lhs_type == BF16 && !SupportsBF16(GetComputeCapability())) {
GTEST_SKIP();
}
const HloOpcode opcode = HloOpcode::kDot;
const std::string lhs =
primitive_util::LowercasePrimitiveTypeName(lhs_type);
const std::string rhs =
primitive_util::LowercasePrimitiveTypeName(rhs_type);
const std::string output =
primitive_util::LowercasePrimitiveTypeName(output_type);
const std::string kHloTestTemplate = R"(
triton_computation {
parameter_0 = $0[92,11]{1,0} parameter(0)
parameter_1 = $1[11,63]{1,0} parameter(1)
ROOT dot = $2[92,63]{1,0} $3(parameter_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
parameter_0 = $0[92,11]{1,0} parameter(0)
parameter_1 = $1[11,63]{1,0} parameter(1)
ROOT triton_op = $2[92,63]{1,0} fusion(parameter_0, parameter_1), kind=kCustom,
calls=triton_computation,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm",
triton_gemm_config:
{"block_m":16,"block_n":32,"block_k":512,
"split_k":1,"num_stages":4,"num_warps":8,
"num_ctas":1}}}
})";
const std::string hlo_test = absl::Substitute(
kHloTestTemplate, lhs, rhs, output, HloOpcodeString(opcode));
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(hlo_test, {}, opcode));
if (legacy_triton::IsTritonSupportedInstruction(ti.Instruction(),
GetComputeCapability())) {
TF_EXPECT_OK(
ApplyFloatNormalization(ti.Module().get(), GetComputeCapability()));
EXPECT_TRUE(RunAndCompareNoHloPasses(
std::move(ti.Module()),
ErrorSpec{primitive_util::IsF8Type(lhs_type) ? 1.0 : 2e-4,
2e-4}));
} else {
if (CombinationCrashesTriton(lhs_type, rhs_type, output_type,
GetComputeCapability())) {
return;
}
const se::DeviceDescription dev_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo(GetComputeCapability());
BlockLevelParameters block_level_parameters;
block_level_parameters.num_ctas = 1;
block_level_parameters.num_stages = 4;
block_level_parameters.num_warps = 8;
EXPECT_THAT(
TritonWrapper("test_fn", &ti.TritonFusion(), GetComputeCapability(),
dev_info, block_level_parameters, &llvm_module_,
mlir_context_),
tsl::testing::StatusIs(
absl::StatusCode::kInternal,
::testing::HasSubstr("Failed to compile Triton kernel")));
}
}
};
TEST_P(DotTest, IsTritonSupportedExecutesCorrectlyForDot) {
PrimitiveType data_type;
HloOpcode opcode;
std::tie(data_type, opcode) = GetParam();
CHECK_EQ(opcode, HloOpcode::kDot);
TestDotWithTypes(data_type, data_type, data_type);
switch (data_type) {
case F8E5M2:
TestDotWithTypes(F8E5M2, F8E4M3FN, F32);
TestDotWithTypes(F8E5M2, F8E5M2, F16);
TestDotWithTypes(F8E5M2, F8E5M2, F32);
break;
case F8E4M3FN:
TestDotWithTypes(F8E4M3FN, F8E5M2, F32);
TestDotWithTypes(F8E4M3FN, F8E4M3FN, F16);
TestDotWithTypes(F8E4M3FN, F8E4M3FN, F32);
break;
default:
break;
}
}
INSTANTIATE_TEST_SUITE_P(DotTestTestSuite, DotTest,
::testing::Combine(::testing::Values(F16, F32, BF16,
F8E5M2, F8E4M3FN),
::testing::Values(HloOpcode::kDot)),
TritonSupportTestTypeAndOpcodeToString);
struct DynamicSliceTestParam {
PrimitiveType data_type;
PrimitiveType index_type;
bool is_the_majormost_dim_being_sliced;
using TupleType = std::tuple<PrimitiveType, PrimitiveType, bool>;
explicit DynamicSliceTestParam(const TupleType& tuple)
: data_type(std::get<0>(tuple)),
index_type(std::get<1>(tuple)),
is_the_majormost_dim_being_sliced(std::get<2>(tuple)) {}
};
std::string DynamicSliceTestParamToString(
const ::testing::TestParamInfo<DynamicSliceTestParam::TupleType>& info) {
const DynamicSliceTestParam param(info.param);
return absl::StrCat(
primitive_util::LowercasePrimitiveTypeName(param.data_type), "_",
primitive_util::LowercasePrimitiveTypeName(param.index_type), "_",
param.is_the_majormost_dim_being_sliced ? "majormost" : "not_majormost");
}
class DynamicSliceTest
: public TritonSupportTestBase,
public ::testing::WithParamInterface<DynamicSliceTestParam::TupleType> {};
TEST_P(DynamicSliceTest, IsTritonSupportedDynamicSlice) {
const DynamicSliceTestParam param(GetParam());
if (param.data_type == BF16 && !SupportsBF16(GetComputeCapability())) {
GTEST_SKIP();
}
constexpr absl::string_view kHloTestTemplate =
R"(
triton_computation {
dynamic_slice_input = $0[$2,$3] parameter(0)
dot_rhs = f32[2,4] parameter(1)
start_index0 = $1[] parameter(2)
start_index1 = $1[] parameter(3)
dynamic_slice = $0[5,2] dynamic-slice(dynamic_slice_input, start_index0, start_index1),
dynamic_slice_sizes={5,2}
convert = f32[5,2] convert(dynamic_slice)
ROOT dot = f32[5, 4] dot(convert, dot_rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
dynamic_slice_input = $0[$2,$3] parameter(0)
dot_rhs = f32[2,4] parameter(1)
start_index0 = $1[] constant($4)
start_index1 = $1[] constant($5)
ROOT fusion = f32[5,4] fusion(dynamic_slice_input, dot_rhs, start_index0, start_index1),
kind=kCustom, calls=triton_computation,
backend_config={
"fusion_backend_config":{
"kind":"__triton_gemm","triton_gemm_config":{
"block_m":"32","block_n":"32","block_k":"32","split_k":"1",
"num_stages":"1","num_warps":"4","num_ctas":"1"}}}
})";
const std::string hlo_test = absl::Substitute(
kHloTestTemplate,
primitive_util::LowercasePrimitiveTypeName(param.data_type),
primitive_util::LowercasePrimitiveTypeName(param.index_type),
param.is_the_majormost_dim_being_sliced ? 7 : 5,
param.is_the_majormost_dim_being_sliced ? 2 : 4,
param.is_the_majormost_dim_being_sliced ? 1 : 0,
param.is_the_majormost_dim_being_sliced ? 0 : 1
);
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
hlo_test, {},
HloOpcode::kDynamicSlice));
const bool is_supported_instruction =
legacy_triton::IsTritonSupportedInstruction(ti.Instruction(),
GetComputeCapability())
.CanFuse();
const bool is_supported_dynamic_slice =
legacy_triton::IsTritonSupportedDynamicSlice(
*Cast<HloDynamicSliceInstruction>(&ti.Instruction()))
.CanFuse();
EXPECT_EQ(is_supported_instruction, is_supported_dynamic_slice);
if (is_supported_instruction) {
TF_EXPECT_OK(
ApplyFloatNormalization(ti.Module().get(), GetComputeCapability()));
EXPECT_TRUE(RunAndCompareNoHloPasses(
std::move(ti.Module()), ErrorSpec{2e-4, 2e-4}));
} else {
EXPECT_THAT(TritonFusionAnalysis::Execute(ti.TritonComputation()),
tsl::testing::StatusIs(absl::StatusCode::kFailedPrecondition));
}
}
INSTANTIATE_TEST_SUITE_P(
All, DynamicSliceTest,
::testing::Combine(::testing::Values(F16, BF16, F32),
::testing::Values(S8, S16, S32, S64, U8, U16, U32, U64),
::testing::Bool()),
DynamicSliceTestParamToString);
TEST_F(TritonSupportTestBase,
UnsupportedDotOutputTypeFailsGracefullyWithTriton) {
const std::string kHloTest = R"(
triton_computation {
parameter_0 = f32[92,11]{1,0} parameter(0)
parameter_1 = f32[11,63]{1,0} parameter(1)
ROOT dot = pred[92,63]{1,0} dot(parameter_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
parameter_0 = f32[92,11]{1,0} parameter(0)
parameter_1 = f32[11,63]{1,0} parameter(1)
ROOT triton_op = pred[92,63]{1,0} fusion(parameter_0, parameter_1), kind=kCustom,
calls=triton_computation,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm",
triton_gemm_config:
{"block_m":16,"block_n":32,"block_k":512,
"split_k":1,"num_stages":4,"num_warps":8,
"num_ctas":1}}}
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti,
ParseTemplateAndGetInstruction(
kHloTest, {}, HloOpcode::kDot));
const se::DeviceDescription dev_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo(GetComputeCapability());
EXPECT_THAT(legacy_triton::IsTritonSupportedInstruction(
ti.Instruction(), GetComputeCapability())
.Explain(),
::testing::HasSubstr("Unsupported output data type for Dot op."));
BlockLevelParameters block_level_parameters;
block_level_parameters.num_ctas = 1;
block_level_parameters.num_stages = 4;
block_level_parameters.num_warps = 8;
EXPECT_THAT(
TritonWrapper("test_fn", &ti.TritonFusion(), GetComputeCapability(),
dev_info, block_level_parameters, &llvm_module_,
mlir_context_),
tsl::testing::StatusIs(
absl::StatusCode::kInternal,
::testing::HasSubstr("Failed to verify Triton module for fusion")));
}
TEST_F(TritonSupportTestBase,
UnsupportedDotWithMultipleBatchDimensionsFailsGracefullyWithTriton) {
const std::string kHloTest = R"(
triton_computation {
parameter_0 = f32[2,2,2,2]{3,2,1,0} parameter(0)
parameter_1 = f32[2,2,2,2]{3,2,1,0} parameter(1)
ROOT dot = f32[2,2,2,2]{3,2,1,0} dot(parameter_0, parameter_1),
lhs_contracting_dims={3}, lhs_batch_dims={1,0}, rhs_contracting_dims={2},
rhs_batch_dims={1,0}
}
ENTRY e {
parameter_0 = f32[2,2,2,2]{3,2,1,0} parameter(0)
parameter_1 = f32[2,2,2,2]{3,2,1,0} parameter(1)
ROOT triton_op = f32[2,2,2,2]{3,2,1,0} fusion(parameter_0, parameter_1),
kind=kCustom, calls=triton_computation,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm",
triton_gemm_config:
{"block_m":16,"block_n":32,"block_k":512,
"split_k":1,"num_stages":4,"num_warps":8,
"num_ctas":1}}}
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti,
ParseTemplateAndGetInstruction(
kHloTest, {}, HloOpcode::kDot));
const se::DeviceDescription dev_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo(GetComputeCapability());
EXPECT_THAT(legacy_triton::IsTritonSupportedInstruction(
ti.Instruction(), GetComputeCapability())
.Explain(),
::testing::HasSubstr("Multiple batch dimensions"));
BlockLevelParameters block_level_parameters;
block_level_parameters.num_ctas = 1;
block_level_parameters.num_stages = 4;
block_level_parameters.num_warps = 8;
EXPECT_THAT(
TritonWrapper("test_fn", &ti.TritonFusion(), GetComputeCapability(),
dev_info, block_level_parameters, &llvm_module_,
mlir_context_),
tsl::testing::StatusIs(absl::StatusCode::kInternal,
::testing::HasSubstr("num_batch_dims <= 1")));
}
TEST_F(TritonSupportTestBase,
UnsupportedDotWithNoNonContractingDimensionsFailsGracefullyWithTriton) {
const std::string kHloTest = R"(
triton_computation {
parameter_0 = f32[2]{0} parameter(0)
parameter_1 = f32[2]{0} parameter(1)
ROOT dot = f32[] dot(parameter_0, parameter_1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
parameter_0 = f32[2]{0} parameter(0)
parameter_1 = f32[2]{0} parameter(1)
ROOT triton_op = f32[] fusion(parameter_0, parameter_1), kind=kCustom,
calls=triton_computation,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm"}}
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti,
ParseTemplateAndGetInstruction(
kHloTest, {}, HloOpcode::kDot));
EXPECT_THAT(legacy_triton::IsTritonSupportedInstruction(
ti.Instruction(), GetComputeCapability())
.Explain(),
::testing::HasSubstr("No non-contracting dimensions."));
EXPECT_THAT(TritonFusionAnalysis::Execute(ti.TritonComputation()),
tsl::testing::StatusIs(
absl::StatusCode::kInternal,
::testing::HasSubstr("non_contracting_dims.size() == 1")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton/triton_support_legacy.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton/triton_support_legacy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
57f1cd51-8a49-41d2-8617-e678c4a4dc31 | cpp | tensorflow/tensorflow | tensor_or_memref | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.cc | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tests/tensor_or_memref_test.cc | #include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
namespace mlir {
namespace interpreter {
std::optional<int64_t> BufferView::GetPhysicalIndex(
llvm::ArrayRef<int64_t> view_indices) const {
int64_t result = offset;
if (!InBounds(view_indices)) {
return std::nullopt;
}
for (int64_t i = 0; i < view_indices.size(); ++i) {
result += view_indices[i] * strides[i];
}
return result;
}
bool BufferView::InBounds(llvm::ArrayRef<int64_t> view_indices) const {
if (view_indices.size() > sizes.size()) {
return false;
}
for (auto [index, size] : llvm::zip(view_indices, sizes)) {
if (index < 0 || index >= size) {
return false;
}
}
return true;
}
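// Returns the default (major-to-minor) strides for `sizes`: the minormost
// dimension gets stride 1, and each dimension's stride is the product of the
// sizes of all dimensions to its right.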
SmallVector<int64_t> BufferView::GetDefaultStrides(ArrayRef<int64_t> sizes) {
SmallVector<int64_t> result(sizes.size());
int64_t stride = 1;
for (int64_t i = result.size() - 1; i >= 0; --i) {
result[i] = stride;
stride *= sizes[i];
}
return result;
}
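// `layout` lists dimensions from minor to major, so the permutation is
// inverted to assign strides in order of increasing majorness. For example,
// sizes {1,2,3} with layout {0,1,2} (dimension 0 minormost) yield strides
// {1,1,2}.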
SmallVector<int64_t> BufferView::GetStridesForLayout(ArrayRef<int64_t> sizes,
ArrayRef<int64_t> layout) {
if (layout.empty()) return GetDefaultStrides(sizes);
auto inverse_layout = invertPermutationVector(layout);
SmallVector<int64_t> result(sizes.size());
int64_t stride = 1;
for (int64_t i = 0; i < layout.size(); ++i) {
result[inverse_layout[i]] = stride;
stride *= sizes[inverse_layout[i]];
}
return result;
}
LogicalResult BufferView::Slice(int64_t dim_index, int64_t dim_offset) {
llvm::SmallVector<int64_t> offsets(Rank(), 0);
offsets[dim_index] = dim_offset;
if (auto new_offset = GetPhysicalIndex(offsets)) {
offset = *new_offset;
} else {
return failure();
}
if (dim_index >= Rank()) --*num_vector_dims;
strides.erase(strides.begin() + dim_index);
sizes.erase(sizes.begin() + dim_index);
return success();
}
LogicalResult BufferView::Slice(int64_t dim_index, int64_t dim_offset,
int64_t dim_size, int64_t dim_stride) {
llvm::SmallVector<int64_t> offsets(Rank(), 0);
offsets[dim_index] = dim_offset;
if (dim_size == 0) {
offset = 0;
} else if (auto new_offset = GetPhysicalIndex(offsets)) {
offset = *new_offset;
} else {
return failure();
}
sizes[dim_index] = dim_size;
strides[dim_index] *= dim_stride;
return success();
}
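// Applies a subview in place: rebases the physical offset to the subview's
// origin, rejects subviews whose first or last element falls outside the
// parent view, composes the subview strides into the existing ones, and
// adopts the subview sizes.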
LogicalResult BufferView::Subview(ArrayRef<int64_t> subview_offsets,
ArrayRef<int64_t> subview_sizes,
ArrayRef<int64_t> subview_strides) {
if (auto new_offset = GetPhysicalIndex(subview_offsets)) {
offset = *new_offset;
} else {
return failure();
}
for (auto [in_size, subview_offset, subview_size, subview_stride] :
llvm::zip(sizes, subview_offsets, subview_sizes, subview_strides)) {
int64_t limit_index = subview_offset + (subview_size - 1) * subview_stride;
if (subview_offset < 0 || subview_offset >= in_size || limit_index < 0 ||
limit_index >= in_size) {
return failure();
}
}
for (auto [in_stride, subview_stride] : llvm::zip(strides, subview_strides)) {
in_stride *= subview_stride;
}
sizes = llvm::to_vector(subview_sizes);
return success();
}
int64_t BufferView::GetNumElements(bool include_vector_dims) const {
size_t n = 1;
for (auto size : ArrayRef<int64_t>(sizes).drop_back(
include_vector_dims ? 0 : num_vector_dims.value_or(0))) {
n *= size;
}
return n;
}
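// Returns the stride that the dimensions in `dims` would have if collapsed
// into a single dimension, or nullopt if they are not contiguous in memory.
// Size-1 dimensions are ignored; if nothing remains, the collapsed stride
// is 0.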
std::optional<int64_t> BufferView::GetCollapsedStride(
llvm::ArrayRef<int64_t> dims) const {
using StrideAndDim = std::pair<int64_t, int64_t>;
llvm::SmallVector<StrideAndDim> strides_and_dims;
for (auto dim : dims) {
if (sizes[dim] != 1) {
strides_and_dims.emplace_back(strides[dim], dim);
}
}
if (strides_and_dims.empty()) {
return 0;
}
llvm::sort(strides_and_dims);
int64_t next_stride = strides_and_dims.front().first;
for (auto [stride, dim] : strides_and_dims) {
if (stride != next_stride) {
return std::nullopt;
}
next_stride *= sizes[dim];
}
return strides_and_dims.front().first;
}
}
} | #include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_join.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Support/LLVM.h"
namespace mlir {
namespace interpreter {
namespace {
using ::testing::ElementsAre;
TEST(TensorOrMemrefTest, DefaultStrides) {
EXPECT_THAT(BufferView::GetDefaultStrides({1, 2, 3}), ElementsAre(6, 3, 1));
}
TEST(TensorOrMemrefTest, StridesForLayout) {
EXPECT_THAT(BufferView::GetStridesForLayout({1, 2, 3}, {2, 1, 0}),
ElementsAre(6, 3, 1));
EXPECT_THAT(BufferView::GetStridesForLayout({1, 2, 3}, {0, 1, 2}),
ElementsAre(1, 1, 2));
EXPECT_THAT(BufferView::GetStridesForLayout({3, 3, 3, 3}, {3, 0, 1, 2}),
ElementsAre(27, 1, 3, 9));
}
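// Brute-force oracle for GetCollapsedStride: marks every physical index
// reachable by enumerating the collapsed dimensions in a bit vector and
// checks that the marked indices form an arithmetic progression, whose step
// is the collapsed stride.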
std::optional<int64_t> GetCollapsedStrideNaive(llvm::ArrayRef<int64_t> dims,
const BufferView& view) {
BufferView f;
for (int64_t dim : dims) {
f.sizes.push_back(view.sizes[dim]);
}
llvm::SmallBitVector v(view.GetNumElements());
for (const auto& indices : f.Indices()) {
SmallVector<int64_t> view_indices(view.Rank());
for (auto [dim, index] : llvm::zip(dims, indices)) {
view_indices[dim] = index;
}
v[*view.GetPhysicalIndex(view_indices)] = true;
}
if (v.count() != f.GetNumElements()) return std::nullopt;
if (f.GetNumElements() <= 1) return 0;
int64_t min = v.find_first();
int64_t expected_stride = (v.find_last() - min) / (f.GetNumElements() - 1);
for (int64_t i = 0; i < f.GetNumElements(); ++i) {
if (!v[i * expected_stride + min]) {
return std::nullopt;
}
}
return expected_stride;
}
TEST(TensorOrMemrefTest, CollapsedStride) {
BufferView view{.sizes = {1, 2, 3, 1, 5},
.strides = BufferView::GetDefaultStrides({1, 2, 3, 1, 5})};
auto check_all = [&]() {
for (int64_t i = 0; i < (1 << view.Rank()); ++i) {
SmallVector<int64_t> dims;
for (int64_t dim = 0; dim < view.Rank(); ++dim) {
if (i & (1 << dim)) dims.push_back(dim);
}
do {
auto v = view.GetCollapsedStride(dims);
auto n = GetCollapsedStrideNaive(dims, view);
EXPECT_EQ(n, v) << "checking " << absl::StrJoin(dims, ", ");
} while (std::next_permutation(dims.begin(), dims.end()));
}
};
check_all();
ASSERT_TRUE(view.Slice(3, 0).succeeded());
check_all();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tests/tensor_or_memref_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be659964-e353-481a-b2dd-23f6f1b2652a | cpp | google/googletest | gmock-spec-builders | googlemock/src/gmock-spec-builders.cc | googlemock/test/gmock-spec-builders_test.cc | #include "gmock/gmock-spec-builders.h"
#include <stdlib.h>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "gtest/internal/gtest-port.h"
#if defined(GTEST_OS_CYGWIN) || defined(GTEST_OS_LINUX) || defined(GTEST_OS_MAC)
#include <unistd.h>
#endif
#ifdef GTEST_OS_QURT
#include <qurt_event.h>
#endif
#if defined(_MSC_VER) && (_MSC_VER == 1900)
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800)
#endif
namespace testing {
namespace internal {
GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_gmock_mutex);
GTEST_API_ void LogWithLocation(testing::internal::LogSeverity severity,
const char* file, int line,
const std::string& message) {
::std::ostringstream s;
s << internal::FormatFileLocation(file, line) << " " << message
<< ::std::endl;
Log(severity, s.str(), 0);
}
ExpectationBase::ExpectationBase(const char* a_file, int a_line,
const std::string& a_source_text)
: file_(a_file),
line_(a_line),
source_text_(a_source_text),
cardinality_specified_(false),
cardinality_(Exactly(1)),
call_count_(0),
retired_(false),
extra_matcher_specified_(false),
repeated_action_specified_(false),
retires_on_saturation_(false),
last_clause_(kNone),
action_count_checked_(false) {}
ExpectationBase::~ExpectationBase() = default;
void ExpectationBase::SpecifyCardinality(const Cardinality& a_cardinality) {
cardinality_specified_ = true;
cardinality_ = a_cardinality;
}
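// Retires all transitive prerequisites of this expectation, using an explicit
// work stack instead of recursion; already-retired expectations are skipped,
// so each expectation is visited at most once.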
void ExpectationBase::RetireAllPreRequisites()
GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
if (is_retired()) {
return;
}
::std::vector<ExpectationBase*> expectations(1, this);
while (!expectations.empty()) {
ExpectationBase* exp = expectations.back();
expectations.pop_back();
for (ExpectationSet::const_iterator it =
exp->immediate_prerequisites_.begin();
it != exp->immediate_prerequisites_.end(); ++it) {
ExpectationBase* next = it->expectation_base().get();
if (!next->is_retired()) {
next->Retire();
expectations.push_back(next);
}
}
}
}
bool ExpectationBase::AllPrerequisitesAreSatisfied() const
GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
g_gmock_mutex.AssertHeld();
::std::vector<const ExpectationBase*> expectations(1, this);
while (!expectations.empty()) {
const ExpectationBase* exp = expectations.back();
expectations.pop_back();
for (ExpectationSet::const_iterator it =
exp->immediate_prerequisites_.begin();
it != exp->immediate_prerequisites_.end(); ++it) {
const ExpectationBase* next = it->expectation_base().get();
if (!next->IsSatisfied()) return false;
expectations.push_back(next);
}
}
return true;
}
void ExpectationBase::FindUnsatisfiedPrerequisites(ExpectationSet* result) const
GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
g_gmock_mutex.AssertHeld();
::std::vector<const ExpectationBase*> expectations(1, this);
while (!expectations.empty()) {
const ExpectationBase* exp = expectations.back();
expectations.pop_back();
for (ExpectationSet::const_iterator it =
exp->immediate_prerequisites_.begin();
it != exp->immediate_prerequisites_.end(); ++it) {
const ExpectationBase* next = it->expectation_base().get();
if (next->IsSatisfied()) {
if (next->call_count_ == 0) {
expectations.push_back(next);
}
} else {
*result += *it;
}
}
}
}
void ExpectationBase::DescribeCallCountTo(::std::ostream* os) const
GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
g_gmock_mutex.AssertHeld();
*os << " Expected: to be ";
cardinality().DescribeTo(os);
*os << "\n Actual: ";
Cardinality::DescribeActualCallCountTo(call_count(), os);
*os << " - "
<< (IsOverSaturated() ? "over-saturated"
: IsSaturated() ? "saturated"
: IsSatisfied() ? "satisfied"
: "unsatisfied")
<< " and " << (is_retired() ? "retired" : "active");
}
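// Warns (at most once per expectation) if the number of WillOnce() actions is
// inconsistent with the specified cardinality: too many if it exceeds the
// conservative upper bound, too few if it is positive but below the lower
// bound and no WillRepeatedly() was given.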
void ExpectationBase::CheckActionCountIfNotDone() const
GTEST_LOCK_EXCLUDED_(mutex_) {
bool should_check = false;
{
MutexLock l(&mutex_);
if (!action_count_checked_) {
action_count_checked_ = true;
should_check = true;
}
}
if (should_check) {
if (!cardinality_specified_) {
return;
}
const int action_count = static_cast<int>(untyped_actions_.size());
const int upper_bound = cardinality().ConservativeUpperBound();
const int lower_bound = cardinality().ConservativeLowerBound();
bool too_many;
if (action_count > upper_bound ||
(action_count == upper_bound && repeated_action_specified_)) {
too_many = true;
} else if (0 < action_count && action_count < lower_bound &&
!repeated_action_specified_) {
too_many = false;
} else {
return;
}
::std::stringstream ss;
DescribeLocationTo(&ss);
ss << "Too " << (too_many ? "many" : "few") << " actions specified in "
<< source_text() << "...\n"
<< "Expected to be ";
cardinality().DescribeTo(&ss);
ss << ", but has " << (too_many ? "" : "only ") << action_count
<< " WillOnce()" << (action_count == 1 ? "" : "s");
if (repeated_action_specified_) {
ss << " and a WillRepeatedly()";
}
ss << ".";
Log(kWarning, ss.str(), -1);
}
}
void ExpectationBase::UntypedTimes(const Cardinality& a_cardinality) {
if (last_clause_ == kTimes) {
ExpectSpecProperty(false,
".Times() cannot appear "
"more than once in an EXPECT_CALL().");
} else {
ExpectSpecProperty(
last_clause_ < kTimes,
".Times() may only appear *before* .InSequence(), .WillOnce(), "
".WillRepeatedly(), or .RetiresOnSaturation(), not after.");
}
last_clause_ = kTimes;
SpecifyCardinality(a_cardinality);
}
GTEST_API_ ThreadLocal<Sequence*> g_gmock_implicit_sequence;
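// Reports an uninteresting call according to the given reaction: logged as
// info when allowed, logged as a warning (with an explanatory note) when
// warned about, and reported as a test failure otherwise. Under
// --gmock_verbose=info the log also carries a stack trace.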
void ReportUninterestingCall(CallReaction reaction, const std::string& msg) {
const int stack_frames_to_skip =
GMOCK_FLAG_GET(verbose) == kInfoVerbosity ? 3 : -1;
switch (reaction) {
case kAllow:
Log(kInfo, msg, stack_frames_to_skip);
break;
case kWarn:
Log(kWarning,
msg +
"\nNOTE: You can safely ignore the above warning unless this "
"call should not happen. Do not suppress it by blindly adding "
"an EXPECT_CALL() if you don't mean to enforce the call. "
"See "
"https:
"gmock_cook_book.md#"
"knowing-when-to-expect-useoncall for details.\n",
stack_frames_to_skip);
break;
default:
Expect(false, nullptr, -1, msg);
}
}
UntypedFunctionMockerBase::UntypedFunctionMockerBase()
: mock_obj_(nullptr), name_("") {}
UntypedFunctionMockerBase::~UntypedFunctionMockerBase() = default;
void UntypedFunctionMockerBase::RegisterOwner(const void* mock_obj)
GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
{
MutexLock l(&g_gmock_mutex);
mock_obj_ = mock_obj;
}
Mock::Register(mock_obj, this);
}
void UntypedFunctionMockerBase::SetOwnerAndName(const void* mock_obj,
const char* name)
GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
MutexLock l(&g_gmock_mutex);
mock_obj_ = mock_obj;
name_ = name;
}
const void* UntypedFunctionMockerBase::MockObject() const
GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
const void* mock_obj;
{
MutexLock l(&g_gmock_mutex);
Assert(mock_obj_ != nullptr, __FILE__, __LINE__,
"MockObject() must not be called before RegisterOwner() or "
"SetOwnerAndName() has been called.");
mock_obj = mock_obj_;
}
return mock_obj;
}
const char* UntypedFunctionMockerBase::Name() const
GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
const char* name;
{
MutexLock l(&g_gmock_mutex);
Assert(name_ != nullptr, __FILE__, __LINE__,
"Name() must not be called before SetOwnerAndName() has "
"been called.");
name = name_;
}
return name;
}
Expectation UntypedFunctionMockerBase::GetHandleOf(ExpectationBase* exp) {
for (UntypedExpectations::const_iterator it = untyped_expectations_.begin();
it != untyped_expectations_.end(); ++it) {
if (it->get() == exp) {
return Expectation(*it);
}
}
Assert(false, __FILE__, __LINE__, "Cannot find expectation.");
return Expectation();
}
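// Verifies that all expectations on this mock function have been satisfied,
// reporting a test failure for each one that is not, and then clears them.
// The expectations are destroyed with g_gmock_mutex temporarily released,
// since their destructors may run code that needs the lock.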
bool UntypedFunctionMockerBase::VerifyAndClearExpectationsLocked()
GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
g_gmock_mutex.AssertHeld();
bool expectations_met = true;
for (UntypedExpectations::const_iterator it = untyped_expectations_.begin();
it != untyped_expectations_.end(); ++it) {
ExpectationBase* const untyped_expectation = it->get();
if (untyped_expectation->IsOverSaturated()) {
expectations_met = false;
} else if (!untyped_expectation->IsSatisfied()) {
expectations_met = false;
::std::stringstream ss;
const ::std::string& expectation_name =
untyped_expectation->GetDescription();
ss << "Actual function ";
if (!expectation_name.empty()) {
ss << "\"" << expectation_name << "\" ";
}
ss << "call count doesn't match " << untyped_expectation->source_text()
<< "...\n";
untyped_expectation->MaybeDescribeExtraMatcherTo(&ss);
untyped_expectation->DescribeCallCountTo(&ss);
Expect(false, untyped_expectation->file(), untyped_expectation->line(),
ss.str());
}
}
UntypedExpectations expectations_to_delete;
untyped_expectations_.swap(expectations_to_delete);
g_gmock_mutex.Unlock();
expectations_to_delete.clear();
g_gmock_mutex.Lock();
return expectations_met;
}
static CallReaction intToCallReaction(int mock_behavior) {
if (mock_behavior >= kAllow && mock_behavior <= kFail) {
return static_cast<internal::CallReaction>(mock_behavior);
}
return kWarn;
}
}
namespace {
typedef std::set<internal::UntypedFunctionMockerBase*> FunctionMockers;
struct MockObjectState {
MockObjectState()
: first_used_file(nullptr), first_used_line(-1), leakable(false) {}
const char* first_used_file;
int first_used_line;
::std::string first_used_test_suite;
::std::string first_used_test;
bool leakable;
FunctionMockers function_mockers;
};
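// Maps a mock object's address to its state. The registry's destructor runs
// at program exit and reports every mock object that was used but never
// destroyed, unless the object was marked as leakable via Mock::AllowLeak().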
class MockObjectRegistry {
public:
typedef std::map<const void*, MockObjectState> StateMap;
~MockObjectRegistry() {
if (!GMOCK_FLAG_GET(catch_leaked_mocks)) return;
internal::MutexLock l(&internal::g_gmock_mutex);
int leaked_count = 0;
for (StateMap::const_iterator it = states_.begin(); it != states_.end();
++it) {
if (it->second.leakable)
continue;
std::cout << "\n";
const MockObjectState& state = it->second;
std::cout << internal::FormatFileLocation(state.first_used_file,
state.first_used_line);
std::cout << " ERROR: this mock object";
if (!state.first_used_test.empty()) {
std::cout << " (used in test " << state.first_used_test_suite << "."
<< state.first_used_test << ")";
}
std::cout << " should be deleted but never is. Its address is @"
<< it->first << ".";
leaked_count++;
}
if (leaked_count > 0) {
std::cout << "\nERROR: " << leaked_count << " leaked mock "
<< (leaked_count == 1 ? "object" : "objects")
<< " found at program exit. Expectations on a mock object are "
"verified when the object is destructed. Leaking a mock "
"means that its expectations aren't verified, which is "
"usually a test bug. If you really intend to leak a mock, "
"you can suppress this error using "
"testing::Mock::AllowLeak(mock_object), or you may use a "
"fake or stub instead of a mock.\n";
std::cout.flush();
::std::cerr.flush();
#ifdef GTEST_OS_QURT
qurt_exception_raise_fatal();
#else
_Exit(1);
#endif
}
}
StateMap& states() { return states_; }
private:
StateMap states_;
};
MockObjectRegistry g_mock_object_registry;
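// Intentionally leaked singleton mapping a mock object's address to the
// reaction (allow / warn / fail) it should have on uninteresting calls;
// guarded by g_gmock_mutex.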
std::unordered_map<uintptr_t, internal::CallReaction>&
UninterestingCallReactionMap() {
static auto* map = new std::unordered_map<uintptr_t, internal::CallReaction>;
return *map;
}
void SetReactionOnUninterestingCalls(uintptr_t mock_obj,
internal::CallReaction reaction)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
internal::MutexLock l(&internal::g_gmock_mutex);
UninterestingCallReactionMap()[mock_obj] = reaction;
}
}
void Mock::AllowUninterestingCalls(uintptr_t mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
SetReactionOnUninterestingCalls(mock_obj, internal::kAllow);
}
void Mock::WarnUninterestingCalls(uintptr_t mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
SetReactionOnUninterestingCalls(mock_obj, internal::kWarn);
}
void Mock::FailUninterestingCalls(uintptr_t mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
SetReactionOnUninterestingCalls(mock_obj, internal::kFail);
}
void Mock::UnregisterCallReaction(uintptr_t mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
internal::MutexLock l(&internal::g_gmock_mutex);
UninterestingCallReactionMap().erase(static_cast<uintptr_t>(mock_obj));
}
internal::CallReaction Mock::GetReactionOnUninterestingCalls(
const void* mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
internal::MutexLock l(&internal::g_gmock_mutex);
return (UninterestingCallReactionMap().count(
reinterpret_cast<uintptr_t>(mock_obj)) == 0)
? internal::intToCallReaction(
GMOCK_FLAG_GET(default_mock_behavior))
: UninterestingCallReactionMap()[reinterpret_cast<uintptr_t>(
mock_obj)];
}
void Mock::AllowLeak(const void* mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
internal::MutexLock l(&internal::g_gmock_mutex);
g_mock_object_registry.states()[mock_obj].leakable = true;
}
bool Mock::VerifyAndClearExpectations(void* mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
internal::MutexLock l(&internal::g_gmock_mutex);
return VerifyAndClearExpectationsLocked(mock_obj);
}
bool Mock::VerifyAndClear(void* mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
internal::MutexLock l(&internal::g_gmock_mutex);
ClearDefaultActionsLocked(mock_obj);
return VerifyAndClearExpectationsLocked(mock_obj);
}
bool Mock::VerifyAndClearExpectationsLocked(void* mock_obj)
GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex) {
internal::g_gmock_mutex.AssertHeld();
if (g_mock_object_registry.states().count(mock_obj) == 0) {
return true;
}
bool expectations_met = true;
FunctionMockers& mockers =
g_mock_object_registry.states()[mock_obj].function_mockers;
for (FunctionMockers::const_iterator it = mockers.begin();
it != mockers.end(); ++it) {
if (!(*it)->VerifyAndClearExpectationsLocked()) {
expectations_met = false;
}
}
return expectations_met;
}
bool Mock::IsNaggy(void* mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
return Mock::GetReactionOnUninterestingCalls(mock_obj) == internal::kWarn;
}
bool Mock::IsNice(void* mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
return Mock::GetReactionOnUninterestingCalls(mock_obj) == internal::kAllow;
}
bool Mock::IsStrict(void* mock_obj)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
return Mock::GetReactionOnUninterestingCalls(mock_obj) == internal::kFail;
}
void Mock::Register(const void* mock_obj,
internal::UntypedFunctionMockerBase* mocker)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
internal::MutexLock l(&internal::g_gmock_mutex);
g_mock_object_registry.states()[mock_obj].function_mockers.insert(mocker);
}
void Mock::RegisterUseByOnCallOrExpectCall(const void* mock_obj,
const char* file, int line)
GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
internal::MutexLock l(&internal::g_gmock_mutex);
MockObjectState& state = g_mock_object_registry.states()[mock_obj];
if (state.first_used_file == nullptr) {
state.first_used_file = file;
state.first_used_line = line;
const TestInfo* const test_info =
UnitTest::GetInstance()->current_test_info();
if (test_info != nullptr) {
state.first_used_test_suite = test_info->test_suite_name();
state.first_used_test = test_info->name();
}
}
}
void Mock::UnregisterLocked(internal::UntypedFunctionMockerBase* mocker)
GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex) {
internal::g_gmock_mutex.AssertHeld();
for (MockObjectRegistry::StateMap::iterator it =
g_mock_object_registry.states().begin();
it != g_mock_object_registry.states().end(); ++it) {
FunctionMockers& mockers = it->second.function_mockers;
if (mockers.erase(mocker) > 0) {
if (mockers.empty()) {
g_mock_object_registry.states().erase(it);
}
return;
}
}
}
void Mock::ClearDefaultActionsLocked(void* mock_obj)
GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex) {
internal::g_gmock_mutex.AssertHeld();
if (g_mock_object_registry.states().count(mock_obj) == 0) {
return;
}
FunctionMockers& mockers =
g_mock_object_registry.states()[mock_obj].function_mockers;
for (FunctionMockers::const_iterator it = mockers.begin();
it != mockers.end(); ++it) {
(*it)->ClearDefaultActionsLocked();
}
}
Expectation::Expectation() = default;
Expectation::Expectation(
const std::shared_ptr<internal::ExpectationBase>& an_expectation_base)
: expectation_base_(an_expectation_base) {}
Expectation::~Expectation() = default;
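// Chains expectations within this sequence: each newly added expectation
// receives the previously added one as an immediate prerequisite.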
void Sequence::AddExpectation(const Expectation& expectation) const {
if (*last_expectation_ != expectation) {
if (last_expectation_->expectation_base() != nullptr) {
expectation.expectation_base()->immediate_prerequisites_ +=
*last_expectation_;
}
*last_expectation_ = expectation;
}
}
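// The first InSequence object on a thread installs a thread-local implicit
// sequence that subsequent EXPECT_CALLs join; nested InSequence objects reuse
// it, and only the object that created the sequence tears it down.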
InSequence::InSequence() {
if (internal::g_gmock_implicit_sequence.get() == nullptr) {
internal::g_gmock_implicit_sequence.set(new Sequence);
sequence_created_ = true;
} else {
sequence_created_ = false;
}
}
InSequence::~InSequence() {
if (sequence_created_) {
delete internal::g_gmock_implicit_sequence.get();
internal::g_gmock_implicit_sequence.set(nullptr);
}
}
}
#if defined(_MSC_VER) && (_MSC_VER == 1900)
GTEST_DISABLE_MSC_WARNINGS_POP_()
#endif | #include "gmock/gmock-spec-builders.h"
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <type_traits>
#include "gmock/gmock.h"
#include "gmock/internal/gmock-port.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#include "gtest/internal/gtest-port.h"
namespace testing {
namespace {
using ::testing::internal::FormatFileLocation;
using ::testing::internal::kAllow;
using ::testing::internal::kErrorVerbosity;
using ::testing::internal::kFail;
using ::testing::internal::kInfoVerbosity;
using ::testing::internal::kWarn;
using ::testing::internal::kWarningVerbosity;
#if GTEST_HAS_STREAM_REDIRECTION
using ::testing::internal::CaptureStdout;
using ::testing::internal::GetCapturedStdout;
#endif
class Incomplete;
class MockIncomplete {
public:
MOCK_METHOD1(ByRefFunc, void(const Incomplete& x));
};
void PrintTo(const Incomplete& x, ::std::ostream* os);
TEST(MockMethodTest, CanInstantiateWithIncompleteArgType) {
MockIncomplete incomplete;
EXPECT_CALL(incomplete, ByRefFunc(_)).Times(AnyNumber());
}
void PrintTo(const Incomplete&, ::std::ostream* os) {
*os << "incomplete";
}
class Result {};
class NonDefaultConstructible {
public:
  explicit NonDefaultConstructible(int) {}
};
class MockA {
public:
MockA() = default;
MOCK_METHOD1(DoA, void(int n));
MOCK_METHOD1(ReturnResult, Result(int n));
MOCK_METHOD0(ReturnNonDefaultConstructible, NonDefaultConstructible());
MOCK_METHOD2(Binary, bool(int x, int y));
MOCK_METHOD2(ReturnInt, int(int x, int y));
private:
MockA(const MockA&) = delete;
MockA& operator=(const MockA&) = delete;
};
class MockB {
public:
MockB() = default;
MOCK_CONST_METHOD0(DoB, int());
MOCK_METHOD1(DoB, int(int n));
private:
MockB(const MockB&) = delete;
MockB& operator=(const MockB&) = delete;
};
class ReferenceHoldingMock {
public:
ReferenceHoldingMock() = default;
MOCK_METHOD1(AcceptReference, void(std::shared_ptr<MockA>*));
private:
ReferenceHoldingMock(const ReferenceHoldingMock&) = delete;
ReferenceHoldingMock& operator=(const ReferenceHoldingMock&) = delete;
};
#define Method MethodW
class CC {
public:
virtual ~CC() = default;
virtual int Method() = 0;
};
class MockCC : public CC {
public:
MockCC() = default;
MOCK_METHOD0(Method, int());
private:
MockCC(const MockCC&) = delete;
MockCC& operator=(const MockCC&) = delete;
};
TEST(OnCallSyntaxTest, CompilesWithMethodNameExpandedFromMacro) {
MockCC cc;
ON_CALL(cc, Method());
}
TEST(OnCallSyntaxTest, WorksWithMethodNameExpandedFromMacro) {
MockCC cc;
ON_CALL(cc, Method()).WillByDefault(Return(42));
EXPECT_EQ(42, cc.Method());
}
TEST(ExpectCallSyntaxTest, CompilesWithMethodNameExpandedFromMacro) {
MockCC cc;
EXPECT_CALL(cc, Method());
cc.Method();
}
TEST(ExpectCallSyntaxTest, WorksWithMethodNameExpandedFromMacro) {
MockCC cc;
EXPECT_CALL(cc, Method()).WillOnce(Return(42));
EXPECT_EQ(42, cc.Method());
}
#undef Method
TEST(OnCallSyntaxTest, EvaluatesFirstArgumentOnce) {
MockA a;
MockA* pa = &a;
ON_CALL(*pa++, DoA(_));
EXPECT_EQ(&a + 1, pa);
}
TEST(OnCallSyntaxTest, EvaluatesSecondArgumentOnce) {
MockA a;
int n = 0;
ON_CALL(a, DoA(n++));
EXPECT_EQ(1, n);
}
TEST(OnCallSyntaxTest, WithIsOptional) {
MockA a;
ON_CALL(a, DoA(5)).WillByDefault(Return());
ON_CALL(a, DoA(_)).With(_).WillByDefault(Return());
}
TEST(OnCallSyntaxTest, WithCanAppearAtMostOnce) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
ON_CALL(a, ReturnResult(_))
.With(_)
.With(_)
.WillByDefault(Return(Result()));
},
".With() cannot appear more than once in an ON_CALL()");
}
TEST(OnCallSyntaxTest, WillByDefaultIsMandatory) {
MockA a;
EXPECT_DEATH_IF_SUPPORTED(
{
ON_CALL(a, DoA(5));
a.DoA(5);
},
"");
}
TEST(OnCallSyntaxTest, WillByDefaultCanAppearAtMostOnce) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
ON_CALL(a, DoA(5)).WillByDefault(Return()).WillByDefault(Return());
},
".WillByDefault() must appear exactly once in an ON_CALL()");
}
TEST(ExpectCallSyntaxTest, EvaluatesFirstArgumentOnce) {
MockA a;
MockA* pa = &a;
EXPECT_CALL(*pa++, DoA(_));
a.DoA(0);
EXPECT_EQ(&a + 1, pa);
}
TEST(ExpectCallSyntaxTest, EvaluatesSecondArgumentOnce) {
MockA a;
int n = 0;
EXPECT_CALL(a, DoA(n++));
a.DoA(0);
EXPECT_EQ(1, n);
}
TEST(ExpectCallSyntaxTest, WithIsOptional) {
MockA a;
EXPECT_CALL(a, DoA(5)).Times(0);
EXPECT_CALL(a, DoA(6)).With(_).Times(0);
}
TEST(ExpectCallSyntaxTest, WithCanAppearAtMostOnce) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(6)).With(_).With(_);
},
".With() cannot appear more than once in an EXPECT_CALL()");
a.DoA(6);
}
TEST(ExpectCallSyntaxTest, WithMustBeFirstClause) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(1)).Times(1).With(_);
},
".With() must be the first clause in an EXPECT_CALL()");
a.DoA(1);
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(2)).WillOnce(Return()).With(_);
},
".With() must be the first clause in an EXPECT_CALL()");
a.DoA(2);
}
TEST(ExpectCallSyntaxTest, TimesCanBeInferred) {
MockA a;
EXPECT_CALL(a, DoA(1)).WillOnce(Return());
EXPECT_CALL(a, DoA(2)).WillOnce(Return()).WillRepeatedly(Return());
a.DoA(1);
a.DoA(2);
a.DoA(2);
}
TEST(ExpectCallSyntaxTest, TimesCanAppearAtMostOnce) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(1)).Times(1).Times(2);
},
".Times() cannot appear more than once in an EXPECT_CALL()");
a.DoA(1);
a.DoA(1);
}
TEST(ExpectCallSyntaxTest, TimesMustBeBeforeInSequence) {
MockA a;
Sequence s;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(1)).InSequence(s).Times(1);
},
".Times() may only appear *before* ");
a.DoA(1);
}
TEST(ExpectCallSyntaxTest, InSequenceIsOptional) {
MockA a;
Sequence s;
EXPECT_CALL(a, DoA(1));
EXPECT_CALL(a, DoA(2)).InSequence(s);
a.DoA(1);
a.DoA(2);
}
TEST(ExpectCallSyntaxTest, InSequenceCanAppearMultipleTimes) {
MockA a;
Sequence s1, s2;
EXPECT_CALL(a, DoA(1)).InSequence(s1, s2).InSequence(s1);
a.DoA(1);
}
TEST(ExpectCallSyntaxTest, InSequenceMustBeBeforeAfter) {
MockA a;
Sequence s;
Expectation e = EXPECT_CALL(a, DoA(1)).Times(AnyNumber());
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(2)).After(e).InSequence(s);
},
".InSequence() cannot appear after ");
a.DoA(2);
}
TEST(ExpectCallSyntaxTest, InSequenceMustBeBeforeWillOnce) {
MockA a;
Sequence s;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(1)).WillOnce(Return()).InSequence(s);
},
".InSequence() cannot appear after ");
a.DoA(1);
}
TEST(ExpectCallSyntaxTest, AfterMustBeBeforeWillOnce) {
MockA a;
Expectation e = EXPECT_CALL(a, DoA(1));
EXPECT_NONFATAL_FAILURE(
{ EXPECT_CALL(a, DoA(2)).WillOnce(Return()).After(e); },
".After() cannot appear after ");
a.DoA(1);
a.DoA(2);
}
TEST(ExpectCallSyntaxTest, WillIsOptional) {
MockA a;
EXPECT_CALL(a, DoA(1));
EXPECT_CALL(a, DoA(2)).WillOnce(Return());
a.DoA(1);
a.DoA(2);
}
TEST(ExpectCallSyntaxTest, WillCanAppearMultipleTimes) {
MockA a;
EXPECT_CALL(a, DoA(1))
.Times(AnyNumber())
.WillOnce(Return())
.WillOnce(Return())
.WillOnce(Return());
}
TEST(ExpectCallSyntaxTest, WillMustBeBeforeWillRepeatedly) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(1)).WillRepeatedly(Return()).WillOnce(Return());
},
".WillOnce() cannot appear after ");
a.DoA(1);
}
TEST(ExpectCallSyntaxTest, WillRepeatedlyIsOptional) {
MockA a;
EXPECT_CALL(a, DoA(1)).WillOnce(Return());
EXPECT_CALL(a, DoA(2)).WillOnce(Return()).WillRepeatedly(Return());
a.DoA(1);
a.DoA(2);
a.DoA(2);
}
TEST(ExpectCallSyntaxTest, WillRepeatedlyCannotAppearMultipleTimes) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(1)).WillRepeatedly(Return()).WillRepeatedly(
Return());
},
".WillRepeatedly() cannot appear more than once in an "
"EXPECT_CALL()");
}
TEST(ExpectCallSyntaxTest, WillRepeatedlyMustBeBeforeRetiresOnSaturation) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(1)).RetiresOnSaturation().WillRepeatedly(Return());
},
".WillRepeatedly() cannot appear after ");
}
TEST(ExpectCallSyntaxTest, RetiresOnSaturationIsOptional) {
MockA a;
EXPECT_CALL(a, DoA(1));
EXPECT_CALL(a, DoA(1)).RetiresOnSaturation();
a.DoA(1);
a.DoA(1);
}
TEST(ExpectCallSyntaxTest, RetiresOnSaturationCannotAppearMultipleTimes) {
MockA a;
EXPECT_NONFATAL_FAILURE(
{
EXPECT_CALL(a, DoA(1)).RetiresOnSaturation().RetiresOnSaturation();
},
".RetiresOnSaturation() cannot appear more than once");
a.DoA(1);
}
TEST(ExpectCallSyntaxTest, DefaultCardinalityIsOnce) {
{
MockA a;
EXPECT_CALL(a, DoA(1));
a.DoA(1);
}
EXPECT_NONFATAL_FAILURE(
{
MockA a;
EXPECT_CALL(a, DoA(1));
},
"to be called once");
EXPECT_NONFATAL_FAILURE(
{
MockA a;
EXPECT_CALL(a, DoA(1));
a.DoA(1);
a.DoA(1);
},
"to be called once");
}
#if GTEST_HAS_STREAM_REDIRECTION
TEST(ExpectCallSyntaxTest, DoesNotWarnOnAdequateActionCount) {
CaptureStdout();
{
MockB b;
EXPECT_CALL(b, DoB()).Times(0);
EXPECT_CALL(b, DoB(1)).Times(AtMost(1));
EXPECT_CALL(b, DoB(2)).Times(1).WillRepeatedly(Return(1));
EXPECT_CALL(b, DoB(3))
.Times(Between(1, 2))
.WillOnce(Return(1))
.WillOnce(Return(2));
EXPECT_CALL(b, DoB(4)).Times(AtMost(3)).WillOnce(Return(1)).WillRepeatedly(
Return(2));
b.DoB(2);
b.DoB(3);
}
EXPECT_STREQ("", GetCapturedStdout().c_str());
}
TEST(ExpectCallSyntaxTest, WarnsOnTooManyActions) {
CaptureStdout();
{
MockB b;
EXPECT_CALL(b, DoB()).Times(0).WillOnce(Return(1));
EXPECT_CALL(b, DoB()).Times(AtMost(1)).WillOnce(Return(1)).WillOnce(
Return(2));
EXPECT_CALL(b, DoB(1))
.Times(1)
.WillOnce(Return(1))
.WillOnce(Return(2))
.RetiresOnSaturation();
EXPECT_CALL(b, DoB()).Times(0).WillRepeatedly(Return(1));
EXPECT_CALL(b, DoB(2)).Times(1).WillOnce(Return(1)).WillRepeatedly(
Return(2));
b.DoB(1);
b.DoB(2);
}
const std::string output = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring,
"Too many actions specified in EXPECT_CALL(b, DoB())...\n"
"Expected to be never called, but has 1 WillOnce().",
output);
EXPECT_PRED_FORMAT2(IsSubstring,
"Too many actions specified in EXPECT_CALL(b, DoB())...\n"
"Expected to be called at most once, "
"but has 2 WillOnce()s.",
output);
EXPECT_PRED_FORMAT2(
IsSubstring,
"Too many actions specified in EXPECT_CALL(b, DoB(1))...\n"
"Expected to be called once, but has 2 WillOnce()s.",
output);
EXPECT_PRED_FORMAT2(IsSubstring,
"Too many actions specified in EXPECT_CALL(b, DoB())...\n"
"Expected to be never called, but has 0 WillOnce()s "
"and a WillRepeatedly().",
output);
EXPECT_PRED_FORMAT2(
IsSubstring,
"Too many actions specified in EXPECT_CALL(b, DoB(2))...\n"
"Expected to be called once, but has 1 WillOnce() "
"and a WillRepeatedly().",
output);
}
TEST(ExpectCallSyntaxTest, WarnsOnTooFewActions) {
MockB b;
EXPECT_CALL(b, DoB()).Times(Between(2, 3)).WillOnce(Return(1));
CaptureStdout();
b.DoB();
const std::string output = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring,
"Too few actions specified in EXPECT_CALL(b, DoB())...\n"
"Expected to be called between 2 and 3 times, "
"but has only 1 WillOnce().",
output);
b.DoB();
}
TEST(ExpectCallSyntaxTest, WarningIsErrorWithFlag) {
int original_behavior = GMOCK_FLAG_GET(default_mock_behavior);
GMOCK_FLAG_SET(default_mock_behavior, kAllow);
CaptureStdout();
{
MockA a;
a.DoA(0);
}
std::string output = GetCapturedStdout();
EXPECT_TRUE(output.empty()) << output;
GMOCK_FLAG_SET(default_mock_behavior, kWarn);
CaptureStdout();
{
MockA a;
a.DoA(0);
}
std::string warning_output = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring, "GMOCK WARNING", warning_output);
EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call",
warning_output);
GMOCK_FLAG_SET(default_mock_behavior, kFail);
EXPECT_NONFATAL_FAILURE(
{
MockA a;
a.DoA(0);
},
"Uninteresting mock function call");
GMOCK_FLAG_SET(default_mock_behavior, -1);
CaptureStdout();
{
MockA a;
a.DoA(0);
}
warning_output = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring, "GMOCK WARNING", warning_output);
EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call",
warning_output);
GMOCK_FLAG_SET(default_mock_behavior, 3);
CaptureStdout();
{
MockA a;
a.DoA(0);
}
warning_output = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring, "GMOCK WARNING", warning_output);
EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call",
warning_output);
GMOCK_FLAG_SET(default_mock_behavior, original_behavior);
}
#endif
TEST(OnCallTest, TakesBuiltInDefaultActionWhenNoOnCall) {
MockB b;
EXPECT_CALL(b, DoB());
EXPECT_EQ(0, b.DoB());
}
TEST(OnCallTest, TakesBuiltInDefaultActionWhenNoOnCallMatches) {
MockB b;
ON_CALL(b, DoB(1)).WillByDefault(Return(1));
EXPECT_CALL(b, DoB(_));
EXPECT_EQ(0, b.DoB(2));
}
TEST(OnCallTest, PicksLastMatchingOnCall) {
MockB b;
ON_CALL(b, DoB(_)).WillByDefault(Return(3));
ON_CALL(b, DoB(2)).WillByDefault(Return(2));
ON_CALL(b, DoB(1)).WillByDefault(Return(1));
EXPECT_CALL(b, DoB(_));
EXPECT_EQ(2, b.DoB(2));
}
TEST(ExpectCallTest, AllowsAnyCallWhenNoSpec) {
MockB b;
EXPECT_CALL(b, DoB());
b.DoB();
b.DoB(1);
b.DoB(2);
}
TEST(ExpectCallTest, PicksLastMatchingExpectCall) {
MockB b;
EXPECT_CALL(b, DoB(_)).WillRepeatedly(Return(2));
EXPECT_CALL(b, DoB(1)).WillRepeatedly(Return(1));
EXPECT_EQ(1, b.DoB(1));
}
TEST(ExpectCallTest, CatchesTooFewCalls) {
EXPECT_NONFATAL_FAILURE(
{
MockB b;
EXPECT_CALL(b, DoB(5)).Description("DoB Method").Times(AtLeast(2));
b.DoB(5);
},
"Actual function \"DoB Method\" call count "
"doesn't match EXPECT_CALL(b, DoB(5))...\n"
" Expected: to be called at least twice\n"
" Actual: called once - unsatisfied and active");
}
TEST(ExpectCallTest, InfersCardinalityWhenThereIsNoWillRepeatedly) {
{
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillOnce(Return(2));
EXPECT_EQ(1, b.DoB());
EXPECT_EQ(2, b.DoB());
}
EXPECT_NONFATAL_FAILURE(
{
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillOnce(Return(2));
EXPECT_EQ(1, b.DoB());
},
"to be called twice");
{
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillOnce(Return(2));
EXPECT_EQ(1, b.DoB());
EXPECT_EQ(2, b.DoB());
EXPECT_NONFATAL_FAILURE(b.DoB(), "to be called twice");
}
}
TEST(ExpectCallTest, InfersCardinality1WhenThereIsWillRepeatedly) {
{
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillRepeatedly(Return(2));
EXPECT_EQ(1, b.DoB());
}
{
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillRepeatedly(Return(2));
EXPECT_EQ(1, b.DoB());
EXPECT_EQ(2, b.DoB());
EXPECT_EQ(2, b.DoB());
}
EXPECT_NONFATAL_FAILURE(
{
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillRepeatedly(Return(2));
},
"to be called at least once");
}
#if defined(GTEST_INTERNAL_CPLUSPLUS_LANG) && \
GTEST_INTERNAL_CPLUSPLUS_LANG >= 201703L
TEST(ExpectCallTest, NonMoveableType) {
struct NonMoveableStruct {
explicit NonMoveableStruct(int x_in) : x(x_in) {}
NonMoveableStruct(NonMoveableStruct&&) = delete;
int x;
};
static_assert(!std::is_move_constructible_v<NonMoveableStruct>);
static_assert(!std::is_copy_constructible_v<NonMoveableStruct>);
static_assert(!std::is_move_assignable_v<NonMoveableStruct>);
static_assert(!std::is_copy_assignable_v<NonMoveableStruct>);
const auto return_17 = [] { return NonMoveableStruct(17); };
static_cast<void>(OnceAction<NonMoveableStruct()>{return_17});
static_cast<void>(Action<NonMoveableStruct()>{return_17});
static_cast<void>(OnceAction<NonMoveableStruct(int)>{return_17});
static_cast<void>(Action<NonMoveableStruct(int)>{return_17});
MockFunction<NonMoveableStruct()> mock;
EXPECT_CALL(mock, Call)
.WillOnce(return_17)
.WillRepeatedly(return_17);
EXPECT_EQ(17, mock.AsStdFunction()().x);
EXPECT_EQ(17, mock.AsStdFunction()().x);
EXPECT_EQ(17, mock.AsStdFunction()().x);
}
#endif
TEST(ExpectCallTest, NthMatchTakesNthAction) {
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillOnce(Return(2)).WillOnce(
Return(3));
EXPECT_EQ(1, b.DoB());
EXPECT_EQ(2, b.DoB());
EXPECT_EQ(3, b.DoB());
}
TEST(ExpectCallTest, TakesRepeatedActionWhenWillListIsExhausted) {
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillRepeatedly(Return(2));
EXPECT_EQ(1, b.DoB());
EXPECT_EQ(2, b.DoB());
EXPECT_EQ(2, b.DoB());
}
#if GTEST_HAS_STREAM_REDIRECTION
TEST(ExpectCallTest, TakesDefaultActionWhenWillListIsExhausted) {
MockB b;
EXPECT_CALL(b, DoB(_)).Times(1);
EXPECT_CALL(b, DoB())
.Times(AnyNumber())
.WillOnce(Return(1))
.WillOnce(Return(2));
CaptureStdout();
EXPECT_EQ(0, b.DoB(1));
EXPECT_EQ(1, b.DoB());
EXPECT_EQ(2, b.DoB());
const std::string output1 = GetCapturedStdout();
EXPECT_STREQ("", output1.c_str());
CaptureStdout();
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB());
const std::string output2 = GetCapturedStdout();
EXPECT_THAT(output2.c_str(),
HasSubstr("Actions ran out in EXPECT_CALL(b, DoB())...\n"
"Called 3 times, but only 2 WillOnce()s are specified"
" - returning default value."));
EXPECT_THAT(output2.c_str(),
HasSubstr("Actions ran out in EXPECT_CALL(b, DoB())...\n"
"Called 4 times, but only 2 WillOnce()s are specified"
" - returning default value."));
}
TEST(FunctionMockerMessageTest, ReportsExpectCallLocationForExhaustedActions) {
MockB b;
std::string expect_call_location = FormatFileLocation(__FILE__, __LINE__ + 1);
EXPECT_CALL(b, DoB()).Times(AnyNumber()).WillOnce(Return(1));
EXPECT_EQ(1, b.DoB());
CaptureStdout();
EXPECT_EQ(0, b.DoB());
const std::string output = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring, expect_call_location, output);
}
TEST(FunctionMockerMessageTest,
ReportsDefaultActionLocationOfUninterestingCallsForNaggyMock) {
std::string on_call_location;
CaptureStdout();
{
NaggyMock<MockB> b;
on_call_location = FormatFileLocation(__FILE__, __LINE__ + 1);
ON_CALL(b, DoB(_)).WillByDefault(Return(0));
b.DoB(0);
}
EXPECT_PRED_FORMAT2(IsSubstring, on_call_location, GetCapturedStdout());
}
#endif
TEST(UninterestingCallTest, DoesDefaultAction) {
MockA a;
ON_CALL(a, Binary(_, _)).WillByDefault(Return(true));
EXPECT_TRUE(a.Binary(1, 2));
MockB b;
EXPECT_EQ(0, b.DoB());
}
TEST(UnexpectedCallTest, DoesDefaultAction) {
MockA a;
ON_CALL(a, Binary(_, _)).WillByDefault(Return(true));
EXPECT_CALL(a, Binary(0, 0));
a.Binary(0, 0);
bool result = false;
EXPECT_NONFATAL_FAILURE(result = a.Binary(1, 2),
"Unexpected mock function call");
EXPECT_TRUE(result);
MockB b;
EXPECT_CALL(b, DoB(0)).Times(0);
int n = -1;
EXPECT_NONFATAL_FAILURE(n = b.DoB(1), "Unexpected mock function call");
EXPECT_EQ(0, n);
}
TEST(UnexpectedCallTest, GeneratesFailureForVoidFunction) {
MockA a1;
EXPECT_CALL(a1, DoA(1));
a1.DoA(1);
EXPECT_NONFATAL_FAILURE(
a1.DoA(9),
"Unexpected mock function call - returning directly.\n"
" Function call: DoA(9)\n"
"Google Mock tried the following 1 expectation, but it didn't match:");
EXPECT_NONFATAL_FAILURE(
a1.DoA(9),
" Expected arg #0: is equal to 1\n"
" Actual: 9\n"
" Expected: to be called once\n"
" Actual: called once - saturated and active");
MockA a2;
EXPECT_CALL(a2, DoA(1));
EXPECT_CALL(a2, DoA(3));
a2.DoA(1);
EXPECT_NONFATAL_FAILURE(
a2.DoA(2),
"Unexpected mock function call - returning directly.\n"
" Function call: DoA(2)\n"
"Google Mock tried the following 2 expectations, but none matched:");
EXPECT_NONFATAL_FAILURE(
a2.DoA(2),
"tried expectation #0: EXPECT_CALL(a2, DoA(1))...\n"
" Expected arg #0: is equal to 1\n"
" Actual: 2\n"
" Expected: to be called once\n"
" Actual: called once - saturated and active");
EXPECT_NONFATAL_FAILURE(
a2.DoA(2),
"tried expectation #1: EXPECT_CALL(a2, DoA(3))...\n"
" Expected arg #0: is equal to 3\n"
" Actual: 2\n"
" Expected: to be called once\n"
" Actual: never called - unsatisfied and active");
a2.DoA(3);
}
TEST(UnexpectedCallTest, GeneratesFailureForNonVoidFunction) {
MockB b1;
EXPECT_CALL(b1, DoB(1));
b1.DoB(1);
EXPECT_NONFATAL_FAILURE(
b1.DoB(2),
"Unexpected mock function call - returning default value.\n"
" Function call: DoB(2)\n"
" Returns: 0\n"
"Google Mock tried the following 1 expectation, but it didn't match:");
EXPECT_NONFATAL_FAILURE(
b1.DoB(2),
" Expected arg #0: is equal to 1\n"
" Actual: 2\n"
" Expected: to be called once\n"
" Actual: called once - saturated and active");
}
TEST(UnexpectedCallTest, RetiredExpectation) {
MockB b;
EXPECT_CALL(b, DoB(1)).RetiresOnSaturation();
b.DoB(1);
EXPECT_NONFATAL_FAILURE(b.DoB(1),
" Expected: the expectation is active\n"
" Actual: it is retired");
}
TEST(UnexpectedCallTest, UnmatchedArguments) {
MockB b;
EXPECT_CALL(b, DoB(1));
EXPECT_NONFATAL_FAILURE(b.DoB(2),
" Expected arg #0: is equal to 1\n"
" Actual: 2\n");
b.DoB(1);
}
TEST(UnexpectedCallTest, UnsatisfiedPrerequisites) {
Sequence s1, s2;
MockB b;
EXPECT_CALL(b, DoB(1)).InSequence(s1);
EXPECT_CALL(b, DoB(2)).Times(AnyNumber()).InSequence(s1);
EXPECT_CALL(b, DoB(3)).InSequence(s2);
EXPECT_CALL(b, DoB(4)).InSequence(s1, s2);
::testing::TestPartResultArray failures;
{
::testing::ScopedFakeTestPartResultReporter reporter(&failures);
b.DoB(4);
}
ASSERT_EQ(1, failures.size());
const ::testing::TestPartResult& r = failures.GetTestPartResult(0);
EXPECT_EQ(::testing::TestPartResult::kNonFatalFailure, r.type());
#ifdef GTEST_USES_POSIX_RE
EXPECT_THAT(r.message(),
ContainsRegex(
"the following immediate pre-requisites are not satisfied:\n"
"(.|\n)*: pre-requisite #0\n"
"(.|\n)*: pre-requisite #1"));
#else
EXPECT_THAT(r.message(),
ContainsRegex(
"the following immediate pre-requisites are not satisfied:"));
EXPECT_THAT(r.message(), ContainsRegex(": pre-requisite #0"));
EXPECT_THAT(r.message(), ContainsRegex(": pre-requisite #1"));
#endif
b.DoB(1);
b.DoB(3);
b.DoB(4);
}
TEST(UndefinedReturnValueTest,
ReturnValueIsMandatoryWhenNotDefaultConstructible) {
MockA a;
#if GTEST_HAS_EXCEPTIONS
EXPECT_ANY_THROW(a.ReturnNonDefaultConstructible());
#else
EXPECT_DEATH_IF_SUPPORTED(a.ReturnNonDefaultConstructible(), "");
#endif
}
TEST(ExcessiveCallTest, DoesDefaultAction) {
MockA a;
ON_CALL(a, Binary(_, _)).WillByDefault(Return(true));
EXPECT_CALL(a, Binary(0, 0));
a.Binary(0, 0);
bool result = false;
EXPECT_NONFATAL_FAILURE(result = a.Binary(0, 0),
"Mock function called more times than expected");
EXPECT_TRUE(result);
MockB b;
EXPECT_CALL(b, DoB(0)).Description("DoB Method").Times(0);
int n = -1;
EXPECT_NONFATAL_FAILURE(
n = b.DoB(0),
"Mock function \"DoB Method\" called more times than expected");
EXPECT_EQ(0, n);
}
TEST(ExcessiveCallTest, GeneratesFailureForVoidFunction) {
MockA a;
EXPECT_CALL(a, DoA(_)).Description("DoA Method").Times(0);
EXPECT_NONFATAL_FAILURE(
a.DoA(9),
"Mock function \"DoA Method\" called more times than expected - "
"returning directly.\n"
" Function call: DoA(9)\n"
" Expected: to be never called\n"
" Actual: called once - over-saturated and active");
}
TEST(ExcessiveCallTest, GeneratesFailureForNonVoidFunction) {
MockB b;
EXPECT_CALL(b, DoB(_));
b.DoB(1);
EXPECT_NONFATAL_FAILURE(
b.DoB(2),
"Mock function called more times than expected - "
"returning default value.\n"
" Function call: DoB(2)\n"
" Returns: 0\n"
" Expected: to be called once\n"
" Actual: called twice - over-saturated and active");
}
TEST(InSequenceTest, AllExpectationInScopeAreInSequence) {
MockA a;
{
InSequence dummy;
EXPECT_CALL(a, DoA(1));
EXPECT_CALL(a, DoA(2));
}
EXPECT_NONFATAL_FAILURE(
{
a.DoA(2);
},
"Unexpected mock function call");
a.DoA(1);
a.DoA(2);
}
TEST(InSequenceTest, NestedInSequence) {
MockA a;
{
InSequence dummy;
EXPECT_CALL(a, DoA(1));
{
InSequence dummy2;
EXPECT_CALL(a, DoA(2));
EXPECT_CALL(a, DoA(3));
}
}
EXPECT_NONFATAL_FAILURE(
{
a.DoA(1);
a.DoA(3);
},
"Unexpected mock function call");
a.DoA(2);
a.DoA(3);
}
TEST(InSequenceTest, ExpectationsOutOfScopeAreNotAffected) {
MockA a;
{
InSequence dummy;
EXPECT_CALL(a, DoA(1));
EXPECT_CALL(a, DoA(2));
}
EXPECT_CALL(a, DoA(3));
EXPECT_NONFATAL_FAILURE(
{
a.DoA(2);
},
"Unexpected mock function call");
a.DoA(3);
a.DoA(1);
a.DoA(2);
}
TEST(SequenceTest, AnyOrderIsOkByDefault) {
{
MockA a;
MockB b;
EXPECT_CALL(a, DoA(1));
EXPECT_CALL(b, DoB()).Times(AnyNumber());
a.DoA(1);
b.DoB();
}
{
MockA a;
MockB b;
EXPECT_CALL(a, DoA(1));
EXPECT_CALL(b, DoB()).Times(AnyNumber());
b.DoB();
a.DoA(1);
}
}
TEST(SequenceTest, CallsMustBeInStrictOrderWhenSaidSo1) {
MockA a;
ON_CALL(a, ReturnResult(_)).WillByDefault(Return(Result()));
Sequence s;
EXPECT_CALL(a, ReturnResult(1)).InSequence(s);
EXPECT_CALL(a, ReturnResult(2)).InSequence(s);
EXPECT_CALL(a, ReturnResult(3)).InSequence(s);
a.ReturnResult(1);
EXPECT_NONFATAL_FAILURE(a.ReturnResult(3), "Unexpected mock function call");
a.ReturnResult(2);
a.ReturnResult(3);
}
TEST(SequenceTest, CallsMustBeInStrictOrderWhenSaidSo2) {
MockA a;
ON_CALL(a, ReturnResult(_)).WillByDefault(Return(Result()));
Sequence s;
EXPECT_CALL(a, ReturnResult(1)).InSequence(s);
EXPECT_CALL(a, ReturnResult(2)).InSequence(s);
EXPECT_NONFATAL_FAILURE(a.ReturnResult(2), "Unexpected mock function call");
a.ReturnResult(1);
a.ReturnResult(2);
}
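// Sets up a DAG of expectations: in sequence x, ReturnResult(1) precedes
// ReturnResult(2), which precedes ReturnResult(3); in sequence y, the two
// DoB() calls precede ReturnResult(2). ReturnResult(2) itself may be called
// any number of times.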
class PartialOrderTest : public testing::Test {
protected:
PartialOrderTest() {
ON_CALL(a_, ReturnResult(_)).WillByDefault(Return(Result()));
Sequence x, y;
EXPECT_CALL(a_, ReturnResult(1)).InSequence(x);
EXPECT_CALL(b_, DoB()).Times(2).InSequence(y);
EXPECT_CALL(a_, ReturnResult(2)).Times(AnyNumber()).InSequence(x, y);
EXPECT_CALL(a_, ReturnResult(3)).InSequence(x);
}
MockA a_;
MockB b_;
};
TEST_F(PartialOrderTest, CallsMustConformToSpecifiedDag1) {
a_.ReturnResult(1);
b_.DoB();
EXPECT_NONFATAL_FAILURE(a_.ReturnResult(2), "Unexpected mock function call");
b_.DoB();
a_.ReturnResult(3);
}
TEST_F(PartialOrderTest, CallsMustConformToSpecifiedDag2) {
EXPECT_NONFATAL_FAILURE(a_.ReturnResult(2), "Unexpected mock function call");
a_.ReturnResult(1);
b_.DoB();
b_.DoB();
a_.ReturnResult(3);
}
TEST_F(PartialOrderTest, CallsMustConformToSpecifiedDag3) {
EXPECT_NONFATAL_FAILURE(a_.ReturnResult(3), "Unexpected mock function call");
a_.ReturnResult(1);
b_.DoB();
b_.DoB();
a_.ReturnResult(3);
}
TEST_F(PartialOrderTest, CallsMustConformToSpecifiedDag4) {
a_.ReturnResult(1);
b_.DoB();
b_.DoB();
a_.ReturnResult(3);
EXPECT_NONFATAL_FAILURE(a_.ReturnResult(2), "Unexpected mock function call");
}
TEST(SequenceTest, Retirement) {
MockA a;
Sequence s;
EXPECT_CALL(a, DoA(1)).InSequence(s);
EXPECT_CALL(a, DoA(_)).InSequence(s).RetiresOnSaturation();
EXPECT_CALL(a, DoA(1)).InSequence(s);
a.DoA(1);
a.DoA(2);
a.DoA(1);
}
TEST(ExpectationTest, ConstructorsWork) {
MockA a;
Expectation e1;
Expectation e2 = EXPECT_CALL(a, DoA(2));
Expectation e3 = EXPECT_CALL(a, DoA(3)).With(_);
{
Sequence s;
Expectation e4 = EXPECT_CALL(a, DoA(4)).Times(1);
Expectation e5 = EXPECT_CALL(a, DoA(5)).InSequence(s);
}
Expectation e6 = EXPECT_CALL(a, DoA(6)).After(e2);
Expectation e7 = EXPECT_CALL(a, DoA(7)).WillOnce(Return());
Expectation e8 = EXPECT_CALL(a, DoA(8)).WillRepeatedly(Return());
Expectation e9 = EXPECT_CALL(a, DoA(9)).RetiresOnSaturation();
Expectation e10 = e2;
EXPECT_THAT(e1, Ne(e2));
EXPECT_THAT(e2, Eq(e10));
a.DoA(2);
a.DoA(3);
a.DoA(4);
a.DoA(5);
a.DoA(6);
a.DoA(7);
a.DoA(8);
a.DoA(9);
}
TEST(ExpectationTest, AssignmentWorks) {
MockA a;
Expectation e1;
Expectation e2 = EXPECT_CALL(a, DoA(1));
EXPECT_THAT(e1, Ne(e2));
e1 = e2;
EXPECT_THAT(e1, Eq(e2));
a.DoA(1);
}
TEST(ExpectationSetTest, MemberTypesAreCorrect) {
::testing::StaticAssertTypeEq<Expectation, ExpectationSet::value_type>();
}
TEST(ExpectationSetTest, ConstructorsWork) {
MockA a;
Expectation e1;
const Expectation e2;
ExpectationSet es1;
ExpectationSet es2 = EXPECT_CALL(a, DoA(1));
ExpectationSet es3 = e1;
ExpectationSet es4(e1);
ExpectationSet es5 = e2;
ExpectationSet es6(e2);
ExpectationSet es7 = es2;
EXPECT_EQ(0, es1.size());
EXPECT_EQ(1, es2.size());
EXPECT_EQ(1, es3.size());
EXPECT_EQ(1, es4.size());
EXPECT_EQ(1, es5.size());
EXPECT_EQ(1, es6.size());
EXPECT_EQ(1, es7.size());
EXPECT_THAT(es3, Ne(es2));
EXPECT_THAT(es4, Eq(es3));
EXPECT_THAT(es5, Eq(es4));
EXPECT_THAT(es6, Eq(es5));
EXPECT_THAT(es7, Eq(es2));
a.DoA(1);
}
TEST(ExpectationSetTest, AssignmentWorks) {
ExpectationSet es1;
ExpectationSet es2 = Expectation();
es1 = es2;
EXPECT_EQ(1, es1.size());
EXPECT_THAT(*(es1.begin()), Eq(Expectation()));
EXPECT_THAT(es1, Eq(es2));
}
TEST(ExpectationSetTest, InsertionWorks) {
ExpectationSet es1;
Expectation e1;
es1 += e1;
EXPECT_EQ(1, es1.size());
EXPECT_THAT(*(es1.begin()), Eq(e1));
MockA a;
Expectation e2 = EXPECT_CALL(a, DoA(1));
es1 += e2;
EXPECT_EQ(2, es1.size());
ExpectationSet::const_iterator it1 = es1.begin();
ExpectationSet::const_iterator it2 = it1;
++it2;
EXPECT_TRUE(*it1 == e1 || *it2 == e1);
EXPECT_TRUE(*it1 == e2 || *it2 == e2);
a.DoA(1);
}
TEST(ExpectationSetTest, SizeWorks) {
ExpectationSet es;
EXPECT_EQ(0, es.size());
es += Expectation();
EXPECT_EQ(1, es.size());
MockA a;
es += EXPECT_CALL(a, DoA(1));
EXPECT_EQ(2, es.size());
a.DoA(1);
}
TEST(ExpectationSetTest, IsEnumerable) {
ExpectationSet es;
EXPECT_TRUE(es.begin() == es.end());
es += Expectation();
ExpectationSet::const_iterator it = es.begin();
EXPECT_TRUE(it != es.end());
EXPECT_THAT(*it, Eq(Expectation()));
++it;
EXPECT_TRUE(it == es.end());
}
TEST(AfterTest, SucceedsWhenPartialOrderIsSatisfied) {
MockA a;
ExpectationSet es;
es += EXPECT_CALL(a, DoA(1));
es += EXPECT_CALL(a, DoA(2));
EXPECT_CALL(a, DoA(3)).After(es);
a.DoA(1);
a.DoA(2);
a.DoA(3);
}
TEST(AfterTest, SucceedsWhenTotalOrderIsSatisfied) {
MockA a;
MockB b;
const Expectation e1 = EXPECT_CALL(a, DoA(1));
const Expectation e2 = EXPECT_CALL(b, DoB()).Times(2).After(e1);
EXPECT_CALL(a, DoA(2)).After(e2);
a.DoA(1);
b.DoB();
b.DoB();
a.DoA(2);
}
TEST(AfterTest, CallsMustBeInStrictOrderWhenSpecifiedSo1) {
MockA a;
MockB b;
Expectation e1 = EXPECT_CALL(a, DoA(1));
Expectation e2 = EXPECT_CALL(b, DoB()).After(e1);
EXPECT_CALL(a, DoA(2)).After(e2);
a.DoA(1);
EXPECT_NONFATAL_FAILURE(a.DoA(2), "Unexpected mock function call");
b.DoB();
a.DoA(2);
}
TEST(AfterTest, CallsMustBeInStrictOrderWhenSpecifiedSo2) {
MockA a;
MockB b;
Expectation e1 = EXPECT_CALL(a, DoA(1));
Expectation e2 = EXPECT_CALL(b, DoB()).Times(2).After(e1);
EXPECT_CALL(a, DoA(2)).After(e2);
a.DoA(1);
b.DoB();
EXPECT_NONFATAL_FAILURE(a.DoA(2), "Unexpected mock function call");
b.DoB();
a.DoA(2);
}
TEST(AfterTest, CallsMustSatisfyPartialOrderWhenSpecifiedSo) {
MockA a;
ON_CALL(a, ReturnResult(_)).WillByDefault(Return(Result()));
Expectation e = EXPECT_CALL(a, DoA(1));
const ExpectationSet es = EXPECT_CALL(a, DoA(2));
EXPECT_CALL(a, ReturnResult(3)).After(e, es);
EXPECT_NONFATAL_FAILURE(a.ReturnResult(3), "Unexpected mock function call");
a.DoA(2);
a.DoA(1);
a.ReturnResult(3);
}
TEST(AfterTest, CallsMustSatisfyPartialOrderWhenSpecifiedSo2) {
MockA a;
Expectation e = EXPECT_CALL(a, DoA(1));
const ExpectationSet es = EXPECT_CALL(a, DoA(2));
EXPECT_CALL(a, DoA(3)).After(e, es);
a.DoA(2);
EXPECT_NONFATAL_FAILURE(a.DoA(3), "Unexpected mock function call");
a.DoA(1);
a.DoA(3);
}
TEST(AfterTest, CanBeUsedWithInSequence) {
MockA a;
Sequence s;
Expectation e = EXPECT_CALL(a, DoA(1));
EXPECT_CALL(a, DoA(2)).InSequence(s);
EXPECT_CALL(a, DoA(3)).InSequence(s).After(e);
a.DoA(1);
EXPECT_NONFATAL_FAILURE(a.DoA(3), "Unexpected mock function call");
a.DoA(2);
a.DoA(3);
}
TEST(AfterTest, CanBeCalledManyTimes) {
MockA a;
Expectation e1 = EXPECT_CALL(a, DoA(1));
Expectation e2 = EXPECT_CALL(a, DoA(2));
Expectation e3 = EXPECT_CALL(a, DoA(3));
EXPECT_CALL(a, DoA(4)).After(e1).After(e2).After(e3);
a.DoA(3);
a.DoA(1);
a.DoA(2);
a.DoA(4);
}
TEST(AfterTest, AcceptsUpToFiveArguments) {
MockA a;
Expectation e1 = EXPECT_CALL(a, DoA(1));
Expectation e2 = EXPECT_CALL(a, DoA(2));
Expectation e3 = EXPECT_CALL(a, DoA(3));
ExpectationSet es1 = EXPECT_CALL(a, DoA(4));
ExpectationSet es2 = EXPECT_CALL(a, DoA(5));
EXPECT_CALL(a, DoA(6)).After(e1, e2, e3, es1, es2);
a.DoA(5);
a.DoA(2);
a.DoA(4);
a.DoA(1);
a.DoA(3);
a.DoA(6);
}
TEST(AfterTest, AcceptsDuplicatedInput) {
MockA a;
ON_CALL(a, ReturnResult(_)).WillByDefault(Return(Result()));
Expectation e1 = EXPECT_CALL(a, DoA(1));
Expectation e2 = EXPECT_CALL(a, DoA(2));
ExpectationSet es;
es += e1;
es += e2;
EXPECT_CALL(a, ReturnResult(3)).After(e1, e2, es, e1);
a.DoA(1);
EXPECT_NONFATAL_FAILURE(a.ReturnResult(3), "Unexpected mock function call");
a.DoA(2);
a.ReturnResult(3);
}
TEST(AfterTest, ChangesToExpectationSetHaveNoEffectAfterwards) {
MockA a;
ExpectationSet es1 = EXPECT_CALL(a, DoA(1));
Expectation e2 = EXPECT_CALL(a, DoA(2));
EXPECT_CALL(a, DoA(3)).After(es1);
es1 += e2;
a.DoA(1);
a.DoA(3);
a.DoA(2);
}
TEST(DeletingMockEarlyTest, Success1) {
MockB* const b1 = new MockB;
MockA* const a = new MockA;
MockB* const b2 = new MockB;
{
InSequence dummy;
EXPECT_CALL(*b1, DoB(_)).WillOnce(Return(1));
EXPECT_CALL(*a, Binary(_, _))
.Times(AnyNumber())
.WillRepeatedly(Return(true));
EXPECT_CALL(*b2, DoB(_)).Times(AnyNumber()).WillRepeatedly(Return(2));
}
EXPECT_EQ(1, b1->DoB(1));
delete b1;
EXPECT_TRUE(a->Binary(0, 1));
delete b2;
EXPECT_TRUE(a->Binary(1, 2));
delete a;
}
TEST(DeletingMockEarlyTest, Success2) {
MockB* const b1 = new MockB;
MockA* const a = new MockA;
MockB* const b2 = new MockB;
{
InSequence dummy;
EXPECT_CALL(*b1, DoB(_)).WillOnce(Return(1));
EXPECT_CALL(*a, Binary(_, _)).Times(AnyNumber());
EXPECT_CALL(*b2, DoB(_)).Times(AnyNumber()).WillRepeatedly(Return(2));
}
delete a;
EXPECT_EQ(1, b1->DoB(1));
EXPECT_EQ(2, b2->DoB(2));
delete b1;
delete b2;
}
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4100)
ACTION_P(Delete, ptr) { delete ptr; }
GTEST_DISABLE_MSC_WARNINGS_POP_()
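// Verifies that a mock object may delete itself from within an action
// invoked by one of its own methods, for both void-returning and
// value-returning mock functions.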
TEST(DeletingMockEarlyTest, CanDeleteSelfInActionReturningVoid) {
MockA* const a = new MockA;
EXPECT_CALL(*a, DoA(_)).WillOnce(Delete(a));
a->DoA(42);
}
TEST(DeletingMockEarlyTest, CanDeleteSelfInActionReturningValue) {
MockA* const a = new MockA;
EXPECT_CALL(*a, ReturnResult(_)).WillOnce(DoAll(Delete(a), Return(Result())));
a->ReturnResult(42);
}
TEST(DeletingMockEarlyTest, Failure1) {
MockB* const b1 = new MockB;
MockA* const a = new MockA;
MockB* const b2 = new MockB;
{
InSequence dummy;
EXPECT_CALL(*b1, DoB(_)).WillOnce(Return(1));
EXPECT_CALL(*a, Binary(_, _)).Times(AnyNumber());
EXPECT_CALL(*b2, DoB(_)).Times(AnyNumber()).WillRepeatedly(Return(2));
}
delete a;
EXPECT_NONFATAL_FAILURE({ b2->DoB(2); }, "Unexpected mock function call");
EXPECT_EQ(1, b1->DoB(1));
delete b1;
delete b2;
}
TEST(DeletingMockEarlyTest, Failure2) {
MockB* const b1 = new MockB;
MockA* const a = new MockA;
MockB* const b2 = new MockB;
{
InSequence dummy;
EXPECT_CALL(*b1, DoB(_));
EXPECT_CALL(*a, Binary(_, _)).Times(AnyNumber());
EXPECT_CALL(*b2, DoB(_)).Times(AnyNumber());
}
EXPECT_NONFATAL_FAILURE(delete b1, "Actual: never called");
EXPECT_NONFATAL_FAILURE(a->Binary(0, 1), "Unexpected mock function call");
EXPECT_NONFATAL_FAILURE(b2->DoB(1), "Unexpected mock function call");
delete a;
delete b2;
}
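// A cardinality that is satisfied only by an even number of calls and is
// never saturated; used below to exercise prerequisite tracking with a
// non-monotonic cardinality.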
class EvenNumberCardinality : public CardinalityInterface {
public:
bool IsSatisfiedByCallCount(int call_count) const override {
return call_count % 2 == 0;
}
  bool IsSaturatedByCallCount(int /* call_count */) const override {
return false;
}
void DescribeTo(::std::ostream* os) const override {
*os << "called even number of times";
}
};
Cardinality EvenNumber() { return Cardinality(new EvenNumberCardinality); }
TEST(ExpectationBaseTest,
AllPrerequisitesAreSatisfiedWorksForNonMonotonicCardinality) {
MockA* a = new MockA;
Sequence s;
EXPECT_CALL(*a, DoA(1)).Times(EvenNumber()).InSequence(s);
EXPECT_CALL(*a, DoA(2)).Times(AnyNumber()).InSequence(s);
EXPECT_CALL(*a, DoA(3)).Times(AnyNumber());
a->DoA(3);
a->DoA(1);
EXPECT_NONFATAL_FAILURE(a->DoA(2), "Unexpected mock function call");
EXPECT_NONFATAL_FAILURE(delete a, "to be called even number of times");
}
struct Printable {};
inline void operator<<(::std::ostream& os, const Printable&) {
os << "Printable";
}
struct Unprintable {
Unprintable() : value(0) {}
int value;
};
class MockC {
public:
MockC() = default;
MOCK_METHOD6(VoidMethod, void(bool cond, int n, std::string s, void* p,
const Printable& x, Unprintable y));
MOCK_METHOD0(NonVoidMethod, int());
private:
MockC(const MockC&) = delete;
MockC& operator=(const MockC&) = delete;
};
class VerboseFlagPreservingFixture : public testing::Test {
protected:
VerboseFlagPreservingFixture()
: saved_verbose_flag_(GMOCK_FLAG_GET(verbose)) {}
~VerboseFlagPreservingFixture() override {
GMOCK_FLAG_SET(verbose, saved_verbose_flag_);
}
private:
const std::string saved_verbose_flag_;
VerboseFlagPreservingFixture(const VerboseFlagPreservingFixture&) = delete;
VerboseFlagPreservingFixture& operator=(const VerboseFlagPreservingFixture&) =
delete;
};
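// The tests below capture stdout, so they require stream redirection.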
#if GTEST_HAS_STREAM_REDIRECTION
TEST(FunctionCallMessageTest,
UninterestingCallOnNaggyMockGeneratesNoStackTraceWhenVerboseWarning) {
GMOCK_FLAG_SET(verbose, kWarningVerbosity);
NaggyMock<MockC> c;
CaptureStdout();
c.VoidMethod(false, 5, "Hi", nullptr, Printable(), Unprintable());
const std::string output = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring, "GMOCK WARNING", output);
EXPECT_PRED_FORMAT2(IsNotSubstring, "Stack trace:", output);
}
TEST(FunctionCallMessageTest,
UninterestingCallOnNaggyMockGeneratesFyiWithStackTraceWhenVerboseInfo) {
GMOCK_FLAG_SET(verbose, kInfoVerbosity);
NaggyMock<MockC> c;
CaptureStdout();
c.VoidMethod(false, 5, "Hi", nullptr, Printable(), Unprintable());
const std::string output = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring, "GMOCK WARNING", output);
EXPECT_PRED_FORMAT2(IsSubstring, "Stack trace:", output);
#ifndef NDEBUG
EXPECT_PRED_FORMAT2(IsSubstring, "VoidMethod(", output);
CaptureStdout();
c.NonVoidMethod();
const std::string output2 = GetCapturedStdout();
EXPECT_PRED_FORMAT2(IsSubstring, "NonVoidMethod(", output2);
#endif
}
TEST(FunctionCallMessageTest,
UninterestingCallOnNaggyMockPrintsArgumentsAndReturnValue) {
NaggyMock<MockB> b;
CaptureStdout();
b.DoB();
const std::string output1 = GetCapturedStdout();
EXPECT_PRED_FORMAT2(
IsSubstring,
"Uninteresting mock function call - returning default value.\n"
" Function call: DoB()\n"
" Returns: 0\n",
output1.c_str());
NaggyMock<MockC> c;
CaptureStdout();
c.VoidMethod(false, 5, "Hi", nullptr, Printable(), Unprintable());
const std::string output2 = GetCapturedStdout();
EXPECT_THAT(
output2.c_str(),
ContainsRegex("Uninteresting mock function call - returning directly\\.\n"
" Function call: VoidMethod"
"\\(false, 5, \"Hi\", NULL, @.+ "
"Printable, 4-byte object <00-00 00-00>\\)"));
}
class GMockVerboseFlagTest : public VerboseFlagPreservingFixture {
public:
void VerifyOutput(const std::string& output, bool should_print,
const std::string& expected_substring,
const std::string& function_name) {
if (should_print) {
EXPECT_THAT(output.c_str(), HasSubstr(expected_substring));
#ifndef NDEBUG
EXPECT_THAT(output.c_str(), HasSubstr(function_name));
#else
static_cast<void>(function_name);
#endif
} else {
EXPECT_STREQ("", output.c_str());
}
}
void TestExpectedCall(bool should_print) {
MockA a;
EXPECT_CALL(a, DoA(5));
EXPECT_CALL(a, Binary(_, 1)).WillOnce(Return(true));
CaptureStdout();
a.DoA(5);
VerifyOutput(GetCapturedStdout(), should_print,
"Mock function call matches EXPECT_CALL(a, DoA(5))...\n"
" Function call: DoA(5)\n"
"Stack trace:\n",
"DoA");
CaptureStdout();
a.Binary(2, 1);
VerifyOutput(GetCapturedStdout(), should_print,
"Mock function call matches EXPECT_CALL(a, Binary(_, 1))...\n"
" Function call: Binary(2, 1)\n"
" Returns: true\n"
"Stack trace:\n",
"Binary");
}
void TestUninterestingCallOnNaggyMock(bool should_print) {
NaggyMock<MockA> a;
const std::string note =
"NOTE: You can safely ignore the above warning unless this "
"call should not happen. Do not suppress it by blindly adding "
"an EXPECT_CALL() if you don't mean to enforce the call. "
"See "
"https:
"gmock_cook_book.md#"
"knowing-when-to-expect-useoncall for details.";
CaptureStdout();
a.DoA(5);
VerifyOutput(GetCapturedStdout(), should_print,
"\nGMOCK WARNING:\n"
"Uninteresting mock function call - returning directly.\n"
" Function call: DoA(5)\n" +
note,
"DoA");
CaptureStdout();
a.Binary(2, 1);
VerifyOutput(GetCapturedStdout(), should_print,
"\nGMOCK WARNING:\n"
"Uninteresting mock function call - returning default value.\n"
" Function call: Binary(2, 1)\n"
" Returns: false\n" +
note,
"Binary");
}
};
TEST_F(GMockVerboseFlagTest, Info) {
GMOCK_FLAG_SET(verbose, kInfoVerbosity);
TestExpectedCall(true);
TestUninterestingCallOnNaggyMock(true);
}
TEST_F(GMockVerboseFlagTest, Warning) {
GMOCK_FLAG_SET(verbose, kWarningVerbosity);
TestExpectedCall(false);
TestUninterestingCallOnNaggyMock(true);
}
TEST_F(GMockVerboseFlagTest, Error) {
GMOCK_FLAG_SET(verbose, kErrorVerbosity);
TestExpectedCall(false);
TestUninterestingCallOnNaggyMock(false);
}
TEST_F(GMockVerboseFlagTest, InvalidFlagIsTreatedAsWarning) {
GMOCK_FLAG_SET(verbose, "invalid");
TestExpectedCall(false);
TestUninterestingCallOnNaggyMock(true);
}
#endif
class PrintMeNot {};
void PrintTo(PrintMeNot /* dummy */, ::std::ostream* /* os */) {
ADD_FAILURE() << "Google Mock is printing a value that shouldn't be "
<< "printed even to an internal buffer.";
}
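// PrintTo() fails the test whenever Google Mock tries to format a PrintMeNot
// value; the log tests below use this to detect unwanted printing.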
class LogTestHelper {
public:
LogTestHelper() = default;
MOCK_METHOD1(Foo, PrintMeNot(PrintMeNot));
private:
LogTestHelper(const LogTestHelper&) = delete;
LogTestHelper& operator=(const LogTestHelper&) = delete;
};
class GMockLogTest : public VerboseFlagPreservingFixture {
protected:
LogTestHelper helper_;
};
TEST_F(GMockLogTest, DoesNotPrintGoodCallInternallyIfVerbosityIsWarning) {
GMOCK_FLAG_SET(verbose, kWarningVerbosity);
EXPECT_CALL(helper_, Foo(_)).WillOnce(Return(PrintMeNot()));
helper_.Foo(PrintMeNot());
}
TEST_F(GMockLogTest, DoesNotPrintGoodCallInternallyIfVerbosityIsError) {
GMOCK_FLAG_SET(verbose, kErrorVerbosity);
EXPECT_CALL(helper_, Foo(_)).WillOnce(Return(PrintMeNot()));
helper_.Foo(PrintMeNot());
}
TEST_F(GMockLogTest, DoesNotPrintWarningInternallyIfVerbosityIsError) {
GMOCK_FLAG_SET(verbose, kErrorVerbosity);
ON_CALL(helper_, Foo(_)).WillByDefault(Return(PrintMeNot()));
helper_.Foo(PrintMeNot());
}
TEST(AllowLeakTest, AllowsLeakingUnusedMockObject) {
MockA* a = new MockA;
Mock::AllowLeak(a);
}
TEST(AllowLeakTest, CanBeCalledBeforeOnCall) {
MockA* a = new MockA;
Mock::AllowLeak(a);
ON_CALL(*a, DoA(_)).WillByDefault(Return());
a->DoA(0);
}
TEST(AllowLeakTest, CanBeCalledAfterOnCall) {
MockA* a = new MockA;
ON_CALL(*a, DoA(_)).WillByDefault(Return());
Mock::AllowLeak(a);
}
TEST(AllowLeakTest, CanBeCalledBeforeExpectCall) {
MockA* a = new MockA;
Mock::AllowLeak(a);
EXPECT_CALL(*a, DoA(_));
a->DoA(0);
}
TEST(AllowLeakTest, CanBeCalledAfterExpectCall) {
MockA* a = new MockA;
EXPECT_CALL(*a, DoA(_)).Times(AnyNumber());
Mock::AllowLeak(a);
}
TEST(AllowLeakTest, WorksWhenBothOnCallAndExpectCallArePresent) {
MockA* a = new MockA;
ON_CALL(*a, DoA(_)).WillByDefault(Return());
EXPECT_CALL(*a, DoA(_)).Times(AnyNumber());
Mock::AllowLeak(a);
}
TEST(VerifyAndClearExpectationsTest, NoMethodHasExpectations) {
MockB b;
ASSERT_TRUE(Mock::VerifyAndClearExpectations(&b));
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearExpectationsTest, SomeMethodsHaveExpectationsAndSucceed) {
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1));
b.DoB();
ASSERT_TRUE(Mock::VerifyAndClearExpectations(&b));
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearExpectationsTest, SomeMethodsHaveExpectationsAndFail) {
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1));
bool result = true;
EXPECT_NONFATAL_FAILURE(result = Mock::VerifyAndClearExpectations(&b),
"Actual: never called");
ASSERT_FALSE(result);
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearExpectationsTest, AllMethodsHaveExpectations) {
MockB b;
EXPECT_CALL(b, DoB()).WillOnce(Return(1));
EXPECT_CALL(b, DoB(_)).WillOnce(Return(2));
b.DoB();
b.DoB(1);
ASSERT_TRUE(Mock::VerifyAndClearExpectations(&b));
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearExpectationsTest, AMethodHasManyExpectations) {
MockB b;
EXPECT_CALL(b, DoB(0)).WillOnce(Return(1));
EXPECT_CALL(b, DoB(_)).WillOnce(Return(2));
b.DoB(1);
bool result = true;
EXPECT_NONFATAL_FAILURE(result = Mock::VerifyAndClearExpectations(&b),
"Actual: never called");
ASSERT_FALSE(result);
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearExpectationsTest, CanCallManyTimes) {
MockB b;
EXPECT_CALL(b, DoB());
b.DoB();
Mock::VerifyAndClearExpectations(&b);
EXPECT_CALL(b, DoB(_)).WillOnce(Return(1));
b.DoB(1);
Mock::VerifyAndClearExpectations(&b);
Mock::VerifyAndClearExpectations(&b);
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearTest, NoMethodHasDefaultActions) {
MockB b;
Mock::VerifyAndClear(&b);
EXPECT_EQ(0, b.DoB());
}
TEST(VerifyAndClearTest, SomeMethodsHaveDefaultActions) {
MockB b;
ON_CALL(b, DoB()).WillByDefault(Return(1));
Mock::VerifyAndClear(&b);
EXPECT_EQ(0, b.DoB());
}
TEST(VerifyAndClearTest, AllMethodsHaveDefaultActions) {
MockB b;
ON_CALL(b, DoB()).WillByDefault(Return(1));
ON_CALL(b, DoB(_)).WillByDefault(Return(2));
Mock::VerifyAndClear(&b);
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(0));
}
TEST(VerifyAndClearTest, AMethodHasManyDefaultActions) {
MockB b;
ON_CALL(b, DoB(0)).WillByDefault(Return(1));
ON_CALL(b, DoB(_)).WillByDefault(Return(2));
Mock::VerifyAndClear(&b);
EXPECT_EQ(0, b.DoB(0));
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearTest, CanCallManyTimes) {
MockB b;
ON_CALL(b, DoB()).WillByDefault(Return(1));
Mock::VerifyAndClear(&b);
Mock::VerifyAndClear(&b);
ON_CALL(b, DoB(_)).WillByDefault(Return(1));
Mock::VerifyAndClear(&b);
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearTest, Success) {
MockB b;
ON_CALL(b, DoB()).WillByDefault(Return(1));
EXPECT_CALL(b, DoB(1)).WillOnce(Return(2));
b.DoB();
b.DoB(1);
ASSERT_TRUE(Mock::VerifyAndClear(&b));
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearTest, Failure) {
MockB b;
ON_CALL(b, DoB(_)).WillByDefault(Return(1));
EXPECT_CALL(b, DoB()).WillOnce(Return(2));
b.DoB(1);
bool result = true;
EXPECT_NONFATAL_FAILURE(result = Mock::VerifyAndClear(&b),
"Actual: never called");
ASSERT_FALSE(result);
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearTest, Const) {
MockB b;
ON_CALL(Const(b), DoB()).WillByDefault(Return(1));
EXPECT_CALL(Const(b), DoB()).WillOnce(DoDefault()).WillOnce(Return(2));
b.DoB();
b.DoB();
ASSERT_TRUE(Mock::VerifyAndClear(&b));
EXPECT_EQ(0, b.DoB());
EXPECT_EQ(0, b.DoB(1));
}
TEST(VerifyAndClearTest, CanSetDefaultActionsAndExpectationsAfterwards) {
MockB b;
ON_CALL(b, DoB()).WillByDefault(Return(1));
EXPECT_CALL(b, DoB(_)).WillOnce(Return(2));
b.DoB(1);
Mock::VerifyAndClear(&b);
EXPECT_CALL(b, DoB()).WillOnce(Return(3));
ON_CALL(b, DoB(_)).WillByDefault(Return(4));
EXPECT_EQ(3, b.DoB());
EXPECT_EQ(4, b.DoB(1));
}
TEST(VerifyAndClearTest, DoesNotAffectOtherMockObjects) {
MockA a;
MockB b1;
MockB b2;
ON_CALL(a, Binary(_, _)).WillByDefault(Return(true));
EXPECT_CALL(a, Binary(_, _)).WillOnce(DoDefault()).WillOnce(Return(false));
ON_CALL(b1, DoB()).WillByDefault(Return(1));
EXPECT_CALL(b1, DoB(_)).WillOnce(Return(2));
ON_CALL(b2, DoB()).WillByDefault(Return(3));
EXPECT_CALL(b2, DoB(_));
b2.DoB(0);
Mock::VerifyAndClear(&b2);
EXPECT_TRUE(a.Binary(0, 0));
EXPECT_FALSE(a.Binary(0, 0));
EXPECT_EQ(1, b1.DoB());
EXPECT_EQ(2, b1.DoB(0));
}
TEST(VerifyAndClearTest,
DestroyingChainedMocksDoesNotDeadlockThroughExpectations) {
std::shared_ptr<MockA> a(new MockA);
ReferenceHoldingMock test_mock;
EXPECT_CALL(test_mock, AcceptReference(_))
.WillRepeatedly(SetArgPointee<0>(a));
a.reset();
}
TEST(VerifyAndClearTest,
DestroyingChainedMocksDoesNotDeadlockThroughDefaultAction) {
std::shared_ptr<MockA> a(new MockA);
ReferenceHoldingMock test_mock;
ON_CALL(test_mock, AcceptReference(_)).WillByDefault(SetArgPointee<0>(a));
a.reset();
}
TEST(SynchronizationTest, CanCallMockMethodInAction) {
MockA a;
MockC c;
ON_CALL(a, DoA(_)).WillByDefault(
IgnoreResult(InvokeWithoutArgs(&c, &MockC::NonVoidMethod)));
EXPECT_CALL(a, DoA(1));
EXPECT_CALL(a, DoA(1))
.WillOnce(Invoke(&a, &MockA::DoA))
.RetiresOnSaturation();
EXPECT_CALL(c, NonVoidMethod());
a.DoA(1);
}
TEST(ParameterlessExpectationsTest, CanSetExpectationsWithoutMatchers) {
MockA a;
int do_a_arg0 = 0;
ON_CALL(a, DoA).WillByDefault(SaveArg<0>(&do_a_arg0));
int do_a_47_arg0 = 0;
ON_CALL(a, DoA(47)).WillByDefault(SaveArg<0>(&do_a_47_arg0));
a.DoA(17);
EXPECT_THAT(do_a_arg0, 17);
EXPECT_THAT(do_a_47_arg0, 0);
a.DoA(47);
EXPECT_THAT(do_a_arg0, 17);
EXPECT_THAT(do_a_47_arg0, 47);
ON_CALL(a, Binary).WillByDefault(Return(true));
ON_CALL(a, Binary(_, 14)).WillByDefault(Return(false));
EXPECT_THAT(a.Binary(14, 17), true);
EXPECT_THAT(a.Binary(17, 14), false);
}
TEST(ParameterlessExpectationsTest, CanSetExpectationsForOverloadedMethods) {
MockB b;
ON_CALL(b, DoB()).WillByDefault(Return(9));
ON_CALL(b, DoB(5)).WillByDefault(Return(11));
EXPECT_THAT(b.DoB(), 9);
EXPECT_THAT(b.DoB(1), 0);
EXPECT_THAT(b.DoB(5), 11);
}
struct MockWithConstMethods {
public:
MOCK_CONST_METHOD1(Foo, int(int));
MOCK_CONST_METHOD2(Bar, int(int, const char*));
};
TEST(ParameterlessExpectationsTest, CanSetExpectationsForConstMethods) {
MockWithConstMethods mock;
ON_CALL(mock, Foo).WillByDefault(Return(7));
ON_CALL(mock, Bar).WillByDefault(Return(33));
EXPECT_THAT(mock.Foo(17), 7);
EXPECT_THAT(mock.Bar(27, "purple"), 33);
}
class MockConstOverload {
public:
MOCK_METHOD1(Overloaded, int(int));
MOCK_CONST_METHOD1(Overloaded, int(int));
};
TEST(ParameterlessExpectationsTest,
CanSetExpectationsForConstOverloadedMethods) {
MockConstOverload mock;
ON_CALL(mock, Overloaded(_)).WillByDefault(Return(7));
ON_CALL(mock, Overloaded(5)).WillByDefault(Return(9));
ON_CALL(Const(mock), Overloaded(5)).WillByDefault(Return(11));
ON_CALL(Const(mock), Overloaded(7)).WillByDefault(Return(13));
EXPECT_THAT(mock.Overloaded(1), 7);
EXPECT_THAT(mock.Overloaded(5), 9);
EXPECT_THAT(mock.Overloaded(7), 7);
const MockConstOverload& const_mock = mock;
EXPECT_THAT(const_mock.Overloaded(1), 0);
EXPECT_THAT(const_mock.Overloaded(5), 11);
EXPECT_THAT(const_mock.Overloaded(7), 13);
}
}
}
int main(int argc, char** argv) {
testing::InitGoogleMock(&argc, argv);
GMOCK_FLAG_SET(catch_leaked_mocks, true);
GMOCK_FLAG_SET(verbose, testing::internal::kWarningVerbosity);
return RUN_ALL_TESTS();
} | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/src/gmock-spec-builders.cc | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock-spec-builders_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
f88d24c3-4501-4787-af00-d518a7808703 | cpp | google/arolla | typed_refs_input_loader | arolla/io/typed_refs_input_loader.cc | arolla/io/typed_refs_input_loader_test.cc | #include "arolla/io/typed_refs_input_loader.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/io/input_loader.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
using Input = absl::Span<const TypedRef>;
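// An InputLoader that reads inputs from a span of TypedRefs: the i-th
// declared (name, type) pair is loaded from input[i]. Binding keeps only the
// requested names and copies the corresponding refs into the output slots.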
class TypedRefsInputLoader : public StaticInputLoader<Input> {
public:
explicit TypedRefsInputLoader(
std::vector<std::pair<std::string, QTypePtr>> args)
: StaticInputLoader<Input>(std::move(args)) {}
private:
absl::StatusOr<BoundInputLoader<Input>> BindImpl(
const absl::flat_hash_map<std::string, TypedSlot>& output_slots)
const override {
std::vector<size_t> element_ids;
std::vector<TypedSlot> slots;
element_ids.reserve(output_slots.size());
slots.reserve(output_slots.size());
for (size_t i = 0; i != types_in_order().size(); ++i) {
if (auto it = output_slots.find(types_in_order()[i].first);
it != output_slots.end()) {
element_ids.push_back(i);
slots.push_back(it->second);
}
}
return BoundInputLoader<Input>(
[slots = std::move(slots), element_ids = std::move(element_ids),
expected_input_size = types_in_order().size()](
const Input& input, FramePtr frame,
RawBufferFactory*) -> absl::Status {
if (input.size() != expected_input_size) {
return absl::InvalidArgumentError(
absl::StrFormat("unexpected input count: expected %d, got %d",
expected_input_size, input.size()));
}
for (size_t i = 0; i < slots.size(); ++i) {
size_t id = element_ids[i];
DCHECK_LT(id, input.size());
RETURN_IF_ERROR(input[id].CopyToSlot(slots[i], frame));
}
return absl::OkStatus();
});
}
};
}
std::unique_ptr<InputLoader<Input>> CreateTypedRefsInputLoader(
const std::vector<std::pair<std::string, QTypePtr>>& args) {
return std::make_unique<TypedRefsInputLoader>(args);
}
} | #include "arolla/io/typed_refs_input_loader.h"
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "absl/types/span.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/testing/matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
namespace {
using ::absl_testing::IsOk;
using ::arolla::testing::InputLoaderSupports;
using ::testing::Eq;
TEST(TupleInputLoaderTest, Scalars) {
using Input = absl::Span<const TypedRef>;
std::unique_ptr<InputLoader<Input>> input_loader = CreateTypedRefsInputLoader(
{{"a", GetQType<float>()}, {"b", GetQType<int>()}});
EXPECT_THAT(input_loader, InputLoaderSupports({{"a", GetQType<float>()},
{"b", GetQType<int>()}}));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<float>();
auto b_slot = layout_builder.AddSlot<int>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
TypedValue tv_a = TypedValue::FromValue<float>(5);
TypedValue tv_b = TypedValue::FromValue<int>(7);
ASSERT_THAT(bound_input_loader({tv_a.AsRef(), tv_b.AsRef()}, alloc.frame()),
IsOk());
EXPECT_THAT(alloc.frame().Get(a_slot), Eq(5));
EXPECT_THAT(alloc.frame().Get(b_slot), Eq(7));
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_b_input_loader,
input_loader->Bind({
{"b", TypedSlot::FromSlot(b_slot)},
}));
alloc.frame().Set(a_slot, 42);
alloc.frame().Set(b_slot, 57);
ASSERT_THAT(bound_b_input_loader({tv_a.AsRef(), tv_b.AsRef()}, alloc.frame()),
IsOk());
EXPECT_THAT(alloc.frame().Get(a_slot), Eq(42));
EXPECT_THAT(alloc.frame().Get(b_slot), Eq(7));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/typed_refs_input_loader.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/typed_refs_input_loader_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
109f995c-94fb-4864-98ce-1f4d803594a6 | cpp | google/quiche | qpack_index_conversions | quiche/quic/core/qpack/qpack_index_conversions.cc | quiche/quic/core/qpack/qpack_index_conversions_test.cc | #include "quiche/quic/core/qpack/qpack_index_conversions.h"
#include <limits>
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
uint64_t QpackAbsoluteIndexToEncoderStreamRelativeIndex(
uint64_t absolute_index, uint64_t inserted_entry_count) {
QUICHE_DCHECK_LT(absolute_index, inserted_entry_count);
return inserted_entry_count - absolute_index - 1;
}
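// Example: with inserted_entry_count == 10, the newest entry (absolute
// index 9) has encoder stream relative index 0, and the oldest entry
// (absolute index 0) has relative index 9.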
uint64_t QpackAbsoluteIndexToRequestStreamRelativeIndex(uint64_t absolute_index,
uint64_t base) {
QUICHE_DCHECK_LT(absolute_index, base);
return base - absolute_index - 1;
}
bool QpackEncoderStreamRelativeIndexToAbsoluteIndex(
uint64_t relative_index, uint64_t inserted_entry_count,
uint64_t* absolute_index) {
if (relative_index >= inserted_entry_count) {
return false;
}
*absolute_index = inserted_entry_count - relative_index - 1;
return true;
}
bool QpackRequestStreamRelativeIndexToAbsoluteIndex(uint64_t relative_index,
uint64_t base,
uint64_t* absolute_index) {
if (relative_index >= base) {
return false;
}
*absolute_index = base - relative_index - 1;
return true;
}
bool QpackPostBaseIndexToAbsoluteIndex(uint64_t post_base_index, uint64_t base,
uint64_t* absolute_index) {
if (post_base_index >= std::numeric_limits<uint64_t>::max() - base) {
return false;
}
*absolute_index = base + post_base_index;
return true;
}
} | #include "quiche/quic/core/qpack/qpack_index_conversions.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
struct {
uint64_t relative_index;
uint64_t inserted_entry_count;
uint64_t expected_absolute_index;
} kEncoderStreamRelativeIndexTestData[] = {{0, 1, 0}, {0, 2, 1}, {1, 2, 0},
{0, 10, 9}, {5, 10, 4}, {9, 10, 0}};
TEST(QpackIndexConversions, EncoderStreamRelativeIndex) {
for (const auto& test_data : kEncoderStreamRelativeIndexTestData) {
uint64_t absolute_index = 42;
EXPECT_TRUE(QpackEncoderStreamRelativeIndexToAbsoluteIndex(
test_data.relative_index, test_data.inserted_entry_count,
&absolute_index));
EXPECT_EQ(test_data.expected_absolute_index, absolute_index);
EXPECT_EQ(test_data.relative_index,
QpackAbsoluteIndexToEncoderStreamRelativeIndex(
absolute_index, test_data.inserted_entry_count));
}
}
struct {
uint64_t relative_index;
uint64_t base;
uint64_t expected_absolute_index;
} kRequestStreamRelativeIndexTestData[] = {{0, 1, 0}, {0, 2, 1}, {1, 2, 0},
{0, 10, 9}, {5, 10, 4}, {9, 10, 0}};
TEST(QpackIndexConversions, RequestStreamRelativeIndex) {
for (const auto& test_data : kRequestStreamRelativeIndexTestData) {
uint64_t absolute_index = 42;
EXPECT_TRUE(QpackRequestStreamRelativeIndexToAbsoluteIndex(
test_data.relative_index, test_data.base, &absolute_index));
EXPECT_EQ(test_data.expected_absolute_index, absolute_index);
EXPECT_EQ(test_data.relative_index,
QpackAbsoluteIndexToRequestStreamRelativeIndex(absolute_index,
test_data.base));
}
}
struct {
uint64_t post_base_index;
uint64_t base;
uint64_t expected_absolute_index;
} kPostBaseIndexTestData[] = {{0, 1, 1}, {1, 0, 1}, {2, 0, 2},
{1, 1, 2}, {0, 2, 2}, {1, 2, 3}};
TEST(QpackIndexConversions, PostBaseIndex) {
for (const auto& test_data : kPostBaseIndexTestData) {
uint64_t absolute_index = 42;
EXPECT_TRUE(QpackPostBaseIndexToAbsoluteIndex(
test_data.post_base_index, test_data.base, &absolute_index));
EXPECT_EQ(test_data.expected_absolute_index, absolute_index);
}
}
TEST(QpackIndexConversions, EncoderStreamRelativeIndexUnderflow) {
uint64_t absolute_index;
EXPECT_FALSE(QpackEncoderStreamRelativeIndexToAbsoluteIndex(
      /* relative_index = */ 10,
      /* inserted_entry_count = */ 10, &absolute_index));
EXPECT_FALSE(QpackEncoderStreamRelativeIndexToAbsoluteIndex(
      /* relative_index = */ 12,
      /* inserted_entry_count = */ 10, &absolute_index));
}
TEST(QpackIndexConversions, RequestStreamRelativeIndexUnderflow) {
uint64_t absolute_index;
EXPECT_FALSE(QpackRequestStreamRelativeIndexToAbsoluteIndex(
      /* relative_index = */ 10,
      /* base = */ 10, &absolute_index));
EXPECT_FALSE(QpackRequestStreamRelativeIndexToAbsoluteIndex(
      /* relative_index = */ 12,
      /* base = */ 10, &absolute_index));
}
TEST(QpackIndexConversions, QpackPostBaseIndexToAbsoluteIndexOverflow) {
uint64_t absolute_index;
EXPECT_FALSE(QpackPostBaseIndexToAbsoluteIndex(
      /* post_base_index = */ 20,
      /* base = */ std::numeric_limits<uint64_t>::max() - 10, &absolute_index));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_index_conversions.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_index_conversions_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
1426a3dd-c421-4482-8c1c-1fc35b1628e4 | cpp | google/quiche | quiche_test_utils | quiche/common/test_tools/quiche_test_utils.cc | quiche/common/test_tools/quiche_test_utils_test.cc | #include "quiche/common/test_tools/quiche_test_utils.h"
#include <algorithm>
#include <memory>
#include <string>
#include "quiche/common/platform/api/quiche_googleurl.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace {
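// Renders `data` as rows of kColumns hex-encoded bytes followed by their
// printable-ASCII rendering, surrounding each byte flagged in `marks` with
// '*' characters.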
std::string HexDumpWithMarks(const char* data, int length, const bool* marks,
int mark_length) {
static const char kHexChars[] = "0123456789abcdef";
static const int kColumns = 4;
const int kSizeLimit = 1024;
if (length > kSizeLimit || mark_length > kSizeLimit) {
QUICHE_LOG(ERROR) << "Only dumping first " << kSizeLimit << " bytes.";
length = std::min(length, kSizeLimit);
mark_length = std::min(mark_length, kSizeLimit);
}
std::string hex;
for (const char* row = data; length > 0;
row += kColumns, length -= kColumns) {
    for (const char* p = row; p < row + kColumns; ++p) {
if (p < row + length) {
const bool mark =
(marks && (p - data) < mark_length && marks[p - data]);
hex += mark ? '*' : ' ';
hex += kHexChars[(*p & 0xf0) >> 4];
hex += kHexChars[*p & 0x0f];
hex += mark ? '*' : ' ';
} else {
hex += " ";
}
}
hex = hex + " ";
    for (const char* p = row; p < row + kColumns && p < row + length; ++p) {
hex += (*p >= 0x20 && *p < 0x7f) ? (*p) : '.';
}
hex = hex + '\n';
}
return hex;
}
}
namespace quiche {
namespace test {
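// Compares two byte arrays and, on mismatch, reports a hex dump of both
// arrays with the differing bytes starred.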
void CompareCharArraysWithHexError(const std::string& description,
const char* actual, const int actual_len,
const char* expected,
const int expected_len) {
EXPECT_EQ(actual_len, expected_len);
const int min_len = std::min(actual_len, expected_len);
const int max_len = std::max(actual_len, expected_len);
std::unique_ptr<bool[]> marks(new bool[max_len]);
bool identical = (actual_len == expected_len);
for (int i = 0; i < min_len; ++i) {
if (actual[i] != expected[i]) {
marks[i] = true;
identical = false;
} else {
marks[i] = false;
}
}
for (int i = min_len; i < max_len; ++i) {
marks[i] = true;
}
if (identical) return;
ADD_FAILURE() << "Description:\n"
<< description << "\n\nExpected:\n"
<< HexDumpWithMarks(expected, expected_len, marks.get(),
max_len)
<< "\nActual:\n"
<< HexDumpWithMarks(actual, actual_len, marks.get(), max_len);
}
iovec MakeIOVector(absl::string_view str) {
return iovec{const_cast<char*>(str.data()), static_cast<size_t>(str.size())};
}
bool GoogleUrlSupportsIdnaForTest() {
const std::string kTestInput = "https:
const std::string kExpectedOutput = "https:
GURL url(kTestInput);
bool valid = url.is_valid() && url.spec() == kExpectedOutput;
QUICHE_CHECK(valid || !url.is_valid()) << url.spec();
return valid;
}
}
} | #include "quiche/common/test_tools/quiche_test_utils.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche::test {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
TEST(QuicheTestUtilsTest, StatusMatchers) {
const absl::Status ok = absl::OkStatus();
QUICHE_EXPECT_OK(ok);
QUICHE_ASSERT_OK(ok);
EXPECT_THAT(ok, IsOk());
const absl::StatusOr<int> ok_with_value = 2023;
QUICHE_EXPECT_OK(ok_with_value);
QUICHE_ASSERT_OK(ok_with_value);
EXPECT_THAT(ok_with_value, IsOk());
EXPECT_THAT(ok_with_value, IsOkAndHolds(2023));
const absl::Status err = absl::InternalError("test error");
EXPECT_THAT(err, Not(IsOk()));
EXPECT_THAT(err, StatusIs(absl::StatusCode::kInternal, HasSubstr("test")));
const absl::StatusOr<int> err_with_value = absl::InternalError("test error");
EXPECT_THAT(err_with_value, Not(IsOk()));
EXPECT_THAT(err_with_value, Not(IsOkAndHolds(2023)));
EXPECT_THAT(err_with_value,
StatusIs(absl::StatusCode::kInternal, HasSubstr("test")));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/test_tools/quiche_test_utils.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/test_tools/quiche_test_utils_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
b4226e9b-7fbf-4289-b06e-d2775cb74417 | cpp | tensorflow/tensorflow | tf_host_callback | tensorflow/core/tfrt/ifrt/tf_host_callback.cc | tensorflow/core/tfrt/ifrt/tf_host_callback_test.cc | #include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include <cstddef>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using RefCountHandle = ::tsl::core::RefCountPtr<tensorflow::TensorHandle>;
size_t GetSizeInBytes(const tensorflow::Tensor& tensor) {
return tensor.shape().num_elements() * DataTypeSize(tensor.dtype());
}
tensorflow::Tensor GetTensor(const DtypeAndShape& dtype_and_shape, void* src) {
DCHECK(DataTypeCanUseMemcpy(dtype_and_shape.dtype));
tensorflow::Tensor t(dtype_and_shape.dtype, dtype_and_shape.shape);
std::memcpy(t.data(), src, GetSizeInBytes(t));
return t;
}
void CopyToBuffer(void* dst, const tensorflow::Tensor& tensor) {
DCHECK(DataTypeCanUseMemcpy(tensor.dtype()));
std::memcpy(dst, tensor.data(), GetSizeInBytes(tensor));
}
}
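// Executes the entry function eagerly: each raw input buffer is wrapped in a
// host tensorflow::Tensor, the function runs as an immediate-execution
// operation, and every result tensor is memcpy'd back into the
// caller-provided output buffer.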
absl::Status TfHostCallback::Call(void** inputs, void** outputs) {
tsl::profiler::TraceMe trace_me("TfHostCallback::Call");
tensorflow::ImmediateOpPtr op(ctx_->CreateOperation());
TF_RETURN_IF_ERROR(
      op->Reset(entry_function_name_.c_str(), /*raw_device_name=*/nullptr));
ctx_->StartStep();
absl::Cleanup cleanup_step = [this]() { ctx_->EndStep(); };
for (int i = 0; i < operand_type_and_shapes_.size(); ++i) {
tensorflow::Tensor t = GetTensor(operand_type_and_shapes_[i], inputs[i]);
RefCountHandle handle(tensorflow::down_cast<tensorflow::TensorHandle*>(
ctx_->CreateLocalHandleFromTFTensor(t, nullptr)));
TF_RETURN_IF_ERROR(op->AddInput(handle.get()));
}
int num_outputs = result_type_and_shapes_.size();
absl::FixedArray<tensorflow::AbstractTensorHandle*> output_raw_handles(
num_outputs);
TF_RETURN_IF_ERROR(
op->Execute(absl::MakeSpan(output_raw_handles), &num_outputs));
std::vector<RefCountHandle> output_handles;
output_handles.reserve(num_outputs);
for (auto* output_raw_handle : output_raw_handles) {
output_handles.emplace_back(
tensorflow::down_cast<tensorflow::TensorHandle*>(output_raw_handle));
}
if (result_type_and_shapes_.size() != num_outputs) {
return absl::InternalError(absl::StrCat(
"TF host callback invocation expected ", result_type_and_shapes_.size(),
" results, instead got ", num_outputs));
}
for (int i = 0; i < num_outputs; ++i) {
const tensorflow::Tensor* tensor;
TF_RETURN_IF_ERROR(output_handles[i]->Tensor(&tensor));
CopyToBuffer(outputs[i], *tensor);
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<TfHostCallback>> TfHostCallback::Create(
absl::Span<const tensorflow::FunctionDef> functions,
absl::string_view entry_function_name,
absl::Span<const DtypeAndShape> operand_type_and_shapes,
absl::Span<const DtypeAndShape> result_type_and_shapes,
tensorflow::DeviceMgr* device_mgr) {
tensorflow::SessionOptions options;
options.config.add_device_filters("/device:CPU:*");
DCHECK(device_mgr != nullptr);
tensorflow::EagerContextPtr ctx(new tensorflow::EagerContext(
options,
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      /*async=*/false, device_mgr,
      /*device_mgr_owned=*/false,
      /*rendezvous=*/nullptr,
      /*cluster_flr=*/nullptr,
      /*collective_executor_mgr=*/nullptr,
      /*run_eager_op_as_function=*/true));
for (const tensorflow::FunctionDef& function : functions) {
TF_RETURN_IF_ERROR(ctx->AddFunctionDef(function));
}
return absl::WrapUnique(
new TfHostCallback(entry_function_name, operand_type_and_shapes,
result_type_and_shapes, std::move(ctx)));
}
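// Builds a device manager that contains only local CPU devices, which is
// sufficient for running host callbacks.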
absl::StatusOr<std::unique_ptr<tensorflow::DynamicDeviceMgr>>
CreateTfDynamicDeviceMgr() {
std::vector<std::unique_ptr<tensorflow::Device>> devices;
TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddCpuDevices(
tensorflow::SessionOptions(), "/job:localhost/replica:0/task:0",
&devices));
return std::make_unique<tensorflow::DynamicDeviceMgr>(std::move(devices));
}
}
} | #include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
absl::StatusOr<tensorflow::FunctionDef> ToFunctionDef(
tensorflow::Scope scope, const std::string& function_name) {
auto graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
tensorflow::FunctionDef function_def;
TF_RETURN_IF_ERROR(
tensorflow::GraphToFunctionDef(*graph, function_name, &function_def));
return function_def;
}
absl::StatusOr<tensorflow::FunctionDef> MakeAddOneFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_FLOAT, 0);
auto const0_value = tensorflow::test::AsScalar<float>(1);
auto const0 =
tensorflow::ops::Const(scope.WithOpName("const0"),
tensorflow::Input::Initializer(const0_value));
auto add0 = tensorflow::ops::Add(scope.WithOpName("add0"), arg0, const0);
auto retval0 =
tensorflow::ops::_Retval(scope.WithOpName("retval0"), add0, 0);
}
return ToFunctionDef(std::move(scope), function_name);
}
absl::StatusOr<std::vector<tensorflow::FunctionDef>>
MakeAddOneWithCallFunctionDef(const std::string& function_name) {
std::vector<tensorflow::FunctionDef> function_defs;
TF_ASSIGN_OR_RETURN(function_defs.emplace_back(),
MakeAddOneFunctionDef("add"));
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_FLOAT, 0);
tensorflow::NameAttrList f;
f.set_name("add");
auto call = tensorflow::ops::StatefulPartitionedCall(
scope.WithOpName("call"), {arg0.output}, {tensorflow::DT_FLOAT}, f);
auto retval0 = tensorflow::ops::_Retval(scope.WithOpName("retval0"),
call.output[0], 0);
}
TF_ASSIGN_OR_RETURN(function_defs.emplace_back(),
ToFunctionDef(std::move(scope), function_name));
return function_defs;
}
absl::StatusOr<tensorflow::FunctionDef> MakeAssignVarFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_INT32, 0);
auto var = tensorflow::ops::VarHandleOp(
scope.WithOpName("var"), tensorflow::DT_INT32,
tensorflow::TensorShape(),
tensorflow::ops::VarHandleOp::Attrs().SharedName("var"));
tensorflow::ops::AssignVariableOp assign_op(scope.WithOpName("assign"), var,
arg0);
}
return ToFunctionDef(std::move(scope), function_name);
}
absl::StatusOr<tensorflow::FunctionDef> MakeAddVarFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_INT32, 0);
auto var = tensorflow::ops::VarHandleOp(
scope.WithOpName("var"), tensorflow::DT_INT32,
tensorflow::TensorShape(),
tensorflow::ops::VarHandleOp::Attrs().SharedName("var"));
auto read = tensorflow::ops::ReadVariableOp(scope.WithOpName("read"), var,
tensorflow::DT_INT32);
auto add = tensorflow::ops::Add(scope.WithOpName("add"), read, arg0);
tensorflow::ops::AssignVariableOp assign_op(scope.WithOpName("assign"), var,
add);
auto retval0 =
tensorflow::ops::_Retval(scope.WithOpName("retval0"), add, 0);
}
return ToFunctionDef(std::move(scope), function_name);
}
TEST(TfHostCallbackTest, Simple) {
ASSERT_OK_AND_ASSIGN(auto function_defs,
MakeAddOneWithCallFunctionDef("main"));
auto in = AsTensor<float>({2.5f}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back({.dtype = in.dtype(), .shape = in.shape()});
auto out = AsTensor<float>({0.0f}, tensorflow::TensorShape({1}));
void* out_ptrs[1] = {out.data()};
std::vector<DtypeAndShape> out_dtype_shapes;
out_dtype_shapes.push_back({.dtype = out.dtype(), .shape = out.shape()});
ASSERT_OK_AND_ASSIGN(auto device_mgr, CreateTfDynamicDeviceMgr());
ASSERT_OK_AND_ASSIGN(auto tf_host_callback,
tensorflow::ifrt_serving::TfHostCallback::Create(
function_defs, "main", in_dtype_shapes,
out_dtype_shapes, device_mgr.get()));
ASSERT_OK(tf_host_callback->Call(in_ptrs, out_ptrs));
EXPECT_THAT(out,
TensorEq(AsTensor<float>({3.5f}, tensorflow::TensorShape({1}))));
}
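// Two callbacks created on the same DeviceMgr share resource state: the
// first assigns a variable, the second reads it, adds its input, stores the
// sum back, and returns it.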
TEST(TfHostCallbackTest, SharedState) {
tensorflow::ConfigProto session_config;
ASSERT_OK_AND_ASSIGN(auto state, CreateTfDynamicDeviceMgr());
std::unique_ptr<TfHostCallback> assign_callback;
{
ASSERT_OK_AND_ASSIGN(auto functions, MakeAssignVarFunctionDef("main"));
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
std::vector<DtypeAndShape> out_dtype_shapes;
ASSERT_OK_AND_ASSIGN(
assign_callback,
TfHostCallback::Create({functions}, "main", in_dtype_shapes,
out_dtype_shapes, state.get()));
}
std::unique_ptr<TfHostCallback> incr_callback;
{
ASSERT_OK_AND_ASSIGN(auto functions, MakeAddVarFunctionDef("main"));
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
std::vector<DtypeAndShape> out_dtype_shapes;
out_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
ASSERT_OK_AND_ASSIGN(
incr_callback,
TfHostCallback::Create({functions}, "main", in_dtype_shapes,
out_dtype_shapes, state.get()));
}
constexpr int32_t kInit = 2;
{
auto in = AsTensor<int32_t>({kInit}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
    void* out_ptrs[1] = {nullptr};  // Unused: the function produces no outputs.
ASSERT_OK(assign_callback->Call(in_ptrs, out_ptrs));
}
for (int i = 0; i < 3; ++i) {
auto in = AsTensor<int32_t>({1}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
auto out = AsTensor<int32_t>({0}, tensorflow::TensorShape({1}));
void* out_ptrs[1] = {out.data()};
ASSERT_OK(incr_callback->Call(in_ptrs, out_ptrs));
EXPECT_THAT(out, TensorEq(AsTensor<int32_t>({kInit + i + 1},
tensorflow::TensorShape({1}))));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/tf_host_callback.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/tf_host_callback_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35f936ca-0998-4bd1-b503-610007d31953 | cpp | tensorflow/tensorflow | op_version | tensorflow/lite/tools/versioning/op_version.cc | tensorflow/lite/tools/versioning/op_version_test.cc | #include "tensorflow/lite/tools/versioning/op_version.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
namespace {
bool NeedBroadcastForBinaryInputs(const OpSignature& op_sig) {
if (op_sig.inputs.size() < 2) {
return false;
}
return (op_sig.inputs.at(0).dims != op_sig.inputs.at(1).dims);
}
int GetInputMaxDims(const OpSignature& op_sig) {
int max_dims = 0;
for (auto& input : op_sig.inputs) {
if (input.dims.size() > max_dims) {
max_dims = input.dims.size();
}
}
return max_dims;
}
}
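// Returns the minimum operator version needed to represent `op_sig`, based
// on the features it exercises (tensor types, builtin parameters, and
// shapes). Newer features map to higher versions; the baseline is 1.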
int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
switch (op_sig.op) {
case BuiltinOperator_CONV_2D: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto conv_params =
reinterpret_cast<TfLiteConvParams*>(op_sig.builtin_data);
TFLITE_DCHECK(conv_params != nullptr);
if (conv_params->quantized_bias_type) {
return 8;
}
}
if (op_sig.ext_options.conv_2d.is_grouped_convolution) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
        op_sig.outputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 7;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (op_sig.ext_options.conv_2d.is_per_channel_quantized) {
return 5;
}
return 2;
}
return 1;
}
case BuiltinOperator_DEPTHWISE_CONV_2D: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized) {
return 6;
}
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 7;
}
auto depthwise_conv_params =
reinterpret_cast<TfLiteDepthwiseConvParams*>(op_sig.builtin_data);
TFLITE_DCHECK(depthwise_conv_params != nullptr);
if (depthwise_conv_params->dilation_width_factor != 1 ||
depthwise_conv_params->dilation_height_factor != 1) {
return 2;
}
return 1;
}
case BuiltinOperator_EMBEDDING_LOOKUP: {
if (op_sig.inputs.at(1).type == kTfLiteInt4 ||
op_sig.ext_options.embedding_lookup.is_per_channel_quantized) {
return 4;
}
return 1;
}
case BuiltinOperator_FAKE_QUANT: {
auto fake_quant_params =
reinterpret_cast<TfLiteFakeQuantParams*>(op_sig.builtin_data);
TFLITE_DCHECK(fake_quant_params != nullptr);
if (fake_quant_params->narrow_range) {
return 2;
}
return 1;
}
case BuiltinOperator_FULLY_CONNECTED: {
auto fully_connected_params =
reinterpret_cast<TfLiteFullyConnectedParams*>(op_sig.builtin_data);
TFLITE_DCHECK(fully_connected_params != nullptr);
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 13;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32 &&
op_sig.ext_options.fully_connected.is_per_channel_quantized) {
return 12;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
if (fully_connected_params->quantized_bias_type) {
return 11;
}
}
if (op_sig.ext_options.fully_connected.sparse_weight) {
return 8;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 7;
}
if (op_sig.inputs.size() == 2) {
return 6;
}
if (fully_connected_params->keep_num_dims) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 10;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (fully_connected_params->asymmetric_quantize_inputs) {
return 9;
}
return 3;
}
if (fully_connected_params->weights_format ==
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
return 2;
}
return 1;
}
case BuiltinOperator_GATHER: {
if (op_sig.inputs.at(0).type == kTfLiteInt4) {
return 7;
}
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 6;
}
auto gather_params =
reinterpret_cast<TfLiteGatherParams*>(op_sig.builtin_data);
if (gather_params && gather_params->batch_dims != 0) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_SVDF: {
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto svdf_params =
reinterpret_cast<TfLiteSVDFParams*>(op_sig.builtin_data);
if (svdf_params && svdf_params->asymmetric_quantize_inputs) {
return 4;
}
return 2;
}
return 1;
}
case BuiltinOperator_SIGN:
if (op_sig.inputs.at(0).type == kTfLiteInt32) {
return 2;
}
return 1;
case BuiltinOperator_MUL:
if ((op_sig.inputs.at(0).type == kTfLiteInt16 &&
!op_sig.ext_options.mul.input_quantized) ||
op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 7;
}
if (op_sig.inputs.at(0).type == kTfLiteComplex64) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt64) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.ext_options.mul.input1_scale != 0 &&
op_sig.ext_options.mul.input2_scale != 0 &&
op_sig.ext_options.mul.output_scale != 0 &&
(op_sig.ext_options.mul.input1_scale *
op_sig.ext_options.mul.input2_scale /
op_sig.ext_options.mul.output_scale) >= 1.0) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_MAX_POOL_2D:
case BuiltinOperator_AVERAGE_POOL_2D:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_TRANSPOSE:
if (op_sig.inputs.at(0).dims.size() > 5) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 5;
}
if (op_sig.inputs.at(0).dims.size() > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_TRANSPOSE_CONV: {
auto transpose_conv_params =
reinterpret_cast<TfLiteTransposeConvParams*>(op_sig.builtin_data);
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
TFLITE_DCHECK(transpose_conv_params != nullptr);
if (transpose_conv_params->quantized_bias_type) {
return 5;
}
}
if (transpose_conv_params != nullptr &&
transpose_conv_params->activation) {
return 4;
}
if (op_sig.inputs.size() == 4 &&
op_sig.inputs.at(3).type != kTfLiteNoType) {
return 3;
}
if (op_sig.inputs.at(1).type == kTfLiteInt8) {
return 2;
}
return 1;
}
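    // LSTM: the full kernel with int16 activations and int8 weights is v5;
    // the hybrid float32/int8 full kernel is v4 (asymmetric quantization) or
    // v3; the basic kernel is v2.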
    case BuiltinOperator_LSTM: {
      auto lstm_params =
          reinterpret_cast<TfLiteLSTMParams*>(op_sig.builtin_data);
      TFLITE_DCHECK(lstm_params != nullptr);
      if (lstm_params->kernel_type == kTfLiteLSTMFullKernel &&
          op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.inputs.at(2).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        return 5;
      }
if (lstm_params->kernel_type == kTfLiteLSTMFullKernel &&
op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (lstm_params->asymmetric_quantize_inputs) {
return 4;
}
return 3;
}
if (lstm_params->kernel_type == kTfLiteLSTMBasicKernel) {
return 2;
}
return 1;
}
case BuiltinOperator_SPLIT:
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(1).type == kTfLiteInt32) {
return 3;
}
if (op_sig.inputs.at(1).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SPARSE_TO_DENSE:
if (op_sig.inputs.at(2).type == kTfLiteInt8 ||
op_sig.inputs.at(2).type == kTfLiteUInt8) {
return 3;
}
if (op_sig.inputs.at(2).type == kTfLiteInt64) {
return 2;
}
return 1;
case BuiltinOperator_SLICE:
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 6;
}
if (op_sig.inputs.at(0).dims.size() > 4) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_UNPACK:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 4;
}
return 1;
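    // DEQUANTIZE: int4 is v6; int16 and float16 are v3; per-channel int8 is
    // v5, otherwise int8 is v2.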
case BuiltinOperator_DEQUANTIZE:
if (op_sig.inputs.at(0).type == kTfLiteInt4) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteFloat16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
if (op_sig.ext_options.dequantize.is_per_channel_quantized) {
return 5;
}
return 2;
}
return 1;
case BuiltinOperator_QUANTIZE:
if (op_sig.inputs.at(0).type == kTfLiteInt4 ||
op_sig.outputs.at(0).type == kTfLiteInt4) {
return 4;
}
if (op_sig.ext_options.quantize.is_per_channel_quantized) {
return 3;
}
if (op_sig.outputs.at(0).type == kTfLiteInt16) {
return 2;
}
return 1;
case BuiltinOperator_FLOOR_DIV:
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32) {
return 2;
}
return 1;
case BuiltinOperator_FLOOR_MOD:
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_L2_NORMALIZATION:
if (op_sig.outputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_ABS:
if (op_sig.inputs.at(0).type == kTfLiteInt32) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return op_sig.ext_options.abs.input_quantized ? 3 : 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
return 1;
case BuiltinOperator_RELU:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
return 1;
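    // STRIDED_SLICE: the offset flag (v8) and uint32 tensors (v7) are checked
    // before ellipsis/new-axis masks (v6), string inputs (v5), and >4D
    // inputs (v4).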
case BuiltinOperator_STRIDED_SLICE: {
auto strided_slice_params =
reinterpret_cast<TfLiteStridedSliceParams*>(op_sig.builtin_data);
TFLITE_DCHECK(strided_slice_params != nullptr);
      if (strided_slice_params->offset) {
return 8;
}
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 7;
}
if (strided_slice_params->ellipsis_mask != 0 ||
strided_slice_params->new_axis_mask != 0) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 5;
}
if (op_sig.ext_options.strided_slice.num_dims > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_REVERSE_V2:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 2;
}
return 1;
case BuiltinOperator_RESIZE_BILINEAR: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
auto resize_bilinear_params =
reinterpret_cast<TfLiteResizeBilinearParams*>(op_sig.builtin_data);
TFLITE_DCHECK(resize_bilinear_params != nullptr);
if (resize_bilinear_params->half_pixel_centers) {
return 3;
} else if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
auto resize_nearest_neighbor_params =
reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
op_sig.builtin_data);
TFLITE_DCHECK(resize_nearest_neighbor_params != nullptr);
if (resize_nearest_neighbor_params->half_pixel_centers ||
resize_nearest_neighbor_params->align_corners) {
return 3;
} else if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_MAXIMUM:
case BuiltinOperator_MINIMUM:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_PACK:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
return 1;
case BuiltinOperator_TILE:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_SQUEEZE:
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_SPACE_TO_BATCH_ND:
case BuiltinOperator_BATCH_TO_SPACE_ND:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).dims.size() != 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
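    // ADD: non-quantized int16 is v5 and int64 is v4; quantized int16 with a
    // general (non power-of-two) scale is v3.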
case BuiltinOperator_ADD: {
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt16 &&
!op_sig.ext_options.add.input_quantized) {
return 5;
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt64) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto add_params =
reinterpret_cast<TfLiteAddParams*>(op_sig.builtin_data);
if (add_params && !add_params->pot_scale_int16) {
return 3;
}
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_SUB: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto sub_params =
reinterpret_cast<TfLiteSubParams*>(op_sig.builtin_data);
if (sub_params && !sub_params->pot_scale_int16) {
return 5;
}
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt64) {
return 4;
}
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_GATHER_ND:
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 5;
}
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 4;
}
if (!op_sig.inputs.empty() &&
(op_sig.inputs.at(0).type == kTfLiteInt16)) {
return 3;
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_DIV:
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 2;
}
return 1;
case BuiltinOperator_TANH:
case BuiltinOperator_LOGISTIC:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
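    // FILL: versioned on the value tensor (input 1): float16 is v4,
    // int8/int16 are v3, and bool/string are v2.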
case BuiltinOperator_FILL:
if (op_sig.inputs.size() >= 2) {
if (op_sig.inputs.at(1).type == kTfLiteFloat16) return 4;
if (op_sig.inputs.at(1).type == kTfLiteInt8 ||
op_sig.inputs.at(1).type == kTfLiteInt16) {
return 3;
} else if ((op_sig.inputs.at(1).type == kTfLiteBool ||
op_sig.inputs.at(1).type == kTfLiteString)) {
return 2;
}
}
return 1;
case BuiltinOperator_EQUAL:
if (!op_sig.inputs.empty()) {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
}
return 1;
case BuiltinOperator_NOT_EQUAL:
if (!op_sig.inputs.empty()) {
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
}
return 1;
case BuiltinOperator_LEAKY_RELU:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 2;
}
return 1;
case BuiltinOperator_RANGE:
if (op_sig.inputs.at(0).type == kTfLiteInt64) {
return 2;
}
return 1;
case BuiltinOperator_BATCH_MATMUL: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto batch_mat_mul_params =
reinterpret_cast<TfLiteBatchMatMulParams*>(op_sig.builtin_data);
if (batch_mat_mul_params &&
batch_mat_mul_params->asymmetric_quantize_inputs) {
return 4;
}
}
return 1;
}
case BuiltinOperator_PAD:
case BuiltinOperator_PADV2:
if (op_sig.inputs.at(0).dims.size() > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_CONCATENATION:
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SOFTMAX:
case BuiltinOperator_MEAN:
case BuiltinOperator_MIRROR_PAD:
case BuiltinOperator_REDUCE_MAX:
case BuiltinOperator_REDUCE_MIN:
case BuiltinOperator_RELU6:
case BuiltinOperator_RSQRT:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
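    // RNN variants below: the hybrid int8-weight/float32-output path is v3
    // with asymmetric input quantization, otherwise v2.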
case BuiltinOperator_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto rnn_params =
reinterpret_cast<TfLiteRNNParams*>(op_sig.builtin_data);
if (rnn_params && rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto sequence_rnn_params =
reinterpret_cast<TfLiteSequenceRNNParams*>(op_sig.builtin_data);
if (sequence_rnn_params &&
sequence_rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto bidirectional_sequence_rnn_params =
reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>(
op_sig.builtin_data);
if (bidirectional_sequence_rnn_params &&
bidirectional_sequence_rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto bidirectional_sequence_lstm_params =
reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
op_sig.builtin_data);
if (bidirectional_sequence_lstm_params &&
bidirectional_sequence_lstm_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
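    // UNIDIRECTIONAL_SEQUENCE_LSTM: int16 activations with int8 weights are
    // v5; diagonal recurrent tensors are v4; the hybrid float32/int8 path is
    // v3 (asymmetric quantization) or v2.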
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
auto unidirectional_sequence_lstm_params =
reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
op_sig.builtin_data);
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 5;
}
if (unidirectional_sequence_lstm_params &&
unidirectional_sequence_lstm_params->diagonal_recurrent_tensors) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (unidirectional_sequence_lstm_params &&
unidirectional_sequence_lstm_params->asymmetric_quantize_inputs) {
return 3;
}
return 2;
}
return 1;
}
case BuiltinOperator_ARG_MAX:
case BuiltinOperator_ARG_MIN:
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SELECT: {
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
      if (op_sig.inputs.at(0).dims.size() == 5 ||
          op_sig.inputs.at(1).dims.size() == 5 ||
          op_sig.inputs.at(2).dims.size() == 5) {
        return 3;
      }
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_LESS:
case BuiltinOperator_GREATER_EQUAL: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_SELECT_V2: {
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 2;
}
return 1;
}
case BuiltinOperator_SPACE_TO_DEPTH:
case BuiltinOperator_SPLIT_V:
case BuiltinOperator_SUM:
case BuiltinOperator_LOG_SOFTMAX:
case BuiltinOperator_GREATER:
case BuiltinOperator_LESS_EQUAL:
case BuiltinOperator_SQUARED_DIFFERENCE:
case BuiltinOperator_DEPTH_TO_SPACE:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_TOPK_V2:
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(1).type == kTfLiteInt16 ||
op_sig.outputs.at(1).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_EXP:
case BuiltinOperator_LOG:
case BuiltinOperator_REDUCE_PROD:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteInt16) {
return 2;
}
return 1;
case BuiltinOperator_DYNAMIC_UPDATE_SLICE:
if (op_sig.inputs.at(2).type == kTfLiteInt64) return 2;
return 1;
case BuiltinOperator_BROADCAST_TO:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
return 2;
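    // CAST: keyed on the newest type on either side: bfloat16 is v7,
    // int4->float32 is v6, float64/float16 are v5, uint16 is v4, int8 is v3,
    // and uint32 is v2.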
case BuiltinOperator_CAST:
if (op_sig.inputs.at(0).type == kTfLiteBFloat16 ||
op_sig.outputs.at(0).type == kTfLiteBFloat16) {
return 7;
} else if (op_sig.inputs.at(0).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
return 6;
} else if (op_sig.inputs.at(0).type == kTfLiteFloat64 ||
op_sig.outputs.at(0).type == kTfLiteFloat64 ||
op_sig.inputs.at(0).type == kTfLiteFloat16 ||
op_sig.outputs.at(0).type == kTfLiteFloat16) {
return 5;
} else if (op_sig.inputs.at(0).type == kTfLiteUInt16 ||
op_sig.outputs.at(0).type == kTfLiteUInt16) {
return 4;
} else if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 3;
} else if (op_sig.inputs.at(0).type == kTfLiteUInt32 ||
op_sig.outputs.at(0).type == kTfLiteUInt32) {
return 2;
}
return 1;
case BuiltinOperator_WHERE:
if (op_sig.inputs.at(0).type == kTfLiteBool) return 1;
return 2;
case BuiltinOperator_GELU:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
return 1;
default:
return 1;
}
}
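// Walks every operator in each subgraph and raises the operator code's
// version in place to the value reported by GetBuiltinOperatorVersion;
// existing versions are never lowered.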
void UpdateOpVersion(uint8_t* model_buffer_pointer) {
auto model = GetMutableModel(model_buffer_pointer);
auto subgraphs = model->subgraphs();
for (int i = 0; i < subgraphs->Length(); ++i) {
const SubGraph* subgraph = subgraphs->Get(i);
for (int j = 0; j < subgraph->operators()->Length(); ++j) {
const Operator* op = subgraph->operators()->Get(j);
OperatorCode* op_code =
model->mutable_operator_codes()->GetMutableObject(op->opcode_index());
auto builtin_code = GetBuiltinCode(op_code);
if (builtin_code != BuiltinOperator_CUSTOM) {
OpSignature op_sig = GetOpSignature(op_code, op, subgraph, model);
int32_t op_ver = GetBuiltinOperatorVersion(op_sig);
if (op_sig.builtin_data) {
free(op_sig.builtin_data);
}
if (op_ver <= op_code->version()) {
continue;
}
if (!op_code->mutate_version(op_ver)) {
LOG(ERROR) << "Can't set operator "
<< EnumNameBuiltinOperator(builtin_code) << " to version "
<< op_ver;
}
}
}
}
}
} | #include "tensorflow/lite/tools/versioning/op_version.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
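// Helpers that build OpSignatureTensorSpec lists for fake op signatures; the
// rank/dim overloads give every dimension a placeholder extent of 4.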
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const std::vector<TfLiteType>& types) {
std::vector<OpSignatureTensorSpec> tensor_specs;
for (auto type : types) {
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const std::vector<TfLiteType>& types, int rank) {
std::vector<OpSignatureTensorSpec> tensor_specs;
for (auto type : types) {
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
for (int i = 0; i < rank; i++) {
tensor_spec.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
tensor_specs.push_back(tensor_spec);
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type, const int dim) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
for (int i = 0; i < dim; i++) {
tensor_spec.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec);
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type, const int dim1, const int dim2) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec1 = {};
tensor_spec1.type = type;
for (int i = 0; i < dim1; i++) {
tensor_spec1.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec1);
OpSignatureTensorSpec tensor_spec2 = {};
tensor_spec2.type = type;
for (int i = 0; i < dim2; i++) {
tensor_spec2.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec2);
return tensor_specs;
}
}
TEST(OpVersionTest, VersioningSparseToDense) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8, kTfLiteInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8, kTfLiteUInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt64, kTfLiteInt64, kTfLiteInt64}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
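// Exercises the common pattern where int8 inputs map to v2 and uint8 inputs
// fall back to v1.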
void SimpleVersioningTest(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
void SimpleVersioningTestExtended(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
SimpleVersioningTest(op);
}
void SimpleOutputVersioningTest(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = std::vector<OpSignatureTensorSpec>{},
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = op,
.inputs = std::vector<OpSignatureTensorSpec>{},
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningEqualTest) {
SimpleVersioningTest(BuiltinOperator_EQUAL);
OpSignature fake_op_sig = {
.op = BuiltinOperator_EQUAL,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteString),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningNotEqualTest) {
SimpleVersioningTest(BuiltinOperator_NOT_EQUAL);
OpSignature fake_op_sig = {
.op = BuiltinOperator_NOT_EQUAL,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteString),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningLessTest) {
SimpleVersioningTest(BuiltinOperator_LESS);
}
TEST(OpVersionTest, VersioningLessEqualTest) {
SimpleVersioningTest(BuiltinOperator_LESS_EQUAL);
}
TEST(OpVersionTest, VersioningGreaterTest) {
SimpleVersioningTest(BuiltinOperator_GREATER);
}
TEST(OpVersionTest, VersioningGreaterEqualTest) {
SimpleVersioningTest(BuiltinOperator_GREATER_EQUAL);
}
TEST(OpVersionTest, VersioningSpaceToBatchNDTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SPACE_TO_BATCH_ND,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningLogSoftmaxTest) {
SimpleVersioningTest(BuiltinOperator_LOG_SOFTMAX);
}
TEST(OpVersionTest, VersioningPackTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningUnpackTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningRangeTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_RANGE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningReluTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningBatchToSpaceNDTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_BATCH_TO_SPACE_ND,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningTanhTest) {
SimpleVersioningTest(BuiltinOperator_TANH);
}
TEST(OpVersionTest, VersioningStridedSliceTest) {
TfLiteStridedSliceParams strided_slice_params = {};
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_STRIDED_SLICE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
fake_op_sig.builtin_data = reinterpret_cast<void*>(&strided_slice_params);
strided_slice_params.ellipsis_mask = 0;
strided_slice_params.new_axis_mask = 2;
fake_op_sig.ext_options.strided_slice.num_dims = 5;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
strided_slice_params.new_axis_mask = 0;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.ext_options.strided_slice.num_dims = 4;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
strided_slice_params.offset = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
}
TEST(OpVersionTest, VersioningSpaceToDepthTest) {
SimpleVersioningTest(BuiltinOperator_SPACE_TO_DEPTH);
}
TEST(OpVersionTest, VersioningSliceTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
  fake_op_sig = {
      .op = BuiltinOperator_SLICE,
  };
  fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteString, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
  fake_op_sig = {
      .op = BuiltinOperator_SLICE,
  };
  fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SLICE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
}
TEST(OpVersionTest, VersioningLogisticTest) {
  SimpleVersioningTest(BuiltinOperator_LOGISTIC);
}
TEST(OpVersionTest, VersioningL2NormTest) {
SimpleOutputVersioningTest(BuiltinOperator_L2_NORMALIZATION);
}
TEST(OpVersionTest, VersioningMaxTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MAXIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 5, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_MAXIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningMinTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MINIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 5, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_MINIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningMeanTest) {
SimpleVersioningTestExtended(BuiltinOperator_MEAN);
}
TEST(OpVersionTest, VersioningSumTest) {
SimpleVersioningTest(BuiltinOperator_SUM);
}
TEST(OpVersionTest, VersioningReduceMinTest) {
SimpleVersioningTestExtended(BuiltinOperator_REDUCE_MIN);
}
TEST(OpVersionTest, VersioningReduceMaxTest) {
SimpleVersioningTestExtended(BuiltinOperator_REDUCE_MAX);
}
TEST(OpVersionTest, VersioningMirrorPadTest) {
SimpleVersioningTestExtended(BuiltinOperator_MIRROR_PAD);
}
TEST(OpVersionTest, VersioningReduceProdTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_REDUCE_PROD;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningAddTest) {
TfLiteAddParams add_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_ADD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&add_params)};
add_params.pot_scale_int16 = false;
fake_op_sig.ext_options.add.input_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.ext_options.add.input_quantized = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
SimpleVersioningTest(BuiltinOperator_ADD);
}
TEST(OpVersionTest, VersioningSubTest) {
TfLiteSubParams sub_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_SUB,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&sub_params)};
sub_params.pot_scale_int16 = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
SimpleVersioningTest(BuiltinOperator_SUB);
}
TEST(OpVersionTest, VersioningMUL7TestInt16) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
fake_op_sig.ext_options.mul.input_quantized = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
}
TEST(OpVersionTest, VersioningMUL7TestUInt32) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
}
TEST(OpVersionTest, VersioningMUL6Test) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteComplex64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
}
TEST(OpVersionTest, VersioningMUL5Test) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
TEST(OpVersionTest, VersioningSub4Test) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SUB,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
void SimpleMulVersioningTest(TfLiteType data_type, float multiplier,
int version) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{data_type, data_type}),
.outputs = CreateOpSignatureTensorSpecs(data_type),
};
fake_op_sig.ext_options.mul = {1.0f, 1.0f, 1.0f / multiplier};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), version);
}
TEST(OpVersionTest, VersioningMulTest) {
SimpleMulVersioningTest(kTfLiteUInt8, 0.5f, 1);
SimpleMulVersioningTest(kTfLiteInt8, 0.5f, 2);
SimpleMulVersioningTest(kTfLiteInt8, 2.0f, 3);
}
TEST(OpVersionTest, VersioningPadTest) {
SimpleVersioningTest(BuiltinOperator_PAD);
}
TEST(OpVersionTest, VersioningPadV2Test) {
SimpleVersioningTest(BuiltinOperator_PADV2);
}
TEST(OpVersionTest, VersioningConcatenationTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_CONCATENATION;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningSelectTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt32, kTfLiteUInt32, kTfLiteUInt32}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8, kTfLiteUInt8}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8, kTfLiteInt8}, 4);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32},
4);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningSelectV2Test) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT_V2;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt32, kTfLiteUInt32, kTfLiteUInt32}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT_V2;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32, kTfLiteInt32}, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningRelu6Test) {
SimpleVersioningTestExtended(BuiltinOperator_RELU6);
}
TEST(OpVersionTest, VersioningFullyConnectedTest) {
TfLiteFullyConnectedParams fully_connected_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatDefault;
fake_op_sig.ext_options.fully_connected.sparse_weight = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.asymmetric_quantize_inputs = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fully_connected_params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 9);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.quantized_bias_type = kTfLiteInt32;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 11);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fake_op_sig.ext_options.fully_connected.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 12);
}
TEST(OpVersionTest, VersioningDequantizeTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.ext_options.dequantize.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningQuantizeTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_QUANTIZE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.ext_options.quantize.is_per_channel_quantized = false;
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.ext_options.quantize.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningConv2DTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
fake_op_sig.ext_options.conv_2d.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig.op = BuiltinOperator_CONV_2D;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8});
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.ext_options.conv_2d.is_grouped_convolution = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
TfLiteConvParams conv_params = {};
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&conv_params),
};
conv_params.quantized_bias_type = kTfLiteInt32;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
}
TEST(OpVersionTest, VersioningFloorDivOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningFloorModOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_FLOOR_MOD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_MOD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningTransposeConvOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteUInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt8, kTfLiteInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
const auto none_type = kTfLiteNoType;
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, none_type}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
TfLiteTransposeConvParams transpose_conv_params = {};
transpose_conv_params.activation = kTfLiteActRelu;
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, none_type}),
.builtin_data = reinterpret_cast<void*>(&transpose_conv_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
transpose_conv_params = {};
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&transpose_conv_params),
};
transpose_conv_params.quantized_bias_type = kTfLiteInt32;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
TEST(OpVersionTest, VersioningSVDFOperatorTest) {
TfLiteSVDFParams svdf_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_SVDF,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32,
kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&svdf_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_SVDF,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8, kTfLiteFloat32,
kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&svdf_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
svdf_params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
svdf_params = {};
fake_op_sig = {
.op = BuiltinOperator_SVDF,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt8, kTfLiteInt8, kTfLiteInt32, kTfLiteInt32, kTfLiteInt16}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&svdf_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningDepthwiseConv2DTest) {
TfLiteDepthwiseConvParams depthwise_conv_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_DEPTHWISE_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
depthwise_conv_params = {};
fake_op_sig = {
.op = BuiltinOperator_DEPTHWISE_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_DEPTHWISE_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params),
};
depthwise_conv_params.dilation_width_factor = 2;
depthwise_conv_params.dilation_height_factor = 2;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_DEPTHWISE_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params),
};
depthwise_conv_params.dilation_width_factor = 1;
depthwise_conv_params.dilation_height_factor = 1;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningTileOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_TILE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_TILE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteString),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningTransposeTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteBool, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteBool, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningGatherNdOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteString, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt16}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteBool, kTfLiteInt16}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
TEST(OpVersionTest, VersioningDivTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_DIV,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 5, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 5, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningFillTest) {
OpSignature fake_op_sig = {BuiltinOperator_FILL};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteFloat16});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt64, kTfLiteFloat16});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt8});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt64, kTfLiteInt16});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteBool});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteString});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningResizeBilinearTest) {
TfLiteResizeBilinearParams resize_bilinear_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_RESIZE_BILINEAR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&resize_bilinear_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
resize_bilinear_params.align_corners = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
resize_bilinear_params.align_corners = false;
resize_bilinear_params.half_pixel_centers = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_bilinear_params = {};
fake_op_sig = {
.op = BuiltinOperator_RESIZE_BILINEAR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&resize_bilinear_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
resize_bilinear_params.half_pixel_centers = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_bilinear_params = {};
fake_op_sig = {
.op = BuiltinOperator_RESIZE_BILINEAR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&resize_bilinear_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningResizeNearestNeighborTest) {
TfLiteResizeNearestNeighborParams resize_nearest_neighbor_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&resize_nearest_neighbor_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
resize_nearest_neighbor_params.align_corners = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_nearest_neighbor_params.align_corners = false;
resize_nearest_neighbor_params.half_pixel_centers = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_nearest_neighbor_params = {};
fake_op_sig = {
.op = BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&resize_nearest_neighbor_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
resize_nearest_neighbor_params.align_corners = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_nearest_neighbor_params = {};
fake_op_sig = {
.op = BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&resize_nearest_neighbor_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningAbsTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_ABS,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_ABS,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_ABS,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
fake_op_sig.ext_options.abs.input_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_ABS,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_ABS;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
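// SIGN: float32 is v1; int32 support arrived in v2.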
TEST(OpVersionTest, VersioningSignTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_SIGN;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SIGN;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
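// BATCH_MATMUL: float32 is v1, int8 is v2, int16 activations with int8
// weights are v3, and the hybrid case (float32 activations, int8 weights)
// only bumps to v4 once asymmetric_quantize_inputs is set.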
TEST(OpVersionTest, VersioningBatchMatMulTest) {
TfLiteBatchMatMulParams batch_mat_mul_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
batch_mat_mul_params = {};
fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
batch_mat_mul_params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
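// SQUARED_DIFFERENCE: float32 is v1; int8 support arrived in v2.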
TEST(OpVersionTest, VersioningSquaredDifferenceTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SQUARED_DIFFERENCE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_SQUARED_DIFFERENCE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
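// RSQRT: float32 is v1, int8 is v2, int16 is v3.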
TEST(OpVersionTest, VersioningRsqrtTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_RSQRT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
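// BROADCAST_TO: float32 maps to v2; int8 and int16 map to v3.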
TEST(OpVersionTest, VersioningBroadcastToTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_BROADCAST_TO,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_BROADCAST_TO,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_BROADCAST_TO,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
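// GELU: float32 is v1; int8 and uint8 are v2.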
TEST(OpVersionTest, VersioningGeluTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_GELU;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.op = BuiltinOperator_GELU;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.op = BuiltinOperator_GELU;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
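// UNIDIRECTIONAL_SEQUENCE_LSTM: float is v1, hybrid (int8 weights) is v2,
// asymmetric_quantize_inputs bumps it to v3, and diagonal_recurrent_tensors
// bumps it to v4.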
TEST(OpVersionTest, VersioningUnidirectionalLstmTest) {
TfLiteUnidirectionalSequenceLSTMParams params = {};
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32});
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.builtin_data = reinterpret_cast<void*>(¶ms);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteInt8});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
params.diagonal_recurrent_tensors = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
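// EXP: float32 is v1; int8 and int16 are v2.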
TEST(OpVersionTest, VersioningExpTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_EXP,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_EXP,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_EXP,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningLogTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_LOG;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningDynamicUpdateSliceTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_DYNAMIC_UPDATE_SLICE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteInt32});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteInt64});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/op_version.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/op_version_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
29ba1bee-a6f2-402f-9763-c48ea2b32abd | cpp | google/arolla | casting_registry | arolla/expr/operators/casting_registry.cc | arolla/expr/operators/casting_registry_test.cc | #include "arolla/expr/operators/casting_registry.h"
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/derived_qtype_cast_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/standard_type_properties/common_qtype.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::RegisteredOperator;
CastingRegistry* CastingRegistry::GetInstance() {
static absl::NoDestructor<CastingRegistry> instance(PrivateConstructorTag{});
return instance.get();
}
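// The registry maps each destination scalar QType to the registered
// "core.to_*" operator that performs the conversion.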
CastingRegistry::CastingRegistry(PrivateConstructorTag) {
cast_to_ops_ = {
{GetQType<bool>(), std::make_shared<RegisteredOperator>("core.to_bool")},
{GetQType<int32_t>(),
std::make_shared<RegisteredOperator>("core.to_int32")},
{GetQType<int64_t>(),
std::make_shared<RegisteredOperator>("core.to_int64")},
{GetQType<float>(),
std::make_shared<RegisteredOperator>("core.to_float32")},
{GetQType<double>(),
std::make_shared<RegisteredOperator>("core.to_float64")},
{GetWeakFloatQType(),
std::make_shared<RegisteredOperator>("core._to_weak_float")},
{GetQType<uint64_t>(),
std::make_shared<RegisteredOperator>("core.to_uint64")},
};
}
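// Builds the cast expression in stages: upcast weak floats to FLOAT64,
// convert the scalar qtype through a "core.to_*" operator, broadcast scalars
// to arrays via core.const_with_shape when a shape is provided, and finally
// wrap with core.to_optional if the target is optional.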
absl::StatusOr<ExprNodePtr> CastingRegistry::GetCast(
ExprNodePtr node, QTypePtr to_qtype, bool implicit_only,
std::optional<ExprNodePtr> shape_for_broadcasting) const {
const QType* from_qtype = node->qtype();
if (from_qtype == nullptr) {
return absl::FailedPreconditionError(absl::StrFormat(
"cannot cast expression %s with unknown QType", GetDebugSnippet(node)));
}
if (from_qtype == to_qtype) {
return node;
}
if (implicit_only &&
      !CanCastImplicitly(
          from_qtype, to_qtype,
          /*enable_broadcasting=*/shape_for_broadcasting.has_value())) {
return absl::InvalidArgumentError(
absl::StrFormat("implicit casting from %s to %s is not allowed",
from_qtype->name(), to_qtype->name()));
}
ASSIGN_OR_RETURN(auto from_scalar_qtype, GetScalarQType(from_qtype));
ASSIGN_OR_RETURN(auto to_scalar_qtype, GetScalarQType(to_qtype));
if (from_scalar_qtype == GetWeakFloatQType() &&
from_scalar_qtype != to_scalar_qtype) {
const auto upcast_op =
std::make_shared<expr::DerivedQTypeUpcastOperator>(node->qtype());
ASSIGN_OR_RETURN(node, CallOp(upcast_op, {node}));
from_scalar_qtype = GetQType<double>();
}
if (from_scalar_qtype != to_scalar_qtype) {
if (!cast_to_ops_.contains(to_scalar_qtype)) {
return absl::InvalidArgumentError(
absl::StrFormat("unable to find a cast from %s to %s",
from_qtype->name(), to_qtype->name()));
}
ASSIGN_OR_RETURN(node, CallOp(cast_to_ops_.at(to_scalar_qtype), {node}));
if (node->qtype() == to_qtype) {
return node;
}
}
if (!IsArrayLikeQType(node->qtype()) && IsArrayLikeQType(to_qtype)) {
if (!shape_for_broadcasting.has_value()) {
return absl::InvalidArgumentError(
absl::StrFormat("unable to cast non-array type %s into an array type "
"%s without shape for broadcasting provided",
from_qtype->name(), to_qtype->name()));
}
ASSIGN_OR_RETURN(
node, CallOp("core.const_with_shape", {*shape_for_broadcasting, node}));
if (node->qtype() == to_qtype) {
return node;
}
}
if (!IsOptionalQType(node->qtype()) && IsOptionalQType(to_qtype)) {
ASSIGN_OR_RETURN(node, CallOp("core.to_optional", {node}));
}
if (node->qtype() == to_qtype) {
return node;
} else {
return absl::InvalidArgumentError(
absl::StrFormat("unable to find a cast from %s to %s",
from_qtype->name(), to_qtype->name()));
}
}
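// Retries with broadcasting enabled on failure so the error message can
// distinguish "no common QType" from "no common QType without broadcasting".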
absl::StatusOr<QTypePtr> CastingRegistry::CommonType(
absl::Span<const QTypePtr> arg_types, bool enable_broadcasting) const {
if (arg_types.empty()) {
return absl::InvalidArgumentError(
"empty arg_types list passed to CommonType");
}
const QType* result_qtype = CommonQType(arg_types, enable_broadcasting);
if (result_qtype == nullptr) {
if (enable_broadcasting || !CommonType(arg_types, true).ok()) {
return absl::InvalidArgumentError(
absl::StrCat("no common QType for ", FormatTypeVector(arg_types)));
} else {
return absl::InvalidArgumentError(
absl::StrCat("no common QType without broadcasting for ",
FormatTypeVector(arg_types)));
}
}
return result_qtype;
}
} | #include "arolla/expr/operators/casting_registry.h"
#include <cstdint>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/derived_qtype_cast_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/operators/bootstrap_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/bytes.h"
namespace arolla::expr_operators {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::CallOp;
using ::arolla::expr::Leaf;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::HasSubstr;
TEST(CastingRegistryTest, CommonType) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<int32_t>()}),
IsOkAndHolds(GetQType<int32_t>()));
EXPECT_THAT(reg->CommonType({GetQType<uint64_t>(), GetQType<uint64_t>()}),
IsOkAndHolds(GetQType<uint64_t>()));
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<int64_t>()}),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(
reg->CommonType({GetQType<int32_t>(), GetOptionalQType<int32_t>()}),
IsOkAndHolds(GetOptionalQType<int32_t>()));
EXPECT_THAT(
reg->CommonType({GetQType<uint64_t>(), GetOptionalQType<uint64_t>()}),
IsOkAndHolds(GetOptionalQType<uint64_t>()));
EXPECT_THAT(
reg->CommonType({GetQType<int32_t>(), GetOptionalQType<int64_t>()}),
IsOkAndHolds(GetOptionalQType<int64_t>()));
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<Bytes>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no common QType for (INT32,BYTES)")));
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<uint64_t>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no common QType for (INT32,UINT64)")));
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<int64_t>()}),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(
reg->CommonType({GetQType<int32_t>(), GetQType<Bytes>()}).status(),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
reg->CommonType({GetOptionalQType<int32_t>(), GetQType<int64_t>()}),
IsOkAndHolds(GetOptionalQType<int64_t>()));
}
TEST(CastingRegistryTest, GetCast) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()));
EXPECT_THAT(reg->GetCast(x, GetOptionalQType<int64_t>(),
                           /*implicit_only=*/true),
IsOkAndHolds(EqualsExpr(
CallOp("core.to_optional", {CallOp("core.to_int64", {x})}))));
}
TEST(CastingRegistryTest, GetCastWithBroadcasting) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
GetDenseArrayQType<int64_t>();
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()));
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
EXPECT_THAT(
reg->GetCast(x, GetDenseArrayQType<int64_t>(),
                       /*implicit_only=*/true, shape),
IsOkAndHolds(EqualsExpr(CallOp("core.const_with_shape",
{shape, CallOp("core.to_int64", {x})}))));
}
TEST(CastingRegistryTest, GetCastFromWeakType) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
expr::ExprOperatorPtr upcast_op =
std::make_shared<expr::DerivedQTypeUpcastOperator>(GetWeakFloatQType());
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetWeakFloatQType()));
EXPECT_THAT(reg->GetCast(x, GetOptionalQType<double>(),
                             /*implicit_only=*/true),
IsOkAndHolds(EqualsExpr(
CallOp("core.to_optional", {CallOp(upcast_op, {x})}))));
}
{
expr::ExprOperatorPtr opt_upcast_op =
std::make_shared<expr::DerivedQTypeUpcastOperator>(
GetOptionalWeakFloatQType());
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetOptionalWeakFloatQType()));
EXPECT_THAT(reg->GetCast(x, GetOptionalQType<double>(),
                             /*implicit_only=*/true),
IsOkAndHolds(EqualsExpr(CallOp(opt_upcast_op, {x}))));
}
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetWeakFloatQType()));
EXPECT_THAT(reg->GetCast(x, GetOptionalWeakFloatQType(),
                             /*implicit_only=*/true),
IsOkAndHolds(EqualsExpr(CallOp("core.to_optional", {x}))));
}
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
GetDenseArrayQType<float>();
EXPECT_THAT(
reg->GetCast(x, GetDenseArrayQType<float>(),
                         /*implicit_only=*/true, shape),
IsOkAndHolds(EqualsExpr(CallOp(
"core.const_with_shape",
{shape, CallOp("core.to_float32", {CallOp(upcast_op, {x})})}))));
}
}
TEST(CastingRegistryTest, GetCastToWeakType) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
EXPECT_THAT(reg->GetCast(x, GetWeakFloatQType(),
                             /*implicit_only=*/false),
IsOkAndHolds(EqualsExpr(CoreToWeakFloat(x))));
}
{
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetOptionalQType<float>()));
EXPECT_THAT(reg->GetCast(x, GetOptionalWeakFloatQType(),
                             /*implicit_only=*/false),
IsOkAndHolds(EqualsExpr(CoreToWeakFloat(x))));
}
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
EXPECT_THAT(reg->GetCast(x, GetOptionalWeakFloatQType(),
                             /*implicit_only=*/false),
IsOkAndHolds(EqualsExpr(
CallOp("core.to_optional", {CoreToWeakFloat(x)}))));
}
{
GetDenseArrayQType<float>();
GetDenseArrayWeakFloatQType();
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetDenseArrayQType<float>()));
EXPECT_THAT(reg->GetCast(x, GetDenseArrayWeakFloatQType(),
                             /*implicit_only=*/false),
IsOkAndHolds(EqualsExpr(CoreToWeakFloat(x))));
}
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
EXPECT_THAT(
reg->GetCast(x, GetWeakFloatQType(),
                     /*implicit_only=*/true),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"implicit casting from FLOAT32 to WEAK_FLOAT is not allowed")));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/casting_registry.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/casting_registry_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
fcf11a49-e7c8-4437-8649-1838443687e3 | cpp | google/cel-cpp | const_value_step | eval/eval/const_value_step.cc | eval/eval/const_value_step_test.cc | #include "eval/eval/const_value_step.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "base/ast_internal/expr.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "eval/eval/compiler_constant_step.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "internal/status_macros.h"
#include "runtime/internal/convert_constant.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::ast_internal::Constant;
using ::cel::runtime_internal::ConvertConstant;
}
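// The factories below wrap a constant in a (Direct)CompilerConstantStep; the
// Constant overload first converts the AST constant into a runtime cel::Value
// via ConvertConstant.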
std::unique_ptr<DirectExpressionStep> CreateConstValueDirectStep(
cel::Value value, int64_t id) {
return std::make_unique<DirectCompilerConstantStep>(std::move(value), id);
}
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateConstValueStep(
cel::Value value, int64_t expr_id, bool comes_from_ast) {
return std::make_unique<CompilerConstantStep>(std::move(value), expr_id,
comes_from_ast);
}
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateConstValueStep(
const Constant& value, int64_t expr_id, cel::ValueManager& value_factory,
bool comes_from_ast) {
CEL_ASSIGN_OR_RETURN(cel::Value converted_value,
ConvertConstant(value, value_factory));
return std::make_unique<CompilerConstantStep>(std::move(converted_value),
expr_id, comes_from_ast);
}
} | #include "eval/eval/const_value_step.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "base/ast_internal/expr.h"
#include "base/type_provider.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/evaluator_core.h"
#include "eval/internal/errors.h"
#include "eval/public/activation.h"
#include "eval/public/cel_value.h"
#include "eval/public/testing/matchers.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::cel::TypeProvider;
using ::cel::ast_internal::Constant;
using ::cel::ast_internal::Expr;
using ::cel::ast_internal::NullValue;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::testing::Eq;
using ::testing::HasSubstr;
absl::StatusOr<CelValue> RunConstantExpression(
const Expr* expr, const Constant& const_expr, google::protobuf::Arena* arena,
cel::ValueManager& value_factory) {
CEL_ASSIGN_OR_RETURN(
auto step, CreateConstValueStep(const_expr, expr->id(), value_factory));
google::api::expr::runtime::ExecutionPath path;
path.push_back(std::move(step));
CelExpressionFlatImpl impl(
      FlatExpression(std::move(path), /*comprehension_slot_count=*/0,
TypeProvider::Builtin(), cel::RuntimeOptions{}));
google::api::expr::runtime::Activation activation;
return impl.Evaluate(activation, arena);
}
class ConstValueStepTest : public ::testing::Test {
public:
ConstValueStepTest()
: value_factory_(ProtoMemoryManagerRef(&arena_),
cel::TypeProvider::Builtin()) {}
protected:
google::protobuf::Arena arena_;
cel::common_internal::LegacyValueManager value_factory_;
};
TEST_F(ConstValueStepTest, TestEvaluationConstInt64) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_int64_value(1);
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
ASSERT_TRUE(value.IsInt64());
EXPECT_THAT(value.Int64OrDie(), Eq(1));
}
TEST_F(ConstValueStepTest, TestEvaluationConstUint64) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_uint64_value(1);
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
ASSERT_TRUE(value.IsUint64());
EXPECT_THAT(value.Uint64OrDie(), Eq(1));
}
TEST_F(ConstValueStepTest, TestEvaluationConstBool) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_bool_value(true);
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
ASSERT_TRUE(value.IsBool());
EXPECT_THAT(value.BoolOrDie(), Eq(true));
}
TEST_F(ConstValueStepTest, TestEvaluationConstNull) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_null_value(nullptr);
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
EXPECT_TRUE(value.IsNull());
}
TEST_F(ConstValueStepTest, TestEvaluationConstString) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_string_value("test");
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
ASSERT_TRUE(value.IsString());
EXPECT_THAT(value.StringOrDie().value(), Eq("test"));
}
TEST_F(ConstValueStepTest, TestEvaluationConstDouble) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_double_value(1.0);
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
ASSERT_TRUE(value.IsDouble());
EXPECT_THAT(value.DoubleOrDie(), testing::DoubleEq(1.0));
}
TEST_F(ConstValueStepTest, TestEvaluationConstBytes) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_bytes_value("test");
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
ASSERT_TRUE(value.IsBytes());
EXPECT_THAT(value.BytesOrDie().value(), Eq("test"));
}
TEST_F(ConstValueStepTest, TestEvaluationConstDuration) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_duration_value(absl::Seconds(5) + absl::Nanoseconds(2000));
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
EXPECT_THAT(value,
test::IsCelDuration(absl::Seconds(5) + absl::Nanoseconds(2000)));
}
TEST_F(ConstValueStepTest, TestEvaluationConstDurationOutOfRange) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_duration_value(cel::runtime_internal::kDurationHigh);
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
EXPECT_THAT(value,
test::IsCelError(StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("out of range"))));
}
TEST_F(ConstValueStepTest, TestEvaluationConstTimestamp) {
Expr expr;
auto& const_expr = expr.mutable_const_expr();
const_expr.set_time_value(absl::FromUnixSeconds(3600) +
absl::Nanoseconds(1000));
auto status =
RunConstantExpression(&expr, const_expr, &arena_, value_factory_);
ASSERT_OK(status);
auto value = status.value();
EXPECT_THAT(value, test::IsCelTimestamp(absl::FromUnixSeconds(3600) +
absl::Nanoseconds(1000)));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/const_value_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/const_value_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
7d226d97-756f-49e0-8122-2a6f998635ce | cpp | google/cel-cpp | protobuf_descriptor_type_provider | eval/public/structs/protobuf_descriptor_type_provider.cc | eval/public/structs/protobuf_descriptor_type_provider_test.cc | #include "eval/public/structs/protobuf_descriptor_type_provider.h"
#include <memory>
#include <utility>
#include "google/protobuf/descriptor.h"
#include "absl/synchronization/mutex.h"
#include "eval/public/structs/proto_message_type_adapter.h"
namespace google::api::expr::runtime {
absl::optional<LegacyTypeAdapter> ProtobufDescriptorProvider::ProvideLegacyType(
absl::string_view name) const {
const ProtoMessageTypeAdapter* result = GetTypeAdapter(name);
if (result == nullptr) {
return absl::nullopt;
}
return LegacyTypeAdapter(result, result);
}
absl::optional<const LegacyTypeInfoApis*>
ProtobufDescriptorProvider::ProvideLegacyTypeInfo(
absl::string_view name) const {
const ProtoMessageTypeAdapter* result = GetTypeAdapter(name);
if (result == nullptr) {
return absl::nullopt;
}
return result;
}
std::unique_ptr<ProtoMessageTypeAdapter>
ProtobufDescriptorProvider::CreateTypeAdapter(absl::string_view name) const {
const google::protobuf::Descriptor* descriptor =
descriptor_pool_->FindMessageTypeByName(name);
if (descriptor == nullptr) {
return nullptr;
}
return std::make_unique<ProtoMessageTypeAdapter>(descriptor,
message_factory_);
}
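// Adapters are created lazily and memoized under mu_, so repeated lookups for
// the same type name (including unknown names) return the cached result.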
const ProtoMessageTypeAdapter* ProtobufDescriptorProvider::GetTypeAdapter(
absl::string_view name) const {
absl::MutexLock lock(&mu_);
auto it = type_cache_.find(name);
if (it != type_cache_.end()) {
return it->second.get();
}
auto type_provider = CreateTypeAdapter(name);
const ProtoMessageTypeAdapter* result = type_provider.get();
type_cache_[name] = std::move(type_provider);
return result;
}
} | #include "eval/public/structs/protobuf_descriptor_type_provider.h"
#include <optional>
#include "google/protobuf/wrappers.pb.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/legacy_type_info_apis.h"
#include "eval/public/testing/matchers.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::extensions::ProtoMemoryManager;
TEST(ProtobufDescriptorProvider, Basic) {
ProtobufDescriptorProvider provider(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory());
google::protobuf::Arena arena;
auto manager = ProtoMemoryManager(&arena);
auto type_adapter = provider.ProvideLegacyType("google.protobuf.Int64Value");
absl::optional<const LegacyTypeInfoApis*> type_info =
provider.ProvideLegacyTypeInfo("google.protobuf.Int64Value");
ASSERT_TRUE(type_adapter.has_value());
ASSERT_TRUE(type_adapter->mutation_apis() != nullptr);
ASSERT_TRUE(type_info.has_value());
ASSERT_TRUE(type_info != nullptr);
google::protobuf::Int64Value int64_value;
CelValue::MessageWrapper int64_cel_value(&int64_value, *type_info);
EXPECT_EQ((*type_info)->GetTypename(int64_cel_value),
"google.protobuf.Int64Value");
ASSERT_TRUE(type_adapter->mutation_apis()->DefinesField("value"));
ASSERT_OK_AND_ASSIGN(CelValue::MessageWrapper::Builder value,
type_adapter->mutation_apis()->NewInstance(manager));
ASSERT_OK(type_adapter->mutation_apis()->SetField(
"value", CelValue::CreateInt64(10), manager, value));
ASSERT_OK_AND_ASSIGN(
CelValue adapted,
type_adapter->mutation_apis()->AdaptFromWellKnownType(manager, value));
EXPECT_THAT(adapted, test::IsCelInt64(10));
}
TEST(ProtobufDescriptorProvider, MemoizesAdapters) {
ProtobufDescriptorProvider provider(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory());
auto type_adapter = provider.ProvideLegacyType("google.protobuf.Int64Value");
ASSERT_TRUE(type_adapter.has_value());
ASSERT_TRUE(type_adapter->mutation_apis() != nullptr);
auto type_adapter2 = provider.ProvideLegacyType("google.protobuf.Int64Value");
ASSERT_TRUE(type_adapter2.has_value());
EXPECT_EQ(type_adapter->mutation_apis(), type_adapter2->mutation_apis());
EXPECT_EQ(type_adapter->access_apis(), type_adapter2->access_apis());
}
TEST(ProtobufDescriptorProvider, NotFound) {
ProtobufDescriptorProvider provider(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory());
auto type_adapter = provider.ProvideLegacyType("UnknownType");
auto type_info = provider.ProvideLegacyTypeInfo("UnknownType");
ASSERT_FALSE(type_adapter.has_value());
ASSERT_FALSE(type_info.has_value());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/protobuf_descriptor_type_provider.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/protobuf_descriptor_type_provider_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
700af8f9-e282-4548-bddd-8e9292091cda | cpp | google/arolla | annotation_utils | arolla/expr/annotation_utils.cc | arolla/expr/annotation_utils_test.cc | #include "arolla/expr/annotation_utils.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
absl::StatusOr<bool> IsAnnotation(const ExprNodePtr& node) {
ASSIGN_OR_RETURN(auto op, DecayRegisteredOperator(node->op()));
return !node->node_deps().empty() &&
dynamic_cast<const AnnotationExprOperatorTag*>(op.get()) != nullptr;
}
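// An annotation node keeps the annotated expression as its first dependency,
// so stripping repeatedly descends into node_deps()[0].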
absl::StatusOr<ExprNodePtr> StripTopmostAnnotations(ExprNodePtr expr) {
ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(expr));
while (is_annotation) {
expr = expr->node_deps()[0];
ASSIGN_OR_RETURN(is_annotation, IsAnnotation(expr));
}
return expr;
}
absl::StatusOr<ExprNodePtr> StripAnnotations(const ExprNodePtr& expr) {
return Transform(
expr, [](const ExprNodePtr& node) -> absl::StatusOr<ExprNodePtr> {
ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(node));
DCHECK(!is_annotation ||
!node->node_deps().empty());
return is_annotation ? node->node_deps()[0] : node;
});
}
bool IsQTypeAnnotation(const ExprNodePtr& node) {
auto op = DecayRegisteredOperator(node->op()).value_or(nullptr);
return op != nullptr && typeid(*op) == typeid(QTypeAnnotation) &&
node->node_deps().size() == 2;
}
bool IsNameAnnotation(const ExprNodePtr& node) {
auto op = DecayRegisteredOperator(node->op()).value_or(nullptr);
if (op == nullptr || typeid(*op) != typeid(NameAnnotation) ||
node->node_deps().size() != 2) {
return false;
}
const auto& qvalue = node->node_deps()[1]->qvalue();
return qvalue.has_value() && qvalue->GetType() == GetQType<Text>();
}
bool IsExportAnnotation(const ExprNodePtr& node) {
auto op = DecayRegisteredOperator(node->op()).value_or(nullptr);
if (op == nullptr || ((typeid(*op) != typeid(ExportAnnotation) ||
node->node_deps().size() != 2) &&
(typeid(*op) != typeid(ExportValueAnnotation) ||
node->node_deps().size() != 3))) {
return false;
}
const auto& qvalue = node->node_deps()[1]->qvalue();
return qvalue.has_value() && qvalue->GetType() == GetQType<Text>();
}
const QType* ReadQTypeAnnotation(const ExprNodePtr& node) {
if (IsQTypeAnnotation(node)) {
DCHECK_EQ(node->node_deps().size(), 2);
if (const auto& qvalue = node->node_deps()[1]->qvalue()) {
if (qvalue->GetType() == GetQTypeQType()) {
return qvalue->UnsafeAs<QTypePtr>();
}
}
}
return nullptr;
}
absl::string_view ReadNameAnnotation(const ExprNodePtr& node) {
if (IsNameAnnotation(node)) {
return node->node_deps()[1]->qvalue()->UnsafeAs<Text>().view();
}
return {};
}
absl::string_view ReadExportAnnotationTag(const ExprNodePtr& node) {
if (IsExportAnnotation(node)) {
return node->node_deps()[1]->qvalue()->UnsafeAs<Text>().view();
}
return {};
}
ExprNodePtr ReadExportAnnotationValue(const ExprNodePtr& node) {
if (IsExportAnnotation(node)) {
if (node->node_deps().size() == 2) {
return node->node_deps()[0];
} else if (node->node_deps().size() == 3) {
return node->node_deps()[2];
}
}
return nullptr;
}
} | #include "arolla/expr/annotation_utils.h"
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/text.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsExpr;
class DummyOp : public BasicExprOperator {
public:
DummyOp(absl::string_view display_name,
const ExprOperatorSignature& signature)
: BasicExprOperator(display_name, signature, "docstring",
FingerprintHasher("arolla::expr::DummyOp")
.Combine(display_name, signature)
.Finish()) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const override {
return GetQType<int>();
}
};
class DummyAnnotation : public AnnotationExprOperatorTag,
public BasicExprOperator {
public:
DummyAnnotation(absl::string_view display_name,
const ExprOperatorSignature& signature)
: BasicExprOperator(display_name, signature, "docstring",
FingerprintHasher("arolla::expr::DummyAnnotation")
.Combine(display_name, signature)
.Finish()) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const override {
return input_qtypes.empty() ? GetQType<int>() : input_qtypes[0];
}
};
TEST(AnnotationUtilsTest, IsAnnotation) {
{
auto op =
std::make_shared<DummyAnnotation>("id", ExprOperatorSignature{{"x"}});
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x")}));
EXPECT_THAT(IsAnnotation(expr), IsOkAndHolds(true));
}
{
auto op = RegisterOperator<DummyAnnotation>(
"annotation_utils_test.is_annotation.registered_annotation", "id",
ExprOperatorSignature{{"x"}});
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x")}));
EXPECT_THAT(IsAnnotation(expr), IsOkAndHolds(true));
}
{
auto op =
std::make_shared<DummyAnnotation>("stub", ExprOperatorSignature{});
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {}));
EXPECT_THAT(IsAnnotation(expr), IsOkAndHolds(false));
}
{
auto op = std::make_shared<DummyOp>("id", ExprOperatorSignature{{"x"}});
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x")}));
EXPECT_THAT(IsAnnotation(expr), IsOkAndHolds(false));
}
{
auto op = std::make_shared<RegisteredOperator>(
"annotation_utils_test.is_annotation.missing");
auto expr =
ExprNode::UnsafeMakeOperatorNode(std::move(op), {Leaf("x")}, {});
EXPECT_THAT(IsAnnotation(expr), StatusIs(absl::StatusCode::kNotFound));
}
}
TEST(AnnotationUtilsTest, StripTopmostAnnotations) {
auto dummy_annotation = std::make_shared<DummyAnnotation>(
"dummy_annotation", ExprOperatorSignature{{"x"}, {"y"}});
auto dummy_op = std::make_shared<DummyOp>(
"dummy_op", ExprOperatorSignature{{"x"}, {"y"}});
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(dummy_annotation,
{CallOp(dummy_annotation,
{CallOp(dummy_op,
{CallOp(dummy_annotation, {Leaf("x"), Leaf("a")}),
Leaf("y")}),
Leaf("b")}),
Leaf("c")}));
ASSERT_OK_AND_ASSIGN(auto actual, StripTopmostAnnotations(expr));
ASSERT_OK_AND_ASSIGN(
auto expected,
CallOp(dummy_op,
{CallOp(dummy_annotation, {Leaf("x"), Leaf("a")}), Leaf("y")}));
EXPECT_THAT(actual, EqualsExpr(expected));
}
TEST(AnnotationUtilsTest, StripAnnotations) {
auto dummy_annotation = std::make_shared<DummyAnnotation>(
"dummy_annotation", ExprOperatorSignature{{"x"}, {"y"}});
auto dummy_op = std::make_shared<DummyOp>(
"dummy_op", ExprOperatorSignature{{"x"}, {"y"}});
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(dummy_annotation,
{CallOp(dummy_annotation,
{CallOp(dummy_op,
{CallOp(dummy_annotation, {Leaf("x"), Leaf("a")}),
Leaf("y")}),
Leaf("b")}),
Leaf("c")}));
ASSERT_OK_AND_ASSIGN(auto actual, StripAnnotations(expr));
ASSERT_OK_AND_ASSIGN(auto expected, CallOp(dummy_op, {Leaf("x"), Leaf("y")}));
EXPECT_THAT(actual, EqualsExpr(expected));
}
TEST(AnnotationUtilsTest, IsQTypeAnnotation) {
{
auto op = QTypeAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Placeholder("y")}));
EXPECT_TRUE(IsQTypeAnnotation(expr));
}
{
auto op = std::make_shared<QTypeAnnotation>("aux_policy");
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Placeholder("y")}));
EXPECT_TRUE(IsQTypeAnnotation(expr));
}
{
auto op = std::make_shared<DummyAnnotation>(
"annotation.name", ExprOperatorSignature{{"expr"}, {"qtype"}});
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Placeholder("y")}));
EXPECT_FALSE(IsQTypeAnnotation(expr));
}
{
auto op = QTypeAnnotation::Make();
auto expr =
ExprNode::UnsafeMakeOperatorNode(std::move(op), {Leaf("x")}, {});
EXPECT_FALSE(IsQTypeAnnotation(expr));
}
EXPECT_FALSE(IsQTypeAnnotation(Leaf("x")));
}
TEST(AnnotationUtilsTest, IsNameAnnotation) {
{
auto op = NameAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("name"))}));
EXPECT_TRUE(IsNameAnnotation(expr));
}
{
auto op = std::make_shared<NameAnnotation>("aux_policy");
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("name"))}));
EXPECT_TRUE(IsNameAnnotation(expr));
}
{
auto op = std::make_shared<DummyAnnotation>(
"annotation.name", ExprOperatorSignature{{"expr"}, {"name"}});
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("name"))}));
EXPECT_FALSE(IsNameAnnotation(expr));
}
{
auto op = NameAnnotation::Make();
auto expr =
ExprNode::UnsafeMakeOperatorNode(std::move(op), {Leaf("x")}, {});
EXPECT_FALSE(IsNameAnnotation(expr));
}
{
auto op = NameAnnotation::Make();
auto expr = ExprNode::UnsafeMakeOperatorNode(
std::move(op), {Leaf("x"), Placeholder("y")}, {});
EXPECT_FALSE(IsNameAnnotation(expr));
}
{
auto op = NameAnnotation::Make();
auto expr = ExprNode::UnsafeMakeOperatorNode(
std::move(op), {Leaf("x"), Literal(Bytes("name"))}, {});
EXPECT_FALSE(IsNameAnnotation(expr));
}
EXPECT_FALSE(IsNameAnnotation(Leaf("x")));
}
TEST(AnnotationUtilsTest, IsExportAnnotation) {
{
auto op = ExportAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("tag"))}));
EXPECT_TRUE(IsExportAnnotation(expr));
}
{
auto op = ExportValueAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Literal(Text("tag")),
Placeholder("value")}));
EXPECT_TRUE(IsExportAnnotation(expr));
}
{
auto op = std::make_shared<DummyAnnotation>(
"annotation.export", ExprOperatorSignature{{"expr"}, {"export_tag"}});
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("tag"))}));
EXPECT_FALSE(IsExportAnnotation(expr));
}
{
auto op = ExportAnnotation::Make();
auto expr = ExprNode::UnsafeMakeOperatorNode(
std::move(op), {Leaf("x"), Literal(Text("tag")), Placeholder("value")},
{});
EXPECT_FALSE(IsExportAnnotation(expr));
}
{
auto op = ExportAnnotation::Make();
auto expr = ExprNode::UnsafeMakeOperatorNode(
std::move(op), {Leaf("x"), Literal(Text("tag")), Placeholder("value")},
{});
EXPECT_FALSE(IsExportAnnotation(expr));
}
{
auto op = ExportAnnotation::Make();
auto expr = ExprNode::UnsafeMakeOperatorNode(
std::move(op), {Leaf("x"), Placeholder("tag")}, {});
EXPECT_FALSE(IsExportAnnotation(expr));
}
{
auto op = ExportAnnotation::Make();
auto expr = ExprNode::UnsafeMakeOperatorNode(
std::move(op), {Leaf("x"), Literal(Bytes("tag"))}, {});
EXPECT_FALSE(IsExportAnnotation(expr));
}
EXPECT_FALSE(IsExportAnnotation(Leaf("x")));
}
TEST(AnnotationUtilsTest, ReadQTypeAnnotation) {
{
auto op = QTypeAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(GetQTypeQType())}));
EXPECT_EQ(ReadQTypeAnnotation(expr), GetQTypeQType());
}
{
auto op = QTypeAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Placeholder("y")}));
EXPECT_EQ(ReadQTypeAnnotation(expr), nullptr);
}
{
auto op = std::make_shared<DummyAnnotation>(
"annotation.qtype", ExprOperatorSignature{{"expr"}, {"qtype"}});
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(GetQTypeQType())}));
EXPECT_EQ(ReadQTypeAnnotation(expr), nullptr);
}
EXPECT_EQ(ReadQTypeAnnotation(Leaf("x")), nullptr);
}
TEST(AnnotationUtilsTest, ReadNameAnnotation) {
{
auto op = NameAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("name"))}));
EXPECT_EQ(ReadNameAnnotation(expr), "name");
}
{
auto op = std::make_shared<DummyAnnotation>(
"annotation.name", ExprOperatorSignature{{"expr"}, {"name"}});
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("name"))}));
EXPECT_EQ(ReadNameAnnotation(expr), "");
}
EXPECT_EQ(ReadNameAnnotation(Leaf("x")), "");
}
TEST(AnnotationUtilsTest, ReadExportAnnotation) {
{
auto op = ExportAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("tag"))}));
EXPECT_EQ(ReadExportAnnotationTag(expr), "tag");
EXPECT_THAT(ReadExportAnnotationValue(expr), EqualsExpr(Leaf("x")));
}
{
auto op = ExportValueAnnotation::Make();
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Literal(Text("tag")),
Placeholder("value")}));
EXPECT_EQ(ReadExportAnnotationTag(expr), "tag");
EXPECT_THAT(ReadExportAnnotationValue(expr),
EqualsExpr(Placeholder("value")));
}
{
auto op = std::make_shared<DummyAnnotation>(
"annotation.export", ExprOperatorSignature{{"expr"}, {"export_tag"}});
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Leaf("x"), Literal(Text("tag"))}));
EXPECT_EQ(ReadExportAnnotationTag(expr), "");
EXPECT_EQ(ReadExportAnnotationValue(expr), nullptr);
}
EXPECT_EQ(ReadExportAnnotationTag(Leaf("x")), "");
EXPECT_EQ(ReadExportAnnotationValue(Leaf("x")), nullptr);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/annotation_utils.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/annotation_utils_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
e0201d58-2450-421f-960a-aa40ebcad6aa | cpp | google/tensorstore | result_sender | tensorstore/util/execution/result_sender.h | tensorstore/util/execution/result_sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_RESULT_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_RESULT_SENDER_H_
#include <functional>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_result {
template <typename Receiver, typename = void, typename = void, typename = void,
typename = void>
struct IsResultReceiver : public std::false_type {};
template <typename Receiver, typename T>
struct IsResultReceiver<
Receiver, T,
decltype(execution::set_value(std::declval<Receiver&>(),
std::declval<T>())),
decltype(execution::set_error(std::declval<Receiver&>(),
std::declval<absl::Status>())),
decltype(execution::set_cancel(std::declval<Receiver&>()))>
: public std::true_type {};
}
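// The overloads below let Result<T> (or std::reference_wrapper<Result<T>>)
// act as a receiver: set_value emplaces the value, set_error stores the
// status, and set_cancel stores absl::CancelledError.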
template <typename T, typename... V>
std::enable_if_t<((std::is_same_v<void, T> && sizeof...(V) == 0) ||
std::is_constructible_v<T, V&&...>)>
set_value(Result<T>& r, V&&... v) {
r.emplace(std::forward<V>(v)...);
}
template <typename T, typename... V>
std::enable_if_t<((std::is_same_v<void, T> && sizeof...(V) == 0) ||
std::is_constructible_v<T, V&&...>)>
set_value(std::reference_wrapper<Result<T>> r, V&&... v) {
set_value(r.get(), std::forward<V>(v)...);
}
template <typename T>
void set_error(Result<T>& r, absl::Status status) {
r = std::move(status);
}
template <typename T>
void set_error(std::reference_wrapper<Result<T>> r, absl::Status status) {
set_error(r.get(), std::move(status));
}
template <typename T>
void set_cancel(Result<T>& r) {
r = absl::CancelledError("");
}
template <typename T>
void set_cancel(std::reference_wrapper<Result<T>> r) {
set_cancel(r.get());
}
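// submit() also lets Result<T> act as a sender: a stored value maps to
// set_value, kCancelled maps to set_cancel, and any other error maps to
// set_error.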
template <typename T, typename Receiver>
std::enable_if_t<internal_result::IsResultReceiver<Receiver, T>::value>
submit(Result<T>& r, Receiver&& receiver) {
if (r.has_value()) {
execution::set_value(receiver, r.value());
} else {
auto status = r.status();
if (status.code() == absl::StatusCode::kCancelled) {
execution::set_cancel(receiver);
} else {
execution::set_error(receiver, std::move(status));
}
}
}
template <typename T, typename Receiver>
std::enable_if_t<internal_result::IsResultReceiver<Receiver, T>::value>
submit(std::reference_wrapper<Result<T>> r, Receiver&& receiver) {
submit(r.get(), std::forward<Receiver>(receiver));
}
}
#endif | #include "tensorstore/util/execution/result_sender.h"
#include <functional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/any_sender.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::StatusIs;
TEST(ResultReceiverTest, SetCancel) {
Result<int> result = absl::InternalError("");
tensorstore::AnyReceiver<absl::Status, int> receiver{std::ref(result)};
tensorstore::execution::set_cancel(receiver);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kCancelled));
}
TEST(ResultReceiverTest, SetValue) {
Result<int> result = absl::InternalError("");
tensorstore::AnyReceiver<absl::Status, int> receiver{std::ref(result)};
tensorstore::execution::set_value(receiver, 3);
EXPECT_EQ(result, Result<int>(3));
}
TEST(ResultReceiverTest, SetError) {
Result<int> result = absl::InternalError("");
tensorstore::AnyReceiver<absl::Status, int> receiver{std::ref(result)};
tensorstore::execution::set_error(receiver, absl::UnknownError("message"));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kUnknown, "message"));
}
TEST(ResultSenderTest, SetValue) {
Result<int> result(3);
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(result),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3"));
}
TEST(ResultSenderTest, SetError) {
Result<int> result{absl::UnknownError("")};
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(result),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: UNKNOWN: "));
}
TEST(ResultSenderTest, SetCancel) {
Result<int> result{absl::CancelledError("")};
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(result),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/result_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/result_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
86b2b763-0abd-4bf5-99c5-c1caff3cceac | cpp | google/glog | utilities | src/utilities.cc | src/utilities_unittest.cc | #define _GNU_SOURCE 1
#include "utilities.h"
#include <atomic>
#include <cerrno>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <cstring>  // memset, strlen, strrchr
#include "base/googleinit.h"
#include "config.h"
#include "glog/flags.h"
#include "glog/logging.h"
#include "stacktrace.h"
#include "symbolize.h"
#ifdef GLOG_OS_ANDROID
# include <android/log.h>
#endif
#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif
#if defined(HAVE_SYSCALL_H)
# include <syscall.h>
#elif defined(HAVE_SYS_SYSCALL_H)
# include <sys/syscall.h>
#endif
#ifdef HAVE_SYSLOG_H
# include <syslog.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_PWD_H
# include <pwd.h>
#endif
#if defined(HAVE___PROGNAME)
extern char* __progname;
#endif
using std::string;
namespace google {
static const char* g_program_invocation_short_name = nullptr;
bool IsGoogleLoggingInitialized() {
return g_program_invocation_short_name != nullptr;
}
inline namespace glog_internal_namespace_ {
constexpr int FileDescriptor::InvalidHandle;
void AlsoErrorWrite(LogSeverity severity, const char* tag,
const char* message) noexcept {
#if defined(GLOG_OS_WINDOWS)
(void)severity;
(void)tag;
::OutputDebugStringA(message);
#elif defined(GLOG_OS_ANDROID)
constexpr int android_log_levels[] = {
ANDROID_LOG_INFO,
ANDROID_LOG_WARN,
ANDROID_LOG_ERROR,
ANDROID_LOG_FATAL,
};
__android_log_write(android_log_levels[severity], tag, message);
#else
(void)severity;
(void)tag;
(void)message;
#endif
}
}
}
#ifdef HAVE_STACKTRACE
# include "base/commandlineflags.h"
# include "stacktrace.h"
# include "symbolize.h"
namespace google {
using DebugWriter = void(const char*, void*);
static const int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
static void DebugWriteToStderr(const char* data, void*) {
if (write(fileno(stderr), data, strlen(data)) < 0) {
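    // The write() result is deliberately ignored; there is nothing useful to
    // do about a failure while dumping a crash stack trace.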
}
AlsoErrorWrite(GLOG_FATAL,
glog_internal_namespace_::ProgramInvocationShortName(), data);
}
static void DebugWriteToString(const char* data, void* arg) {
reinterpret_cast<string*>(arg)->append(data);
}
# ifdef HAVE_SYMBOLIZE
static void DumpPCAndSymbol(DebugWriter* writerfn, void* arg, void* pc,
const char* const prefix) {
char tmp[1024];
const char* symbol = "(unknown)";
if (Symbolize(reinterpret_cast<char*>(pc) - 1, tmp, sizeof(tmp))) {
symbol = tmp;
}
char buf[1024];
std::snprintf(buf, sizeof(buf), "%s@ %*p %s\n", prefix,
kPrintfPointerFieldWidth, pc, symbol);
writerfn(buf, arg);
}
# endif
static void DumpPC(DebugWriter* writerfn, void* arg, void* pc,
const char* const prefix) {
char buf[100];
std::snprintf(buf, sizeof(buf), "%s@ %*p\n", prefix, kPrintfPointerFieldWidth,
pc);
writerfn(buf, arg);
}
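// Captures up to 32 frames (skipping skip_count caller frames plus this
// function) and writes each PC, symbolized when FLAGS_symbolize_stacktrace
// allows, through writerfn.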
static void DumpStackTrace(int skip_count, DebugWriter* writerfn, void* arg) {
void* stack[32];
int depth = GetStackTrace(stack, ARRAYSIZE(stack), skip_count + 1);
for (int i = 0; i < depth; i++) {
# if defined(HAVE_SYMBOLIZE)
if (FLAGS_symbolize_stacktrace) {
DumpPCAndSymbol(writerfn, arg, stack[i], " ");
} else {
DumpPC(writerfn, arg, stack[i], " ");
}
# else
DumpPC(writerfn, arg, stack[i], " ");
# endif
}
}
# ifdef __GNUC__
__attribute__((noreturn))
# endif
static void
DumpStackTraceAndExit() {
DumpStackTrace(1, DebugWriteToStderr, nullptr);
if (IsFailureSignalHandlerInstalled()) {
# ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sig_action.sa_handler = SIG_DFL;
sigaction(SIGABRT, &sig_action, nullptr);
# elif defined(GLOG_OS_WINDOWS)
signal(SIGABRT, SIG_DFL);
# endif
}
abort();
}
}
#endif
namespace google {
inline namespace glog_internal_namespace_ {
const char* const_basename(const char* filepath) {
const char* base = strrchr(filepath, '/');
#ifdef GLOG_OS_WINDOWS
if (!base) base = strrchr(filepath, '\\');
#endif
return base ? (base + 1) : filepath;
}
const char* ProgramInvocationShortName() {
if (g_program_invocation_short_name != nullptr) {
return g_program_invocation_short_name;
}
#if defined(HAVE_PROGRAM_INVOCATION_SHORT_NAME)
return program_invocation_short_name;
#elif defined(HAVE_GETPROGNAME)
return getprogname();
#elif defined(HAVE___PROGNAME)
return __progname;
#elif defined(HAVE___ARGV)
return const_basename(__argv[0]);
#else
return "UNKNOWN";
#endif
}
static int32 g_main_thread_pid = getpid();
int32 GetMainThreadPid() { return g_main_thread_pid; }
bool PidHasChanged() {
int32 pid = getpid();
if (g_main_thread_pid == pid) {
return false;
}
g_main_thread_pid = pid;
return true;
}
static string g_my_user_name;
const string& MyUserName() { return g_my_user_name; }
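// Resolves the user name from USERNAME (Windows) or USER, falling back to
// getpwuid_r, then "uid<N>", and finally "invalid-user".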
static void MyUserNameInitializer() {
#if defined(GLOG_OS_WINDOWS)
const char* user = getenv("USERNAME");
#else
const char* user = getenv("USER");
#endif
if (user != nullptr) {
g_my_user_name = user;
} else {
#if defined(HAVE_PWD_H) && defined(HAVE_UNISTD_H)
struct passwd pwd;
struct passwd* result = nullptr;
char buffer[1024] = {'\0'};
uid_t uid = geteuid();
int pwuid_res = getpwuid_r(uid, &pwd, buffer, sizeof(buffer), &result);
if (pwuid_res == 0 && result) {
g_my_user_name = pwd.pw_name;
} else {
std::snprintf(buffer, sizeof(buffer), "uid%d", uid);
g_my_user_name = buffer;
}
#endif
if (g_my_user_name.empty()) {
g_my_user_name = "invalid-user";
}
}
}
REGISTER_MODULE_INITIALIZER(utilities, MyUserNameInitializer())
static std::atomic<const logging::internal::CrashReason*> g_reason{nullptr};
void SetCrashReason(const logging::internal::CrashReason* r) {
const logging::internal::CrashReason* expected = nullptr;
g_reason.compare_exchange_strong(expected, r);
}
void InitGoogleLoggingUtilities(const char* argv0) {
CHECK(!IsGoogleLoggingInitialized())
<< "You called InitGoogleLogging() twice!";
g_program_invocation_short_name = const_basename(argv0);
#ifdef HAVE_STACKTRACE
InstallFailureFunction(&DumpStackTraceAndExit);
#endif
}
void ShutdownGoogleLoggingUtilities() {
CHECK(IsGoogleLoggingInitialized())
<< "You called ShutdownGoogleLogging() without calling "
"InitGoogleLogging() first!";
g_program_invocation_short_name = nullptr;
#ifdef HAVE_SYSLOG_H
closelog();
#endif
}
}
#ifdef HAVE_STACKTRACE
std::string GetStackTrace() {
std::string stacktrace;
DumpStackTrace(1, DebugWriteToString, &stacktrace);
return stacktrace;
}
#endif
} | #include "utilities.h"
#include "glog/logging.h"
#include "googletest.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
using namespace google;
TEST(utilities, InitGoogleLoggingDeathTest) {
ASSERT_DEATH(InitGoogleLogging("foobar"), "");
}
int main(int argc, char** argv) {
InitGoogleLogging(argv[0]);
InitGoogleTest(&argc, argv);
CHECK_EQ(RUN_ALL_TESTS(), 0);
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/utilities.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/utilities_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
11fa5562-8f1d-4479-b049-91d37b07b020 | cpp | google/quiche | noop_header_validator | quiche/http2/adapter/noop_header_validator.cc | quiche/http2/adapter/noop_header_validator_test.cc | #include "quiche/http2/adapter/noop_header_validator.h"
#include <string>
#include "absl/strings/escaping.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace adapter {
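// This validator accepts every header field; it only records the ":status"
// value so callers can retrieve it later.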
HeaderValidatorBase::HeaderStatus NoopHeaderValidator::ValidateSingleHeader(
absl::string_view key, absl::string_view value) {
if (key == ":status") {
status_ = std::string(value);
}
return HEADER_OK;
}
bool NoopHeaderValidator::FinishHeaderBlock(HeaderType /*type*/) {
return true;
}
}
} | #include "quiche/http2/adapter/noop_header_validator.h"
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
using ::testing::Optional;
using Header = std::pair<absl::string_view, absl::string_view>;
constexpr Header kSampleRequestPseudoheaders[] = {{":authority", "www.foo.com"},
{":method", "GET"},
{":path", "/foo"},
{":scheme", "https"}};
TEST(NoopHeaderValidatorTest, HeaderNameEmpty) {
NoopHeaderValidator v;
NoopHeaderValidator::HeaderStatus status =
v.ValidateSingleHeader("", "value");
EXPECT_EQ(NoopHeaderValidator::HEADER_OK, status);
}
TEST(NoopHeaderValidatorTest, HeaderValueEmpty) {
NoopHeaderValidator v;
NoopHeaderValidator::HeaderStatus status = v.ValidateSingleHeader("name", "");
EXPECT_EQ(NoopHeaderValidator::HEADER_OK, status);
}
TEST(NoopHeaderValidatorTest, ExceedsMaxSize) {
NoopHeaderValidator v;
v.SetMaxFieldSize(64u);
NoopHeaderValidator::HeaderStatus status =
v.ValidateSingleHeader("name", "value");
EXPECT_EQ(NoopHeaderValidator::HEADER_OK, status);
status = v.ValidateSingleHeader(
"name2",
"Antidisestablishmentariansism is supercalifragilisticexpialodocious.");
EXPECT_EQ(NoopHeaderValidator::HEADER_OK, status);
}
TEST(NoopHeaderValidatorTest, AnyNameCharIsValid) {
NoopHeaderValidator v;
char pseudo_name[] = ":met hod";
char name[] = "na me";
for (int i = std::numeric_limits<char>::min();
i < std::numeric_limits<char>::max(); ++i) {
char c = static_cast<char>(i);
pseudo_name[3] = c;
auto sv = absl::string_view(pseudo_name, 8);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(sv, "value"));
name[2] = c;
sv = absl::string_view(name, 5);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(sv, "value"));
}
}
TEST(NoopHeaderValidatorTest, AnyValueCharIsValid) {
NoopHeaderValidator v;
char value[] = "val ue";
for (int i = std::numeric_limits<char>::min();
i < std::numeric_limits<char>::max(); ++i) {
char c = static_cast<char>(i);
value[3] = c;
auto sv = absl::string_view(value, 6);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("name", sv));
}
}
TEST(NoopHeaderValidatorTest, AnyStatusIsValid) {
NoopHeaderValidator v;
for (HeaderType type : {HeaderType::RESPONSE, HeaderType::RESPONSE_100}) {
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "bar"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "10"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "9000"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "400"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
}
}
TEST(NoopHeaderValidatorTest, AnyAuthorityCharIsValid) {
char value[] = "ho st.example.com";
for (int i = std::numeric_limits<char>::min();
i < std::numeric_limits<char>::max(); ++i) {
char c = static_cast<char>(i);
value[2] = c;
auto sv = absl::string_view(value, 17);
for (absl::string_view key : {":authority", "host"}) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(key, sv));
}
}
}
TEST(NoopHeaderValidatorTest, RequestHostAndAuthority) {
NoopHeaderValidator v;
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("host", "www.foo.com"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("host", "www.bar.com"));
}
TEST(NoopHeaderValidatorTest, RequestPseudoHeaders) {
NoopHeaderValidator v;
for (Header to_skip : kSampleRequestPseudoheaders) {
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add != to_skip) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":extra", "blah"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
for (Header to_repeat : kSampleRequestPseudoheaders) {
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
if (to_add == to_repeat) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
}
TEST(NoopHeaderValidatorTest, WebsocketPseudoHeaders) {
NoopHeaderValidator v;
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":protocol", "websocket"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.SetAllowExtendedConnect();
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":protocol", "websocket"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":method") {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "CONNECT"));
} else {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":protocol", "websocket"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
TEST(NoopHeaderValidatorTest, AsteriskPathPseudoHeader) {
NoopHeaderValidator v;
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "*"));
} else {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "*"));
} else if (to_add.first == ":method") {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "OPTIONS"));
} else {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
TEST(NoopHeaderValidatorTest, InvalidPathPseudoHeader) {
NoopHeaderValidator v;
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, ""));
} else {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
v.StartHeaderBlock();
for (Header to_add : kSampleRequestPseudoheaders) {
if (to_add.first == ":path") {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, "shawarma"));
} else {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(to_add.first, to_add.second));
}
}
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::REQUEST));
}
TEST(NoopHeaderValidatorTest, ResponsePseudoHeaders) {
NoopHeaderValidator v;
for (HeaderType type : {HeaderType::RESPONSE, HeaderType::RESPONSE_100}) {
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("foo", "bar"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "199"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
EXPECT_EQ("199", v.status_header());
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "199"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "299"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "199"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":extra", "blorp"));
EXPECT_TRUE(v.FinishHeaderBlock(type));
}
}
TEST(NoopHeaderValidatorTest, ResponseWithHost) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "200"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("host", "myserver.com"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(NoopHeaderValidatorTest, Response204) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "204"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(NoopHeaderValidatorTest, ResponseWithMultipleIdenticalContentLength) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "200"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "13"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "13"));
}
TEST(NoopHeaderValidatorTest, ResponseWithMultipleDifferingContentLength) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "200"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "13"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "17"));
}
TEST(NoopHeaderValidatorTest, Response204WithContentLengthZero) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "204"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "0"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(NoopHeaderValidatorTest, Response204WithContentLength) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "204"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "1"));
}
TEST(NoopHeaderValidatorTest, Response100) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "100"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(NoopHeaderValidatorTest, Response100WithContentLengthZero) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "100"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "0"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE));
}
TEST(NoopHeaderValidatorTest, Response100WithContentLength) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "100"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("x-content", "is not present"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "1"));
}
TEST(NoopHeaderValidatorTest, ResponseTrailerPseudoHeaders) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("foo", "bar"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE_TRAILER));
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(":status", "200"));
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("foo", "bar"));
EXPECT_TRUE(v.FinishHeaderBlock(HeaderType::RESPONSE_TRAILER));
}
TEST(NoopHeaderValidatorTest, ValidContentLength) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "41"));
EXPECT_EQ(v.content_length(), std::nullopt);
v.StartHeaderBlock();
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "42"));
EXPECT_EQ(v.content_length(), std::nullopt);
}
TEST(NoopHeaderValidatorTest, InvalidContentLength) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", ""));
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "nan"));
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "-42"));
EXPECT_EQ(v.content_length(), std::nullopt);
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("content-length", "42"));
EXPECT_EQ(v.content_length(), std::nullopt);
}
TEST(NoopHeaderValidatorTest, TeHeader) {
NoopHeaderValidator v;
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("te", "trailers"));
v.StartHeaderBlock();
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader("te", "trailers, deflate"));
}
TEST(NoopHeaderValidatorTest, ConnectionSpecificHeaders) {
const std::vector<Header> connection_headers = {
{"connection", "keep-alive"}, {"proxy-connection", "keep-alive"},
{"keep-alive", "timeout=42"}, {"transfer-encoding", "chunked"},
{"upgrade", "h2c"},
};
for (const auto& [connection_key, connection_value] : connection_headers) {
NoopHeaderValidator v;
v.StartHeaderBlock();
for (const auto& [sample_key, sample_value] : kSampleRequestPseudoheaders) {
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(sample_key, sample_value));
}
EXPECT_EQ(NoopHeaderValidator::HEADER_OK,
v.ValidateSingleHeader(connection_key, connection_value));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/noop_header_validator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/noop_header_validator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
cfec981d-552c-4da4-9a2f-8bcbd2723fba | cpp | tensorflow/tensorflow | reshape_mover | third_party/xla/xla/service/reshape_mover.cc | third_party/xla/xla/service/reshape_mover_test.cc | #include "xla/service/reshape_mover.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/permutation_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
bool IsRearrange(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kReshape ||
instruction->opcode() == HloOpcode::kTranspose;
}
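// Returns true if the two instructions perform equivalent rearranges: same
// opcode and output dimensions, and additionally the same permutation for
// transposes or the same operand dimensions for reshapes.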
bool AreEquivalentRearranges(const HloInstruction* a, const HloInstruction* b) {
if (a->opcode() != b->opcode() ||
!ShapeUtil::SameDimensions(a->shape(), b->shape())) {
return false;
}
switch (a->opcode()) {
case HloOpcode::kTranspose:
return a->dimensions() == b->dimensions();
case HloOpcode::kReshape:
return ShapeUtil::SameDimensions(a->operand(0)->shape(),
b->operand(0)->shape());
default:
return false;
}
}
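// Maps broadcast dimensions through the inverse of the given transpose
// permutation.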
absl::InlinedVector<int64_t, 4> TransposedBcastDims(
absl::Span<const int64_t> bcast_dims,
absl::Span<const int64_t> transpose_dims) {
auto inv_perm = InversePermutation(transpose_dims);
absl::InlinedVector<int64_t, 4> new_bcast_dims;
for (int64_t dim : bcast_dims) {
new_bcast_dims.push_back(inv_perm[dim]);
}
return new_bcast_dims;
}
}
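// Returns true if applying "rearrange" to "instr" is free or foldable:
// identity reshapes/transposes, constants, a single-use rng, or broadcasts
// whose dimensions survive the rearrange in sorted order.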
bool ReshapeMover::CanTriviallyRearrange(const HloInstruction* instr,
const HloInstruction* rearrange) {
CHECK(IsRearrange(rearrange)) << rearrange->ToString();
if (rearrange->opcode() == HloOpcode::kReshape &&
ShapeUtil::Equal(rearrange->shape(), rearrange->operand(0)->shape())) {
return true;
}
if (rearrange->opcode() == HloOpcode::kTranspose &&
IsIdentityPermutation(rearrange->dimensions())) {
return true;
}
if (instr->opcode() == HloOpcode::kConstant) {
return true;
}
if (instr->opcode() == HloOpcode::kRng && instr->user_count() == 1) {
return true;
}
if (instr->opcode() == HloOpcode::kBroadcast) {
if (!absl::c_is_sorted(instr->dimensions())) {
return false;
}
if (rearrange->opcode() == HloOpcode::kReshape) {
return ShapeUtil::IsScalar(instr->operand(0)->shape()) ||
(options_.reshape_of_1d_broadcast_is_cheap &&
ShapeUtil::TrueRank(instr->operand(0)->shape()) <= 1) ||
(options_.reshape_of_1d_broadcast_is_cheap &&
ShapeUtil::ReshapeLeavesDimensionsUnmodified(
rearrange->shape(),
rearrange->operand(0)->shape(),
instr->dimensions())
.has_value());
}
if (rearrange->opcode() == HloOpcode::kTranspose) {
return absl::c_is_sorted(TransposedBcastDims(
instr->dimensions(), InversePermutation(rearrange->dimensions())));
}
}
return false;
}
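// Returns the first operand that is a reshape or transpose which cannot be
// trivially folded into its input, or nullptr if there is none.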
const HloInstruction* ReshapeMover::FirstNontrivialRearrange(
absl::Span<const HloInstruction* const> instrs) {
auto rearrange_it = absl::c_find_if(instrs, [&](const HloInstruction* instr) {
return IsRearrange(instr) &&
!CanTriviallyRearrange(instr->operand(0), instr);
});
if (rearrange_it == instrs.end()) {
return nullptr;
}
return *rearrange_it;
}
bool ReshapeMover::IsReshapeMoveCandidate(HloInstruction* instruction) {
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
VLOG(5) << "** Checking instruction: "
<< instruction->ToString(print_no_metadata);
if (!instruction->IsElementwise()) {
return false;
}
const HloInstruction* rearrange =
FirstNontrivialRearrange(instruction->operands());
if (rearrange == nullptr) {
return false;
}
return absl::c_all_of(
instruction->operands(), [&](const HloInstruction* operand) {
return (IsRearrange(operand) &&
AreEquivalentRearranges(operand, rearrange)) ||
(!IsRearrange(operand) &&
CanTriviallyRearrange(operand, rearrange));
});
}
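// Applies the inverse of "rearrange" to "operand" (an inverse reshape or
// transpose), returning "operand" unchanged when the rearrange is an identity.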
absl::StatusOr<HloInstruction*> ReshapeMover::ApplyInverseRearrange(
const HloInstruction* rearrange, HloInstruction* operand) {
switch (rearrange->opcode()) {
case HloOpcode::kReshape: {
Shape new_shape = ShapeUtil::ChangeElementType(
rearrange->operand(0)->shape(), operand->shape().element_type());
if (operand->shape() != new_shape) {
return MakeReshapeHlo(new_shape, operand);
} else {
return operand;
}
}
case HloOpcode::kTranspose: {
if (!IsIdentityPermutation(rearrange->dimensions())) {
return MakeTransposeHlo(operand,
InversePermutation(rearrange->dimensions()));
} else {
return operand;
}
}
default:
LOG(FATAL) << "Invalid rearrange op: " << rearrange->ToString();
}
}
absl::StatusOr<bool> ReshapeMover::SinkRearrangeOperands(
HloInstruction* instruction) {
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
HloComputation* computation = instruction->parent();
const HloInstruction* rearrange =
FirstNontrivialRearrange(instruction->operands());
CHECK(rearrange != nullptr);
const Shape& new_operand_shape = rearrange->operand(0)->shape();
VLOG(3) << "** Sinking reshape or transpose: "
<< instruction->ToString(print_no_metadata)
<< "\n\tfirst rearrange operand: "
<< rearrange->ToString(print_no_metadata)
<< "\n\tnew operand shape: "
<< ShapeUtil::HumanString(new_operand_shape);
auto operands = instruction->operands();
for (size_t i = 0; i < operands.size(); ++i) {
VLOG(3) << "Updating operand #" << i << ": "
<< operands[i]->ToString(print_no_metadata);
TF_ASSIGN_OR_RETURN(operands[i],
ApplyInverseRearrange(rearrange, operands[i]));
VLOG(3) << "Updated operand #" << i
<< " to: " << operands[i]->ToString(print_no_metadata);
}
HloInstruction* new_elementwise =
computation->AddInstruction(instruction->CloneWithNewOperands(
ShapeUtil::ChangeElementType(new_operand_shape,
instruction->shape().element_type()),
operands));
std::unique_ptr<HloInstruction> new_rearrange;
switch (rearrange->opcode()) {
case HloOpcode::kReshape:
VLOG(3) << "Creating new reshape for new elementwise op: "
<< new_elementwise->ToString(print_no_metadata);
new_rearrange =
HloInstruction::CreateReshape(instruction->shape(), new_elementwise);
break;
case HloOpcode::kTranspose:
new_rearrange = HloInstruction::CreateTranspose(
instruction->shape(), new_elementwise, rearrange->dimensions());
break;
default:
LOG(FATAL) << "Bad opcode";
}
if (instruction->has_sharding()) {
new_elementwise->clear_sharding();
}
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
instruction, std::move(new_rearrange)));
return true;
}
absl::StatusOr<bool> ReshapeMover::TryReshapeMoveOnCandidates(
HloInstructionSet* candidates) {
bool removed = true;
while (!candidates->empty() && removed) {
if (VLOG_IS_ON(5)) {
for (const HloInstruction* instruction : *candidates) {
VLOG(5) << "candidate " << instruction->ToString();
}
}
ConstHloInstructionSet rearrange_operands;
for (const HloInstruction* instruction : *candidates) {
for (const auto* operand : instruction->operands()) {
if (IsRearrange(operand)) {
rearrange_operands.insert(operand);
}
}
}
removed = false;
for (auto operand : rearrange_operands) {
if (absl::c_any_of(operand->users(), [&](HloInstruction* user) {
return !candidates->count(user);
})) {
for (auto* user : operand->users()) {
removed |= candidates->erase(user) > 0;
}
}
}
}
if (candidates->empty()) {
return false;
}
for (HloInstruction* instruction : *candidates) {
if (!ConsumeFuel("reshape-mover", [&] {
return absl::StrCat("instruction: ", instruction->ToString(),
"\nFull module:\n",
instruction->GetModule()->ToString());
})) {
break;
}
TF_ASSIGN_OR_RETURN(bool did_change, SinkRearrangeOperands(instruction));
CHECK(did_change);
}
return true;
}
absl::StatusOr<bool> ReshapeMover::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
HloInstructionSet candidates;
for (HloInstruction* instruction : comp->instructions()) {
if (IsReshapeMoveCandidate(instruction)) {
candidates.insert(instruction);
}
}
TF_ASSIGN_OR_RETURN(bool did_change,
TryReshapeMoveOnCandidates(&candidates));
changed |= did_change;
}
return changed;
}
} | #include "xla/service/reshape_mover.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = xla::match;
class ReshapeMoverTest : public HloTestBase {
protected:
absl::Status RunPass(HloModule* module, bool change_expected,
ReshapeMoverOptions options = ReshapeMoverOptions{}) {
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(ReshapeMover(options), module));
SCOPED_TRACE(module->ToString());
EXPECT_EQ(changed, change_expected);
TF_EXPECT_OK(RunHloPass(HloVerifier(HloVerifierOpts()), module).status());
TF_EXPECT_OK(RunHloPass(HloPassFix<AlgebraicSimplifier>(
AlgebraicSimplifierOptions()),
module)
.status());
return absl::OkStatus();
}
};
TEST_F(ReshapeMoverTest, ReshapesWithDifferentInputShapesNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
reshape1 = f32[8,7] reshape(f32[1,8,7,1] parameter(1))
ROOT add = add(reshape0, reshape1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, OneConstantAndOneReshapesOnRngNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
rng = f32[1,8,1,7,1] rng(f32[] constant(0), f32[] constant(1)), distribution=rng_uniform
ROOT add = add(f32[8,7] reshape(rng), f32[8,7] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, EquivalentReshapesMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
reshape1 = f32[8,7] reshape(f32[1,8,1,7] parameter(1))
ROOT add = f32[8,7] add(reshape0, reshape1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, SinkReshapeBelowSelect) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = f32[2,3] select(
pred[2,3] reshape(pred[6] parameter(0)),
f32[2,3] reshape(f32[6] parameter(1)),
f32[2,3] reshape(f32[6] parameter(2)))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2)))));
}
TEST_F(ReshapeMoverTest, SinkReshapeBelowSelectWithConstant) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = f32[2,3] select(
pred[2,3] reshape(pred[6] parameter(0)),
f32[2,3] reshape(f32[6] parameter(1)),
f32[2,3] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1),
m::Reshape(m::Constant())))));
}
TEST_F(ReshapeMoverTest, OneParameterAndOneReshapeNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
ROOT add = add(reshape0, f32[8,7] parameter(1))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, DontSinkReshapesOfConstants) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = select(
pred[3,2] parameter(0),
f32[3,2] reshape(f32[2,3] constant({...})),
f32[3,2] reshape(f32[2,3] constant({...})))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, OneNontrivialReshapeMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT add = add(
f32[3,2] reshape(f32[2,3] parameter(0)),
f32[3,2] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Parameter(0), m::Reshape(m::Constant())))));
}
TEST_F(ReshapeMoverTest, MultipleReshapes) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
add0 = f32[8,7,1] add(
f32[8,7,1] reshape(f32[1,8,1,7] parameter(0)),
f32[8,7,1] reshape(f32[1,8,1,7] parameter(1)))
ROOT add1 = f32[8,7] add(
f32[8,7] reshape(add0),
f32[8,7] reshape(f32[8,7,1] parameter(2)))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1))),
m::Parameter(2)))));
}
TEST_F(ReshapeMoverTest, SinkTransposeAcrossBroadcastScalar) {
const std::string hlo_string = R"(
HloModule TransposeMulInversedTransposeModule
ENTRY TransposeMulInversedTranspose {
src0 = f32[20,8]{1,0} parameter(0)
transpose0 = f32[8,20]{1,0} transpose(src0), dimensions={1,0}
src1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(src1), dimensions={}
ROOT multiply0 = f32[8,20]{1,0} multiply(transpose0, broadcast0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(m::Multiply(
m::Parameter(0), m::Broadcast(m::Parameter(1))))));
}
TEST_F(ReshapeMoverTest, ReshapeWithUsersOutsideCandidatesNotSink) {
const std::string hlo_string = R"(
HloModule ReshapeWithUsersOutsideCandidates
ENTRY ReshapeWithMultipleUsers {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
param1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={}
param2 = f32[20,8]{1,0} parameter(2)
reshape1 = f32[8,20]{1,0} reshape(param2)
param3 = f32[20,8]{1,0} parameter(3)
reshape2 = f32[8,20]{1,0} reshape(param3)
param4 = f32[8,20]{1,0} parameter(4)
add0 = f32[8,20]{1,0} add(reshape0, broadcast0)
add1 = f32[8,20]{1,0} add(reshape0, reshape1)
add2 = f32[8,20]{1,0} add(reshape1, param4)
ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0},
f32[8,20]{1,0}) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink1) {
const std::string hlo_string = R"(
HloModule ReshapeNoUsersOutsideCandidates1
ENTRY ReshapeWithMultipleUsers1 {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
param1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={}
param2 = f32[20,8]{1,0} parameter(2)
reshape1 = f32[8,20]{1,0} reshape(param2)
param3 = f32[20,8]{1,0} parameter(3)
reshape2 = f32[8,20]{1,0} reshape(param3)
add0 = f32[8,20]{1,0} add(reshape0, broadcast0)
add1 = f32[8,20]{1,0} add(reshape0, reshape1)
add2 = f32[8,20]{1,0} add(reshape1, reshape2)
ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0},
f32[8,20]{1,0}) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Reshape(m::Add(m::Parameter(0), m::Broadcast(m::Parameter(1)))),
m::Reshape(m::Add(m::Parameter(0), m::Parameter(2))),
m::Reshape(m::Add(m::Parameter(2), m::Parameter(3))))));
}
TEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink2) {
const std::string hlo_string = R"(
HloModule ReshapeNoUsersOutsideCandidates2
ENTRY ReshapeWithMultipleUsers2 {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
ROOT add0 = f32[8,20]{1,0} add(reshape0, reshape0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Add())));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsNotTrivial) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] reshape(f32[6] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsTrivial) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] reshape(f32[6] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
ReshapeMoverOptions options;
options.reshape_of_1d_broadcast_is_cheap = true;
TF_ASSERT_OK(RunPass(m.get(), true, options));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Reshape(m::Broadcast(m::Parameter(0))), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank2BroadcastIsAllowed) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,35] reshape(f32[2,3,5,7] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
ReshapeMoverOptions options;
options.reshape_of_1d_broadcast_is_cheap = true;
TF_ASSERT_OK(RunPass(m.get(), true, options));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, SinkDisallowedIfReshapeChangesBroadcastDims) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,35] reshape(f32[6,5,7] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, TransposeOfBroadcastIsAllowed) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] transpose(f32[3,2] parameter(1)), dimensions={1,0}
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(
m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, TransposeReordersBroadcastDims) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,5] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,5] transpose(f32[3,2,5] parameter(1)), dimensions={1,0,2}
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ShardingConsistencyPreservation) {
const std::string hlo_string = R"(
HloModule module
ENTRY entry {
copy.2424 = bf16[3,16,128]{2,1,0} parameter(0), sharding={replicated}
dot.987 = bf16[3,16,128,4096]{3,2,1,0} parameter(1), sharding={devices=[1,8,1,1]0,1,2,3,4,5,6,7}
reshape.5843 = bf16[3,16,128,1,4096]{4,3,2,1,0} reshape(dot.987), sharding={devices=[1,8,1,1,1]0,1,2,3,4,5,6,7}
transpose.21172 = bf16[3,1,4096,16,128]{2,1,4,3,0} transpose(reshape.5843), dimensions={0,3,4,1,2}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
reshape.291 = bf16[3,16,128]{2,1,0} reshape(copy.2424), sharding={devices=[1,8,1]0,1,2,3,4,5,6,7}
broadcast.21176 = bf16[3,1,4096,16,128]{4,3,2,1,0} broadcast(reshape.291), dimensions={0,3,4}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
multiply.21177 = bf16[3,1,4096,16,128]{2,1,4,3,0} multiply(transpose.21172, broadcast.21176), sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
ROOT slice.21180 = bf16[1,1,4096,16,128]{4,3,2,1,0} slice(multiply.21177), slice={[1:2], [0:1], [0:4096], [0:16], [0:128]}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
auto elementwise_op = FindInstruction(m.get(), HloOpcode::kMultiply);
EXPECT_FALSE(elementwise_op->has_sharding());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_mover.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_mover_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e8bdefdc-6e3e-4dc4-b2fa-2d86de8dd804 | cpp | tensorflow/tensorflow | run_handler_util | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.cc | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util_test.cc | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.h"
#include <cmath>
#include <cstdlib>
#include <string>
#include <vector>
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/str_util.h"
namespace tfrt {
namespace tf {
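// Parses a double from the named environment variable, returning the default
// when the variable is unset or unparsable. The overloads below do the same
// for comma-separated lists and for booleans.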
double ParamFromEnvWithDefault(const char* var_name, double default_value) {
const char* val = std::getenv(var_name);
double num;
return (val && tensorflow::strings::safe_strtod(val, &num)) ? num
: default_value;
}
std::vector<double> ParamFromEnvWithDefault(const char* var_name,
std::vector<double> default_value) {
const char* val = std::getenv(var_name);
if (!val) {
return default_value;
}
std::vector<std::string> splits = tensorflow::str_util::Split(val, ",");
std::vector<double> result;
result.reserve(splits.size());
for (auto& split : splits) {
double num;
if (tensorflow::strings::safe_strtod(split, &num)) {
result.push_back(num);
} else {
LOG(ERROR) << "Wrong format for " << var_name << ". Use default value.";
return default_value;
}
}
return result;
}
std::vector<int> ParamFromEnvWithDefault(const char* var_name,
std::vector<int> default_value) {
const char* val = std::getenv(var_name);
if (!val) {
return default_value;
}
std::vector<std::string> splits = tensorflow::str_util::Split(val, ",");
std::vector<int> result;
result.reserve(splits.size());
for (auto& split : splits) {
int num;
if (tensorflow::strings::safe_strto32(split, &num)) {
result.push_back(num);
} else {
LOG(ERROR) << "Wrong format for " << var_name << ". Use default value.";
return default_value;
}
}
return result;
}
bool ParamFromEnvBoolWithDefault(const char* var_name, bool default_value) {
const char* val = std::getenv(var_name);
return (val) ? absl::AsciiStrToLower(val) == "true" : default_value;
}
}
} | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.h"
#include <vector>
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tfrt {
namespace tf {
namespace {
TEST(RunHandlerUtilTest, TestParamFromEnvWithDefault) {
std::vector<double> result = ParamFromEnvWithDefault(
"RUN_HANDLER_TEST_ENV", std::vector<double>{0, 0, 0});
EXPECT_EQ(result.size(), 3);
EXPECT_EQ(result[0], 0);
EXPECT_EQ(result[1], 0);
EXPECT_EQ(result[2], 0);
std::vector<int> result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<int>{0, 0, 0});
EXPECT_EQ(result2.size(), 3);
EXPECT_EQ(result2[0], 0);
EXPECT_EQ(result2[1], 0);
EXPECT_EQ(result2[2], 0);
bool result3 =
ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false);
EXPECT_EQ(result3, false);
EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV", "1,2,3", true), 0);
result = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<double>{0, 0, 0});
EXPECT_EQ(result.size(), 3);
EXPECT_EQ(result[0], 1);
EXPECT_EQ(result[1], 2);
EXPECT_EQ(result[2], 3);
result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<int>{0, 0, 0});
  EXPECT_EQ(result2.size(), 3);
EXPECT_EQ(result2[0], 1);
EXPECT_EQ(result2[1], 2);
EXPECT_EQ(result2[2], 3);
EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV_BOOL", "true", true), 0);
result3 = ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false);
EXPECT_EQ(result3, true);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a7fcf972-41f2-48c3-b0a6-e320192e62bb | cpp | tensorflow/tensorflow | dispatch | tensorflow/lite/experimental/shlo/legacy/src/dispatch.h | tensorflow/lite/experimental/shlo/dispatch_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_DISPATCH_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_DISPATCH_H_
namespace stablehlo {
#define DISPATCH_INT(name, element_type, ...) \
{ \
switch (element_type) { \
case ElementType::kSI8: \
return name<ElementType::kSI8, ElementType::kSI8>(__VA_ARGS__); \
case ElementType::kSI16: \
return name<ElementType::kSI16, ElementType::kSI16>(__VA_ARGS__); \
case ElementType::kSI32: \
return name<ElementType::kSI32, ElementType::kSI32>(__VA_ARGS__); \
default: \
return absl::InvalidArgumentError("Unsupported element type"); \
} \
}
#define DISPATCH_FLOAT(name, element_type, ...) \
{ \
switch (element_type) { \
case ElementType::kBF16: \
return name<ElementType::kBF16, ElementType::kBF16>(__VA_ARGS__); \
case ElementType::kF16: \
return name<ElementType::kF16, ElementType::kF16>(__VA_ARGS__); \
case ElementType::kF32: \
return name<ElementType::kF32, ElementType::kF32>(__VA_ARGS__); \
default: \
return absl::InvalidArgumentError("Unsupported element type"); \
} \
}
#define DISPATCH_INT_FLOAT(name, element_type, ...) \
{ \
switch (element_type) { \
case ElementType::kSI8: \
return name<ElementType::kSI8, ElementType::kSI8>(__VA_ARGS__); \
case ElementType::kSI16: \
return name<ElementType::kSI16, ElementType::kSI16>(__VA_ARGS__); \
case ElementType::kSI32: \
return name<ElementType::kSI32, ElementType::kSI32>(__VA_ARGS__); \
case ElementType::kBF16: \
return name<ElementType::kBF16, ElementType::kBF16>(__VA_ARGS__); \
case ElementType::kF16: \
return name<ElementType::kF16, ElementType::kF16>(__VA_ARGS__); \
case ElementType::kF32: \
return name<ElementType::kF32, ElementType::kF32>(__VA_ARGS__); \
default: \
return absl::InvalidArgumentError("Unsupported element type"); \
} \
}
#define DISPATCH_BOOL_INT_FLOAT(name, element_type, ...) \
{ \
switch (element_type) { \
case ElementType::kI1: \
return name<ElementType::kI1, ElementType::kI1>(__VA_ARGS__); \
case ElementType::kSI8: \
return name<ElementType::kSI8, ElementType::kSI8>(__VA_ARGS__); \
case ElementType::kSI16: \
return name<ElementType::kSI16, ElementType::kSI16>(__VA_ARGS__); \
case ElementType::kSI32: \
return name<ElementType::kSI32, ElementType::kSI32>(__VA_ARGS__); \
case ElementType::kBF16: \
return name<ElementType::kBF16, ElementType::kBF16>(__VA_ARGS__); \
case ElementType::kF16: \
return name<ElementType::kF16, ElementType::kF16>(__VA_ARGS__); \
case ElementType::kF32: \
return name<ElementType::kF32, ElementType::kF32>(__VA_ARGS__); \
default: \
return absl::InvalidArgumentError("Unsupported element type"); \
} \
}
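// Dispatches over both the quantized storage type (si8/si16/si32) and the
// expressed floating-point type (bf16/f16/f32).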
#define DISPATCH_QUANTIZED(name, storage_type, expressed_type, ...) \
{ \
switch (storage_type) { \
case ElementType::kSI8: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name<ElementType::kSI8, ElementType::kBF16>(__VA_ARGS__); \
case ElementType::kF16: \
return name<ElementType::kSI8, ElementType::kF16>(__VA_ARGS__); \
case ElementType::kF32: \
return name<ElementType::kSI8, ElementType::kF32>(__VA_ARGS__); \
default: \
return absl::InvalidArgumentError("Unsupported expressed type"); \
} \
break; \
case ElementType::kSI16: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name<ElementType::kSI16, ElementType::kBF16>(__VA_ARGS__); \
case ElementType::kF16: \
return name<ElementType::kSI16, ElementType::kF16>(__VA_ARGS__); \
case ElementType::kF32: \
return name<ElementType::kSI16, ElementType::kF32>(__VA_ARGS__); \
default: \
return absl::InvalidArgumentError("Unsupported expressed type"); \
} \
break; \
case ElementType::kSI32: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name<ElementType::kSI32, ElementType::kBF16>(__VA_ARGS__); \
case ElementType::kF16: \
return name<ElementType::kSI32, ElementType::kF16>(__VA_ARGS__); \
case ElementType::kF32: \
return name<ElementType::kSI32, ElementType::kF32>(__VA_ARGS__); \
default: \
return absl::InvalidArgumentError("Unsupported expressed type"); \
} \
break; \
default: \
return absl::InvalidArgumentError("Unsupported storage type"); \
} \
}
}
#endif | #include "tensorflow/lite/experimental/shlo/dispatch.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
namespace {
void VoidFunction() {}
TEST(DispatchTest, ReturnAbslOkIfVoidCompiles) {
auto f = []() -> absl::Status { RETURN_OK_STATUS_IF_VOID(VoidFunction()); };
EXPECT_OK(f());
}
TEST(DispatchTest, AbslOkStatusCompiles) {
auto f = []() -> absl::Status { RETURN_OK_STATUS_IF_VOID(absl::OkStatus()); };
EXPECT_OK(f());
}
TEST(DispatchTest, AbslErrorCompiles) {
auto f = []() -> absl::Status {
RETURN_OK_STATUS_IF_VOID(absl::UnknownError("error message"));
};
EXPECT_EQ(f(), absl::UnknownError("error message"));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/dispatch.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/dispatch_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e94359c1-9f65-4511-82da-655ef1410361 | cpp | abseil/abseil-cpp | utf8_for_code_point | absl/debugging/internal/utf8_for_code_point.cc | absl/debugging/internal/utf8_for_code_point_test.cc | #include "absl/debugging/internal/utf8_for_code_point.h"
#include <cstdint>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
constexpr uint32_t kMinSurrogate = 0xd800, kMaxSurrogate = 0xdfff;
constexpr uint32_t kMax1ByteCodePoint = 0x7f;
constexpr uint32_t kMax2ByteCodePoint = 0x7ff;
constexpr uint32_t kMax3ByteCodePoint = 0xffff;
constexpr uint32_t kMaxCodePoint = 0x10ffff;
}
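// Encodes the code point as 1-4 UTF-8 bytes per RFC 3629. Surrogates and
// values above U+10FFFF are rejected, leaving length at its default of 0.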
Utf8ForCodePoint::Utf8ForCodePoint(uint64_t code_point) {
if (code_point <= kMax1ByteCodePoint) {
length = 1;
bytes[0] = static_cast<char>(code_point);
return;
}
if (code_point <= kMax2ByteCodePoint) {
length = 2;
bytes[0] = static_cast<char>(0xc0 | (code_point >> 6));
bytes[1] = static_cast<char>(0x80 | (code_point & 0x3f));
return;
}
if (kMinSurrogate <= code_point && code_point <= kMaxSurrogate) return;
if (code_point <= kMax3ByteCodePoint) {
length = 3;
bytes[0] = static_cast<char>(0xe0 | (code_point >> 12));
bytes[1] = static_cast<char>(0x80 | ((code_point >> 6) & 0x3f));
bytes[2] = static_cast<char>(0x80 | (code_point & 0x3f));
return;
}
if (code_point > kMaxCodePoint) return;
length = 4;
bytes[0] = static_cast<char>(0xf0 | (code_point >> 18));
bytes[1] = static_cast<char>(0x80 | ((code_point >> 12) & 0x3f));
bytes[2] = static_cast<char>(0x80 | ((code_point >> 6) & 0x3f));
bytes[3] = static_cast<char>(0x80 | (code_point & 0x3f));
}
}
ABSL_NAMESPACE_END
} | #include "absl/debugging/internal/utf8_for_code_point.h"
#include <cstdint>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
TEST(Utf8ForCodePointTest, RecognizesTheSmallestCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0});
ASSERT_EQ(utf8.length, 1);
EXPECT_EQ(utf8.bytes[0], '\0');
}
TEST(Utf8ForCodePointTest, RecognizesAsciiSmallA) {
Utf8ForCodePoint utf8(uint64_t{'a'});
ASSERT_EQ(utf8.length, 1);
EXPECT_EQ(utf8.bytes[0], 'a');
}
TEST(Utf8ForCodePointTest, RecognizesTheLargestOneByteCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0x7f});
ASSERT_EQ(utf8.length, 1);
EXPECT_EQ(utf8.bytes[0], '\x7f');
}
TEST(Utf8ForCodePointTest, RecognizesTheSmallestTwoByteCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0x80});
ASSERT_EQ(utf8.length, 2);
EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xc2));
EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x80));
}
TEST(Utf8ForCodePointTest, RecognizesSmallNWithTilde) {
Utf8ForCodePoint utf8(uint64_t{0xf1});
ASSERT_EQ(utf8.length, 2);
const char* want = "ñ";
EXPECT_EQ(utf8.bytes[0], want[0]);
EXPECT_EQ(utf8.bytes[1], want[1]);
}
TEST(Utf8ForCodePointTest, RecognizesCapitalPi) {
Utf8ForCodePoint utf8(uint64_t{0x3a0});
ASSERT_EQ(utf8.length, 2);
const char* want = "Π";
EXPECT_EQ(utf8.bytes[0], want[0]);
EXPECT_EQ(utf8.bytes[1], want[1]);
}
TEST(Utf8ForCodePointTest, RecognizesTheLargestTwoByteCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0x7ff});
ASSERT_EQ(utf8.length, 2);
EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xdf));
EXPECT_EQ(utf8.bytes[1], static_cast<char>(0xbf));
}
TEST(Utf8ForCodePointTest, RecognizesTheSmallestThreeByteCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0x800});
ASSERT_EQ(utf8.length, 3);
EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xe0));
EXPECT_EQ(utf8.bytes[1], static_cast<char>(0xa0));
EXPECT_EQ(utf8.bytes[2], static_cast<char>(0x80));
}
TEST(Utf8ForCodePointTest, RecognizesTheChineseCharacterZhong1AsInZhong1Wen2) {
Utf8ForCodePoint utf8(uint64_t{0x4e2d});
ASSERT_EQ(utf8.length, 3);
const char* want = "中";
EXPECT_EQ(utf8.bytes[0], want[0]);
EXPECT_EQ(utf8.bytes[1], want[1]);
EXPECT_EQ(utf8.bytes[2], want[2]);
}
TEST(Utf8ForCodePointTest, RecognizesOneBeforeTheSmallestSurrogate) {
Utf8ForCodePoint utf8(uint64_t{0xd7ff});
ASSERT_EQ(utf8.length, 3);
EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xed));
EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x9f));
EXPECT_EQ(utf8.bytes[2], static_cast<char>(0xbf));
}
TEST(Utf8ForCodePointTest, RejectsTheSmallestSurrogate) {
Utf8ForCodePoint utf8(uint64_t{0xd800});
EXPECT_EQ(utf8.length, 0);
}
TEST(Utf8ForCodePointTest, RejectsTheLargestSurrogate) {
Utf8ForCodePoint utf8(uint64_t{0xdfff});
EXPECT_EQ(utf8.length, 0);
}
TEST(Utf8ForCodePointTest, RecognizesOnePastTheLargestSurrogate) {
Utf8ForCodePoint utf8(uint64_t{0xe000});
ASSERT_EQ(utf8.length, 3);
EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xee));
EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x80));
EXPECT_EQ(utf8.bytes[2], static_cast<char>(0x80));
}
TEST(Utf8ForCodePointTest, RecognizesTheLargestThreeByteCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0xffff});
ASSERT_EQ(utf8.length, 3);
EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xef));
EXPECT_EQ(utf8.bytes[1], static_cast<char>(0xbf));
EXPECT_EQ(utf8.bytes[2], static_cast<char>(0xbf));
}
TEST(Utf8ForCodePointTest, RecognizesTheSmallestFourByteCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0x10000});
ASSERT_EQ(utf8.length, 4);
EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xf0));
EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x90));
EXPECT_EQ(utf8.bytes[2], static_cast<char>(0x80));
EXPECT_EQ(utf8.bytes[3], static_cast<char>(0x80));
}
TEST(Utf8ForCodePointTest, RecognizesTheJackOfHearts) {
Utf8ForCodePoint utf8(uint64_t{0x1f0bb});
ASSERT_EQ(utf8.length, 4);
const char* want = "🂻";
EXPECT_EQ(utf8.bytes[0], want[0]);
EXPECT_EQ(utf8.bytes[1], want[1]);
EXPECT_EQ(utf8.bytes[2], want[2]);
EXPECT_EQ(utf8.bytes[3], want[3]);
}
TEST(Utf8ForCodePointTest, RecognizesTheLargestFourByteCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0x10ffff});
ASSERT_EQ(utf8.length, 4);
EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xf4));
EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x8f));
EXPECT_EQ(utf8.bytes[2], static_cast<char>(0xbf));
EXPECT_EQ(utf8.bytes[3], static_cast<char>(0xbf));
}
TEST(Utf8ForCodePointTest, RejectsTheSmallestOverlargeCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0x110000});
EXPECT_EQ(utf8.length, 0);
}
TEST(Utf8ForCodePointTest, RejectsAThroughlyOverlargeCodePoint) {
Utf8ForCodePoint utf8(uint64_t{0xffffffff00000000});
EXPECT_EQ(utf8.length, 0);
}
TEST(Utf8ForCodePointTest, OkReturnsTrueForAValidCodePoint) {
EXPECT_TRUE(Utf8ForCodePoint(uint64_t{0}).ok());
}
TEST(Utf8ForCodePointTest, OkReturnsFalseForAnInvalidCodePoint) {
EXPECT_FALSE(Utf8ForCodePoint(uint64_t{0xffffffff00000000}).ok());
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/utf8_for_code_point.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/utf8_for_code_point_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
8c29c712-c7a8-476a-9c88-27afb11d1cb1 | cpp | google/quiche | http2_hpack_constants | quiche/http2/hpack/http2_hpack_constants.cc | quiche/http2/hpack/http2_hpack_constants_test.cc | #include "quiche/http2/hpack/http2_hpack_constants.h"
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
namespace http2 {
std::string HpackEntryTypeToString(HpackEntryType v) {
switch (v) {
case HpackEntryType::kIndexedHeader:
return "kIndexedHeader";
case HpackEntryType::kDynamicTableSizeUpdate:
return "kDynamicTableSizeUpdate";
case HpackEntryType::kIndexedLiteralHeader:
return "kIndexedLiteralHeader";
case HpackEntryType::kUnindexedLiteralHeader:
return "kUnindexedLiteralHeader";
case HpackEntryType::kNeverIndexedLiteralHeader:
return "kNeverIndexedLiteralHeader";
}
return absl::StrCat("UnknownHpackEntryType(", static_cast<int>(v), ")");
}
std::ostream& operator<<(std::ostream& out, HpackEntryType v) {
return out << HpackEntryTypeToString(v);
}
} | #include "quiche/http2/hpack/http2_hpack_constants.h"
#include <sstream>
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
TEST(HpackEntryTypeTest, HpackEntryTypeToString) {
EXPECT_EQ("kIndexedHeader",
HpackEntryTypeToString(HpackEntryType::kIndexedHeader));
EXPECT_EQ("kDynamicTableSizeUpdate",
HpackEntryTypeToString(HpackEntryType::kDynamicTableSizeUpdate));
EXPECT_EQ("kIndexedLiteralHeader",
HpackEntryTypeToString(HpackEntryType::kIndexedLiteralHeader));
EXPECT_EQ("kUnindexedLiteralHeader",
HpackEntryTypeToString(HpackEntryType::kUnindexedLiteralHeader));
EXPECT_EQ("kNeverIndexedLiteralHeader",
HpackEntryTypeToString(HpackEntryType::kNeverIndexedLiteralHeader));
EXPECT_EQ("UnknownHpackEntryType(12321)",
HpackEntryTypeToString(static_cast<HpackEntryType>(12321)));
}
TEST(HpackEntryTypeTest, OutputHpackEntryType) {
{
std::stringstream log;
log << HpackEntryType::kIndexedHeader;
EXPECT_EQ("kIndexedHeader", log.str());
}
{
std::stringstream log;
log << HpackEntryType::kDynamicTableSizeUpdate;
EXPECT_EQ("kDynamicTableSizeUpdate", log.str());
}
{
std::stringstream log;
log << HpackEntryType::kIndexedLiteralHeader;
EXPECT_EQ("kIndexedLiteralHeader", log.str());
}
{
std::stringstream log;
log << HpackEntryType::kUnindexedLiteralHeader;
EXPECT_EQ("kUnindexedLiteralHeader", log.str());
}
{
std::stringstream log;
log << HpackEntryType::kNeverIndexedLiteralHeader;
EXPECT_EQ("kNeverIndexedLiteralHeader", log.str());
}
{
std::stringstream log;
log << static_cast<HpackEntryType>(1234321);
EXPECT_EQ("UnknownHpackEntryType(1234321)", log.str());
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/http2_hpack_constants.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/http2_hpack_constants_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
f625f0ba-808b-401d-8be6-8a292e612afc | cpp | tensorflow/tensorflow | algebraic_simplifier | third_party/xla/xla/service/gpu/transforms/algebraic_simplifier.cc | third_party/xla/xla/service/gpu/transforms/algebraic_simplifier_test.cc | #include "xla/service/gpu/transforms/algebraic_simplifier.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/triton/triton_support_legacy.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu {
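// Decides whether a dot should be rewritten into a reduce: always for
// vector-vector dots, for matmuls too small to be worth a GEMM rewrite, and
// for GEMMs that legacy Triton cannot handle on this compute capability.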
bool GpuAlgebraicSimplifierVisitor::ShouldStrengthReduceDotToReduce(
const HloInstruction* hlo) {
if (!options_.enable_dot_strength_reduction()) {
return false;
}
const HloDotInstruction* dot = DynCast<HloDotInstruction>(hlo);
if (dot == nullptr) {
return false;
}
const HloInstruction* lhs = dot->operand(0);
const HloInstruction* rhs = dot->operand(1);
DotDimensionNumbers dnums = dot->dot_dimension_numbers();
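  // An operand is effectively a vector when all of its dimensions are batch
  // or contracting dimensions.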
bool lhs_is_vector = (dnums.lhs_batch_dimensions_size() +
dnums.lhs_contracting_dimensions_size() ==
lhs->shape().rank());
bool rhs_is_vector = (dnums.rhs_batch_dimensions_size() +
dnums.rhs_contracting_dimensions_size() ==
rhs->shape().rank());
if (lhs_is_vector && rhs_is_vector) {
return true;
}
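  // Dots below a fixed size threshold (10,000,000 here) are cheaper to
  // strength-reduce to a reduce than to dispatch as a matmul.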
absl::StatusOr<bool> is_too_small =
IsMatrixMultiplicationTooSmallForRewriting(*hlo, 10000000);
CHECK_OK(is_too_small.status());
if (is_too_small.value()) {
return true;
}
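  // Otherwise, keep the dot only if Triton can actually emit a GEMM for it;
  // dots Triton cannot handle fall back to the reduce form.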
return !legacy_triton::CanTritonHandleGEMM(*dot, compute_capability_);
}
} | #include "xla/service/gpu/transforms/algebraic_simplifier.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class GpuAlgebraicSimplifierTest : public HloTestBase {};
TEST_F(GpuAlgebraicSimplifierTest, VectorVectorDotShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 500] parameter(0)
p1 = f32[32, 500] parameter(1)
ROOT dot = f32[32] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, MatrixVectorDotShouldNotBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 5000, 7000] parameter(0)
p1 = f32[32, 5000] parameter(1)
ROOT dot = f32[32,7000] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1},
algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_FALSE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest,
DotWithTypeUnsupportedByGemmFusionShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = c64[32, 5000, 7000] parameter(0)
p1 = c64[32, 5000] parameter(1)
ROOT dot = c64[32,7000] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, SmallDotShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 50, 70] parameter(0)
p1 = f32[32, 50] parameter(1)
ROOT dot = f32[32,70] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1},
algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, SmallDotShouldBeStrengthReduced2) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[2000, 3000] parameter(0)
p1 = f32[2000] parameter(1)
ROOT dot = f32[3000] dot(p0, p1), lhs_contracting_dims={0},
rhs_contracting_dims={0}, algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/algebraic_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/algebraic_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab83b9d8-6c24-44c9-a4c8-cd77b911d2de | cpp | tensorflow/tensorflow | cupti_buffer_events | third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events.cc | third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events_test.cc | #include "xla/backends/profiler/gpu/cupti_buffer_events.h"
#include "absl/strings/str_cat.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/backends/profiler/gpu/cupti_interface.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mem.h"
namespace xla {
namespace profiler {
namespace {
using absl::StatusCode;
template <typename CuptiActivity>
struct CuptiActivityHasGraphId {
static constexpr bool value = false;
};
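// CUPTI activity record layouts vary across CUDA toolkit versions, so pick
// the struct versions (and channel-id availability) per CUDA_VERSION.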
#if CUDA_VERSION >= 12000
#define TF_CUPTI_HAS_CHANNEL_ID 1
using CuptiActivityKernelTy = CUpti_ActivityKernel9;
using CuptiActivityMemcpyTy = CUpti_ActivityMemcpy5;
using CuptiActivityMemcpyP2PTy = CUpti_ActivityMemcpyPtoP4;
using CuptiActivityMemsetTy = CUpti_ActivityMemset4;
template <>
struct CuptiActivityHasGraphId<CuptiActivityKernelTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemcpyTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemcpyP2PTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemsetTy> {
static constexpr bool value = true;
};
#elif CUDA_VERSION >= 11060
#define TF_CUPTI_HAS_CHANNEL_ID 1
using CuptiActivityKernelTy = CUpti_ActivityKernel7;
using CuptiActivityMemcpyTy = CUpti_ActivityMemcpy5;
using CuptiActivityMemcpyP2PTy = CUpti_ActivityMemcpyPtoP4;
using CuptiActivityMemsetTy = CUpti_ActivityMemset4;
template <>
struct CuptiActivityHasGraphId<CuptiActivityKernelTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemcpyTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemcpyP2PTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemsetTy> {
static constexpr bool value = true;
};
#else
using CuptiActivityKernelTy = CUpti_ActivityKernel4;
using CuptiActivityMemcpyTy = CUpti_ActivityMemcpy;
using CuptiActivityMemcpyP2PTy = CUpti_ActivityMemcpy2;
using CuptiActivityMemsetTy = CUpti_ActivityMemset;
#endif
#if CUDA_VERSION >= 11070
using CuptiActivityGraphTraceTy = CUpti_ActivityGraphTrace;
#endif
const char *getActivityOverheadKindString(CUpti_ActivityOverheadKind kind) {
switch (kind) {
case CUPTI_ACTIVITY_OVERHEAD_DRIVER_COMPILER:
return "COMPILER";
case CUPTI_ACTIVITY_OVERHEAD_CUPTI_BUFFER_FLUSH:
return "BUFFER_FLUSH";
case CUPTI_ACTIVITY_OVERHEAD_CUPTI_INSTRUMENTATION:
return "INSTRUMENTATION";
case CUPTI_ACTIVITY_OVERHEAD_CUPTI_RESOURCE:
return "RESOURCE";
default:
break;
}
return "<UNKNOWN>";
}
const char *getActivityUnifiedMemoryKindString(
CUpti_ActivityUnifiedMemoryCounterKind kind) {
switch (kind) {
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD:
return "UM_BYTES_TRANSFER_HTOD";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH:
return "UM_BYTES_TRANSFER_DTOH";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT:
return "UM_CPU_PAGE_FAULT";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT:
return "UM_GPU_PAGE_FAULT";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING:
return "UM_THRASHING";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING:
return "UM_THROTTLING";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP:
return "UM_REMOTE_MAP";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOD:
return "UM_BYTES_TRANSFER_DTOD";
default:
break;
}
return "<UNKNOWN>";
}
template <typename CuptiActivity>
void SetEventGraphId(CuptiTracerEvent &event,
const CuptiActivity *cupti_activity) {
if constexpr (CuptiActivityHasGraphId<CuptiActivity>::value) {
event.graph_id = cupti_activity->graphId;
}
}
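// Converts a CUPTI kernel activity record into a CuptiTracerEvent, attaching
// any annotation recorded for the same (device, correlation id) pair.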
template <bool cupti_has_channel_id, typename CuptiActivityKernel>
void AddKernelActivityEvent(CuptiEventCollectorDelegate &collector,
const CuptiActivityKernel *kernel) {
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::Kernel;
event.source = CuptiTracerEventSource::Activity;
event.name = kernel->name;
event.start_time_ns = kernel->start;
event.end_time_ns = kernel->end;
event.device_id = kernel->deviceId;
event.context_id = kernel->contextId;
event.stream_id = kernel->streamId;
event.correlation_id = kernel->correlationId;
AnnotationMap::AnnotationInfo info =
collector.annotation_map.LookUp(event.device_id, event.correlation_id);
event.annotation = info.annotation;
event.nvtx_range = info.nvtx_range;
SetEventGraphId(event, kernel);
event.kernel_info.registers_per_thread = kernel->registersPerThread;
event.kernel_info.static_shared_memory_usage = kernel->staticSharedMemory;
event.kernel_info.dynamic_shared_memory_usage = kernel->dynamicSharedMemory;
event.kernel_info.block_x = kernel->blockX;
event.kernel_info.block_y = kernel->blockY;
event.kernel_info.block_z = kernel->blockZ;
event.kernel_info.grid_x = kernel->gridX;
event.kernel_info.grid_y = kernel->gridY;
event.kernel_info.grid_z = kernel->gridZ;
if constexpr (cupti_has_channel_id) {
event.kernel_info.channel_id = kernel->channelID;
event.kernel_info.channel_type = kernel->channelType;
}
collector.receive(std::move(event));
}
#if CUDA_VERSION >= 11070
void AddGraphTraceActivityEvent(CuptiEventCollectorDelegate &collector,
                                CuptiActivityGraphTraceTy *graph_trace) {
AnnotationMap::AnnotationInfo info = collector.annotation_map.LookUp(
graph_trace->deviceId, graph_trace->correlationId);
collector.receive(CuptiTracerEvent{
CuptiTracerEventType::CudaGraph,
CuptiTracerEventSource::Activity,
absl::StrCat("CudaGraphExec:", graph_trace->graphId),
info.annotation,
info.nvtx_range,
graph_trace->start,
graph_trace->end,
graph_trace->deviceId,
graph_trace->correlationId,
CuptiTracerEvent::kInvalidThreadId,
graph_trace->contextId,
graph_trace->streamId,
graph_trace->graphId,
});
}
#endif
void AddMemcpyActivityEvent(CuptiEventCollectorDelegate &collector,
const CuptiActivityMemcpyTy *memcpy) {
CuptiTracerEvent event{};
switch (memcpy->copyKind) {
case CUPTI_ACTIVITY_MEMCPY_KIND_HTOD:
event.type = CuptiTracerEventType::MemcpyH2D;
event.name = "MemcpyH2D";
break;
case CUPTI_ACTIVITY_MEMCPY_KIND_DTOH:
event.type = CuptiTracerEventType::MemcpyD2H;
event.name = "MemcpyD2H";
break;
case CUPTI_ACTIVITY_MEMCPY_KIND_DTOD:
event.type = CuptiTracerEventType::MemcpyD2D;
event.name = "MemcpyD2D";
break;
case CUPTI_ACTIVITY_MEMCPY_KIND_PTOP:
event.type = CuptiTracerEventType::MemcpyP2P;
event.name = "MemcpyP2P";
break;
default:
event.type = CuptiTracerEventType::MemcpyOther;
event.name = "MemcpyOther";
break;
}
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = memcpy->start;
event.end_time_ns = memcpy->end;
event.device_id = memcpy->deviceId;
event.context_id = memcpy->contextId;
event.stream_id = memcpy->streamId;
event.correlation_id = memcpy->correlationId;
AnnotationMap::AnnotationInfo info =
collector.annotation_map.LookUp(event.device_id, event.correlation_id);
event.annotation = info.annotation;
SetEventGraphId(event, memcpy);
event.memcpy_info.copy_kind = memcpy->copyKind;
event.memcpy_info.num_bytes = memcpy->bytes;
event.memcpy_info.destination = memcpy->deviceId;
event.memcpy_info.async = memcpy->flags & CUPTI_ACTIVITY_FLAG_MEMCPY_ASYNC;
event.memcpy_info.src_mem_kind = memcpy->srcKind;
event.memcpy_info.dst_mem_kind = memcpy->dstKind;
#if TF_CUPTI_HAS_CHANNEL_ID
event.memcpy_info.channel_id = memcpy->channelID;
event.memcpy_info.channel_type = memcpy->channelType;
#endif
collector.receive(std::move(event));
}
void AddMemcpyP2PActivityEvent(CuptiEventCollectorDelegate &collector,
const CuptiActivityMemcpyP2PTy *memcpy) {
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::MemcpyP2P;
event.name = "MemcpyP2P";
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = memcpy->start;
event.end_time_ns = memcpy->end;
event.device_id = memcpy->srcDeviceId;
event.context_id = memcpy->contextId;
event.stream_id = memcpy->streamId;
event.correlation_id = memcpy->correlationId;
AnnotationMap::AnnotationInfo info =
collector.annotation_map.LookUp(event.device_id, event.correlation_id);
event.annotation = info.annotation;
SetEventGraphId(event, memcpy);
event.memcpy_info.copy_kind = CUPTI_ACTIVITY_MEMCPY_KIND_PTOP;
event.memcpy_info.num_bytes = memcpy->bytes;
event.memcpy_info.destination = memcpy->dstDeviceId;
event.memcpy_info.async = memcpy->flags & CUPTI_ACTIVITY_FLAG_MEMCPY_ASYNC;
event.memcpy_info.src_mem_kind = memcpy->srcKind;
event.memcpy_info.dst_mem_kind = memcpy->dstKind;
#if TF_CUPTI_HAS_CHANNEL_ID
event.memcpy_info.channel_id = memcpy->channelID;
event.memcpy_info.channel_type = memcpy->channelType;
#endif
collector.receive(std::move(event));
}
void AddCuptiOverheadActivityEvent(CuptiEventCollectorDelegate &collector,
const CUpti_ActivityOverhead *overhead) {
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::Overhead;
event.name = getActivityOverheadKindString(overhead->overheadKind);
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = overhead->start;
event.end_time_ns = overhead->end;
event.device_id = 0;
switch (overhead->objectKind) {
case CUPTI_ACTIVITY_OBJECT_UNKNOWN:
return;
case CUPTI_ACTIVITY_OBJECT_THREAD:
case CUPTI_ACTIVITY_OBJECT_PROCESS:
event.thread_id = overhead->objectId.pt.threadId;
break;
case CUPTI_ACTIVITY_OBJECT_STREAM:
event.stream_id = overhead->objectId.dcs.streamId;
TF_FALLTHROUGH_INTENDED;
case CUPTI_ACTIVITY_OBJECT_DEVICE:
case CUPTI_ACTIVITY_OBJECT_CONTEXT:
event.device_id = overhead->objectId.dcs.deviceId;
break;
default:
LOG(ERROR) << "Unexpected object kind: " << overhead->objectKind;
return;
}
collector.receive(std::move(event));
}
void AddUnifiedMemoryActivityEvent(
CuptiEventCollectorDelegate &collector,
const CUpti_ActivityUnifiedMemoryCounter2 *record) {
VLOG(3) << "Cuda Unified Memory Activity, kind: " << record->counterKind
<< " src: " << record->srcId << " dst: " << record->dstId;
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::UnifiedMemory;
event.name = getActivityUnifiedMemoryKindString(record->counterKind);
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = record->start;
if (record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT ||
record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING ||
record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP ||
record->end <= record->start) {
event.end_time_ns = record->start + 1;
} else {
event.end_time_ns = record->end;
}
event.device_id = record->srcId;
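  // Give each unified-memory counter kind its own synthetic stream id so the
  // different counter kinds do not overlap on a single timeline.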
constexpr int kPseudoStreamId = 0x10000000;
event.stream_id = kPseudoStreamId + record->counterKind;
event.memcpy_info.copy_kind = CUPTI_ACTIVITY_MEMCPY_KIND_UNKNOWN;
if (record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD ||
record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH ||
record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOD) {
event.memcpy_info.num_bytes = record->value;
} else {
event.memcpy_info.num_bytes = 0;
}
event.memcpy_info.destination = record->dstId;
event.memcpy_info.async = false;
collector.receive(std::move(event));
}
void AddMemoryActivityEvent(CuptiEventCollectorDelegate &collector,
const CUpti_ActivityMemory *memory) {
CuptiTracerEvent event{};
event.name = absl::StrCat("Memory ", GetMemoryKindName(memory->memoryKind));
event.type = CuptiTracerEventType::MemoryResidency;
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = memory->start;
event.end_time_ns = std::max(memory->end, memory->start + 1);
event.device_id = memory->deviceId;
event.context_id = memory->contextId;
event.stream_id = 0;
event.memory_residency_info.num_bytes = memory->bytes;
event.memory_residency_info.mem_kind = memory->memoryKind;
event.memory_residency_info.address = memory->address;
VLOG(5) << "Cuda activity " << event.name
<< " addr: " << reinterpret_cast<void *>(memory->address)
<< " bytes: " << memory->bytes;
collector.receive(std::move(event));
}
void AddMemsetActivityEvent(CuptiEventCollectorDelegate &collector,
const CuptiActivityMemsetTy *memset) {
auto mem_kind = memset->memoryKind;
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::Memset;
event.source = CuptiTracerEventSource::Activity;
event.name = absl::StrCat("Memset ", mem_kind);
event.start_time_ns = memset->start;
event.end_time_ns = std::max(memset->end, memset->start + 1);
event.device_id = memset->deviceId;
event.correlation_id = memset->correlationId;
event.context_id = memset->contextId;
event.stream_id = memset->streamId;
SetEventGraphId(event, memset);
event.memset_info.num_bytes = memset->bytes;
event.memset_info.mem_kind = mem_kind;
event.memset_info.async = (memset->flags & CUPTI_ACTIVITY_FLAG_MEMSET_ASYNC);
#if TF_CUPTI_HAS_CHANNEL_ID
event.memset_info.channel_id = memset->channelID;
event.memset_info.channel_type = memset->channelType;
#endif
VLOG(5) << "Cuda activity " << event.name << " bytes: " << memset->bytes
<< " async: " << event.memset_info.async;
collector.receive(std::move(event));
}
void AddSynchronizationActivityEvent(
CuptiEventCollectorDelegate &collector,
const CUpti_ActivitySynchronization *sync) {
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::Generic;
event.source = CuptiTracerEventSource::Activity;
switch (sync->type) {
case CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_EVENT_SYNCHRONIZE:
event.name = "cuEventSynchronize";
break;
case CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_WAIT_EVENT:
event.name = "cuStreamWaitEvent";
break;
case CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_SYNCHRONIZE:
event.name = "cuStreamSynchronize";
break;
case CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_CONTEXT_SYNCHRONIZE:
event.name = "cuCtxSynchronize";
break;
default:
event.name = "unknown synchronization event";
break;
}
event.start_time_ns = sync->start;
event.end_time_ns = std::max(sync->end, sync->start + 1);
event.correlation_id = sync->correlationId;
event.context_id = sync->contextId;
VLOG(5) << "Cuda activity " << event.name;
collector.receive(std::move(event));
}
static absl::Status ConvertActivityBuffer(
CuptiEventCollectorDelegate &collector, uint8_t *buffer, const size_t size,
const size_t max_activity_event_count, size_t &total_activity_event_count,
size_t &dropped_activity_event_count) {
CuptiInterface *cupti_interface = GetCuptiInterface();
CUpti_Activity *record = nullptr;
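  // Drain the buffer one record at a time until CUPTI reports
  // CUPTI_ERROR_MAX_LIMIT_REACHED, which marks the end of the buffer.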
while (true) {
CUptiResult status =
cupti_interface->ActivityGetNextRecord(buffer, size, &record);
if (status == CUPTI_SUCCESS) {
if (total_activity_event_count >= max_activity_event_count) {
dropped_activity_event_count++;
continue;
}
total_activity_event_count++;
switch (record->kind) {
case CUPTI_ACTIVITY_KIND_KERNEL:
case CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL:
AddKernelActivityEvent<TF_CUPTI_HAS_CHANNEL_ID>(
collector, reinterpret_cast<CuptiActivityKernelTy *>(record));
break;
case CUPTI_ACTIVITY_KIND_CDP_KERNEL:
AddKernelActivityEvent<false>(
collector, reinterpret_cast<CUpti_ActivityCdpKernel *>(record));
break;
case CUPTI_ACTIVITY_KIND_MEMCPY:
AddMemcpyActivityEvent(
collector, reinterpret_cast<CuptiActivityMemcpyTy *>(record));
break;
case CUPTI_ACTIVITY_KIND_MEMCPY2:
AddMemcpyP2PActivityEvent(
collector, reinterpret_cast<CuptiActivityMemcpyP2PTy *>(record));
break;
case CUPTI_ACTIVITY_KIND_OVERHEAD:
AddCuptiOverheadActivityEvent(
collector, reinterpret_cast<CUpti_ActivityOverhead *>(record));
break;
case CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER:
AddUnifiedMemoryActivityEvent(
collector,
reinterpret_cast<CUpti_ActivityUnifiedMemoryCounter2 *>(record));
break;
case CUPTI_ACTIVITY_KIND_MEMORY: {
AddMemoryActivityEvent(
collector, reinterpret_cast<CUpti_ActivityMemory *>(record));
} break;
case CUPTI_ACTIVITY_KIND_MEMSET:
AddMemsetActivityEvent(
collector, reinterpret_cast<CuptiActivityMemsetTy *>(record));
break;
case CUPTI_ACTIVITY_KIND_SYNCHRONIZATION:
AddSynchronizationActivityEvent(
collector,
reinterpret_cast<CUpti_ActivitySynchronization *>(record));
break;
#if CUDA_VERSION >= 11070
case CUPTI_ACTIVITY_KIND_GRAPH_TRACE:
AddGraphTraceActivityEvent(
collector, reinterpret_cast<CuptiActivityGraphTraceTy *>(record));
break;
#endif
default:
VLOG(3) << "Activity type " << record->kind << " is not supported.";
break;
}
} else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) {
break;
} else if (status == CUPTI_ERROR_INVALID_KIND) {
VLOG(3) << "CUPTI parse ACTIVITY buffer got CUPTI_ERROR_INVALID_KIND";
break;
} else {
LOG(WARNING) << "CUPTI parse ACTIVITY buffer error: " << status;
return absl::Status(StatusCode::kInternal,
"Parse cupti activity buffer error.");
}
}
VLOG(3) << "CUPTI tracer post-process one ACTIVITY buffer of size: " << size
<< ", total events count:" << total_activity_event_count;
return absl::OkStatus();
}
}
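// Returns a stable string_view into the dedup set; an empty view is returned
// for empty input or once max_unique_count distinct strings are stored.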
absl::string_view StringDeduper::Dedup(absl::string_view str,
size_t max_unique_count) {
if (str.empty()) return absl::string_view();
auto it = strings_.find(str);
if (it != strings_.end()) return *it;
if (max_unique_count == 0 || strings_.size() < max_unique_count)
return *strings_.emplace(str).first;
return absl::string_view();
}
void AnnotationMap::Add(uint32_t device_id, uint32_t correlation_id,
const absl::string_view annotation,
const absl::string_view nvtx_range) {
if (annotation.empty() && nvtx_range.empty()) return;
VLOG(3) << "Add annotation: device_id: " << device_id
<< " correlation_id: " << correlation_id
<< " annotation: " << annotation;
if (device_id >= per_device_map_.size()) return;
auto &per_device_map = per_device_map_[device_id];
if (per_device_map.annotation_deduper.Size() < max_size_) {
AnnotationInfo info;
info.annotation = per_device_map.annotation_deduper.Dedup(annotation);
info.nvtx_range = per_device_map.nvtx_range_deduper.Dedup(nvtx_range);
per_device_map.correlation_map.emplace(correlation_id, info);
}
}
AnnotationMap::AnnotationInfo AnnotationMap::LookUp(
uint32_t device_id, uint32_t correlation_id) const {
if (device_id >= per_device_map_.size()) return AnnotationInfo();
auto &per_device_map = per_device_map_[device_id];
auto it = per_device_map.correlation_map.find(correlation_id);
return it != per_device_map.correlation_map.end() ? it->second
: AnnotationInfo();
}
CuptiActivityBufferManager::ActivityBufferAndSize::ActivityBufferAndSize(
uint8_t *p, size_t sz)
: buffer(p,
[](uint8_t *p) {
if (p != nullptr) tsl::port::AlignedFree(p);
}),
size(sz) {}
void AddActivityBufferListEventsTo(
CuptiEventCollectorDelegate &collector,
std::list<CuptiActivityBufferManager::ActivityBufferAndSize> &buffer_list,
size_t max_activity_event_count, size_t &dropped_activity_event_count) {
dropped_activity_event_count = 0;
size_t total_activity_event_count = 0;
while (!buffer_list.empty()) {
CuptiActivityBufferManager::ActivityBufferAndSize buffer_and_size(
std::move(buffer_list.front()));
buffer_list.pop_front();
ConvertActivityBuffer(collector, buffer_and_size.buffer.get(),
buffer_and_size.size, max_activity_event_count,
total_activity_event_count,
dropped_activity_event_count)
.IgnoreError();
}
}
CallbackAnnotationsAndEvents::CallbackAnnotationsAndEvents(
CallbackAnnotationsAndEvents &&another) {
*this = std::move(another);
}
CallbackAnnotationsAndEvents &CallbackAnnotationsAndEvents::operator=(
CallbackAnnotationsAndEvents &&another) {
annotations_ = std::move(another.annotations_);
nvtx_ranges_ = std::move(another.nvtx_ranges_);
num_dropped_events_ = another.num_dropped_events_;
event_queue_ = std::move(another.event_queue_);
another.Clear();
return *this;
}
void CallbackAnnotationsAndEvents::Clear() {
annotations_.Clear();
nvtx_ranges_.Clear();
num_dropped_events_ = 0;
event_queue_.Clear();
}
}
} | #include "xla/backends/profiler/gpu/cupti_buffer_events.h"
#include "tsl/platform/test.h"
namespace xla {
namespace profiler {
namespace test {
namespace {
TEST(CuptiBufferEventsTest, EventInitialization) {
CuptiTracerEvent event{
CuptiTracerEventType::CudaGraph,
CuptiTracerEventSource::Activity,
"CudaGraphExec:2",
"annotation",
"nvtx_range",
100,
200,
6,
8,
12345,
9,
2,
5,
};
EXPECT_EQ(event.type, CuptiTracerEventType::CudaGraph);
EXPECT_EQ(event.source, CuptiTracerEventSource::Activity);
EXPECT_EQ(event.name, "CudaGraphExec:2");
EXPECT_EQ(event.annotation, "annotation");
EXPECT_EQ(event.nvtx_range, "nvtx_range");
EXPECT_EQ(event.start_time_ns, 100);
EXPECT_EQ(event.end_time_ns, 200);
EXPECT_EQ(event.device_id, 6);
EXPECT_EQ(event.correlation_id, 8);
EXPECT_EQ(event.thread_id, 12345);
EXPECT_EQ(event.context_id, 9);
EXPECT_EQ(event.stream_id, 2);
EXPECT_EQ(event.graph_id, 5);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cee74dcb-bf1f-4997-86e7-5f7c7ff793fa | cpp | tensorflow/tensorflow | sort_rewriter | third_party/xla/xla/service/gpu/transforms/sort_rewriter.cc | third_party/xla/xla/service/gpu/transforms/sort_rewriter_test.cc | #include "xla/service/gpu/transforms/sort_rewriter.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/runtime/cub_sort_thunk.h"
#include "xla/service/stable_sort_expander.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
struct SortComputationAnalysis {
int key_operand;
bool descending;
};
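// Returns the parameter numbers feeding both sides of the compare, or (-1, -1)
// if either operand is not a parameter.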
std::pair<int64_t, int64_t> ParametersFromCmpOperands(
const HloCompareInstruction* cmp_op) {
if (cmp_op == nullptr) {
return std::pair<int64_t, int64_t>(-1, -1);
}
const HloParameterInstruction* param0 =
DynCast<HloParameterInstruction>(cmp_op->operand(0));
const HloParameterInstruction* param1 =
DynCast<HloParameterInstruction>(cmp_op->operand(1));
return (param0 && param1) ? std::make_pair(param0->parameter_number(),
param1->parameter_number())
: std::pair<int64_t, int64_t>(-1, -1);
}
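// Matches a comparator that is a single compare of an adjacent parameter pair
// (2k, 2k + 1); reports which operand pair is the key and the sort direction.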
std::optional<SortComputationAnalysis> AnalyzeCompareOp(
const HloInstruction* maybe_compare_op) {
const HloCompareInstruction* compare =
DynCast<HloCompareInstruction>(maybe_compare_op);
if (compare == nullptr || compare->direction() == ComparisonDirection::kEq ||
compare->direction() == ComparisonDirection::kNe) {
return std::nullopt;
}
auto [index0, index1] = ParametersFromCmpOperands(compare);
if (index0 == -1 || index1 == -1) {
return std::nullopt;
}
int first_index = std::min(index0, index1);
if (first_index % 2 != 0 || std::max(index0, index1) != first_index + 1) {
return std::nullopt;
}
bool descending = compare->direction() == ComparisonDirection::kGt ||
compare->direction() == ComparisonDirection::kGe;
bool reverse = first_index != index0;
return SortComputationAnalysis{first_index / 2, descending != reverse};
}
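// Matches the select-based comparator produced by StableSortExpander, where an
// iota operand breaks ties between equal keys.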
std::optional<SortComputationAnalysis> AnalyzeComplexSortComputation(
const HloSortInstruction& sort_op) {
auto computation = sort_op.called_computations().front();
if (computation->num_parameters() != 4) {
return std::nullopt;
}
int64_t iota_operand_index =
StableSortExpander::IotaOperandIndexForStableSort(sort_op);
if (iota_operand_index < 0) {
return std::nullopt;
}
auto root = computation->root_instruction();
if (root->opcode() != HloOpcode::kSelect) {
return std::nullopt;
}
auto iota_cmp = DynCast<HloCompareInstruction>(root->operand(1));
auto [iotap0, iotap1] = ParametersFromCmpOperands(iota_cmp);
if (iota_cmp == nullptr ||
iota_cmp->direction() != ComparisonDirection::kLt ||
iotap0 != iota_operand_index * 2 ||
iotap1 != iota_operand_index * 2 + 1) {
return std::nullopt;
}
auto eq_cmp = DynCast<HloCompareInstruction>(root->operand(0));
if (eq_cmp == nullptr || eq_cmp->direction() != ComparisonDirection::kEq) {
return std::nullopt;
}
auto [p0, p1] = ParametersFromCmpOperands(eq_cmp);
if (p0 < 0 || p1 < 0) {
auto cmp = DynCast<HloCompareInstruction>(eq_cmp->operand(0));
auto cmp_reverse = DynCast<HloCompareInstruction>(eq_cmp->operand(1));
auto [a, b] = ParametersFromCmpOperands(cmp);
auto [p, q] = ParametersFromCmpOperands(cmp_reverse);
if (cmp == nullptr || cmp_reverse == nullptr || a < 0 || b < 0 || a != q ||
b != p || cmp->direction() != cmp_reverse->direction() ||
cmp->direction() == Comparison::Direction::kEq ||
cmp->direction() == Comparison::Direction::kNe) {
return std::nullopt;
}
}
return AnalyzeCompareOp(root->operand(2));
}
std::optional<SortComputationAnalysis> AnalyzeSortOp(
const HloSortInstruction& sort_op) {
auto computation = sort_op.called_computations().front();
auto result = AnalyzeCompareOp(computation->root_instruction());
if (!result.has_value()) {
result = AnalyzeComplexSortComputation(sort_op);
}
return result;
}
absl::StatusOr<std::unique_ptr<CubSortRunnerInterface>> CreateRunner(
HloSortInstruction* sort_op, const SortComputationAnalysis& sort_config) {
int value_index = 1 - sort_config.key_operand;
return CubSortRunnerInterface::Create(
sort_op->operand(sort_config.key_operand)->shape().element_type(),
sort_op->operand_count() == 2
? std::optional(sort_op->operand(value_index)->shape().element_type())
: std::nullopt);
}
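// A sort is offloaded to CUB radix sort only if it has one or two operands,
// sorts along the minor dimension, is large enough to benefit, and uses a
// supported comparator and element type.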
bool IsCubCompatibleSort(HloSortInstruction* sort_op) {
VLOG(1) << "Sort instruction: " << sort_op->name();
if (sort_op->operand_count() != 1 && sort_op->operand_count() != 2) {
VLOG(2) << "Unsupported operand count: " << sort_op->operand_count();
return false;
}
const Shape& operand_shape = sort_op->operand(0)->shape();
if (sort_op->sort_dimension() != operand_shape.rank() - 1) {
VLOG(2) << "Sort dimension should be the minor one";
return false;
}
if (Product(operand_shape.dimensions()) < SortRewriter::SortSizeThreshold()) {
VLOG(2) << "Tensor shape size is too small to see an improvement";
return false;
}
auto sort_config = AnalyzeSortOp(*sort_op);
if (!sort_config.has_value()) {
VLOG(2) << "Only simple compare computations are supported";
return false;
}
if (!CreateRunner(sort_op, *sort_config).ok()) {
VLOG(2) << "Unsupported operand types (no compiled CUB kernels)";
return false;
}
VLOG(2) << "Sort operation is compatible";
return true;
}
HloInstruction* UnpackResultPair(HloSortInstruction* sort_op,
HloInstruction* custom_call, bool swap) {
HloComputation* parent = sort_op->parent();
HloInstruction* gte0 =
parent->AddInstruction(HloInstruction::CreateGetTupleElement(
sort_op->operand(0)->shape(), custom_call, swap ? 1 : 0));
HloInstruction* gte1 =
parent->AddInstruction(HloInstruction::CreateGetTupleElement(
sort_op->operand(1)->shape(), custom_call, swap ? 0 : 1));
return parent->AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
}
}
absl::StatusOr<bool> SortRewriter::RunOnInstruction(
HloSortInstruction* sort_op) {
SortComputationAnalysis sort_config = AnalyzeSortOp(*sort_op).value();
const Shape& operand_shape = sort_op->operand(0)->shape();
int64_t batch_size = Product(operand_shape.dimensions()) /
operand_shape.dimensions(sort_op->sort_dimension());
TF_ASSIGN_OR_RETURN(auto runner, CreateRunner(sort_op, sort_config));
TF_ASSIGN_OR_RETURN(
int64_t scratch_size,
runner->GetScratchSize(Product(operand_shape.dimensions()), batch_size));
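  // Batched sorts need extra int-aligned scratch space for batch_size + 1
  // offsets marking the segment boundaries.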
if (batch_size > 1) {
scratch_size += sizeof(int) - scratch_size % sizeof(int);
scratch_size += (batch_size + 1) * sizeof(int);
}
HloInstruction* keys = sort_op->mutable_operand(0);
HloInstruction* values = nullptr;
if (sort_op->operand_count() == 2) {
values = sort_op->mutable_operand(1);
if (sort_config.key_operand == 1) {
std::swap(keys, values);
}
}
std::vector<Shape> shapes{keys->shape()};
std::vector<HloInstruction*> operands{keys};
if (values != nullptr) {
shapes.push_back(values->shape());
operands.push_back(values);
}
shapes.push_back(ShapeUtil::MakeShape(U8, {scratch_size}));
Shape call_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(shapes));
HloInstruction* custom_call =
sort_op->parent()->AddInstruction(HloInstruction::CreateCustomCall(
call_shape, absl::MakeSpan(operands), kCubDeviceRadixSortTarget));
xla::SortOptions backend_config;
backend_config.set_descending(sort_config.descending);
TF_RETURN_IF_ERROR(custom_call->set_backend_config(backend_config));
HloInstruction* replacement;
if (sort_op->operand_count() == 1) {
replacement =
sort_op->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
sort_op->shape(), custom_call, 0));
} else {
replacement = UnpackResultPair(sort_op, custom_call,
sort_config.key_operand == 1);
}
TF_RETURN_IF_ERROR(
sort_op->parent()->ReplaceInstruction(sort_op, replacement));
return true;
}
absl::StatusOr<bool> SortRewriter::RunOnComputation(
HloComputation* computation) {
std::vector<HloSortInstruction*> sort_ops;
for (auto* inst : computation->instructions()) {
HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);
if (sort != nullptr && IsCubCompatibleSort(sort)) {
sort_ops.push_back(sort);
}
}
bool changed = false;
for (auto* sort : sort_ops) {
TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(sort));
changed |= result;
}
return changed;
}
absl::StatusOr<bool> SortRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "SortRewriter::Run(), before:\n" + module->ToString());
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation));
changed |= result;
}
XLA_VLOG_LINES(2, "SortRewriter::Run(), after:\n" + module->ToString());
return changed;
}
}
} | #include "xla/service/gpu/transforms/sort_rewriter.h"
#include <utility>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class SortRewriterTest : public HloTestBase {
public:
void SetUp() override {
HloTestBase::SetUp();
SortRewriter::SetSortSizeThresholdForTestingOnly(1000);
}
bool RunModuleAndPass(HloModule* module) {
auto cloned = module->Clone();
bool changed = SortRewriter().Run(module).value();
if (changed) {
EXPECT_TRUE(RunAndCompare(std::move(cloned), ErrorSpec{0, 0}));
}
return changed;
}
void ExpectDirection(const HloInstruction* instruction, bool descending) {
auto config = instruction->backend_config<xla::SortOptions>();
EXPECT_EQ(config->descending(), descending);
}
};
TEST_F(SortRewriterTest, SortKeysLessThan) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[1000] parameter(0)
ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
false);
}
TEST_F(SortRewriterTest, SortKeysGreaterThan) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %gt = pred[] compare(%lhs, %rhs), direction=GT
}
ENTRY %main {
%input = f32[1000] parameter(0)
ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
true);
}
TEST_F(SortRewriterTest, SortKeysGreaterThanSwapped) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(1)
%rhs = f32[] parameter(0)
ROOT %gt = pred[] compare(%lhs, %rhs), direction=GT
}
ENTRY %main {
%input = f32[1000] parameter(0)
ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
false);
}
TEST_F(SortRewriterTest, SortPairs) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs_key = u32[] parameter(0)
%rhs_key = u32[] parameter(1)
%lhs_value = f32[] parameter(2)
%rhs_value = f32[] parameter(3)
ROOT %lt = pred[] compare(%lhs_key, %rhs_key), direction=LT
}
ENTRY %main {
%input_keys = u32[1000] parameter(0)
%input_values = f32[1000] parameter(1)
ROOT %sort = (u32[1000], f32[1000]) sort(%input_keys, %input_values),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0),
m::GetTupleElement(m::CustomCall(), 1))));
}
TEST_F(SortRewriterTest, SortPairsSwapped) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(1)
%lhs_key = u32[] parameter(2)
%rhs_key = u32[] parameter(3)
ROOT %lt = pred[] compare(%lhs_key, %rhs_key), direction=LT
}
ENTRY %main {
%input_values = f32[1000] parameter(0)
%input_keys = u32[1000] parameter(1)
ROOT %sort = (f32[1000], u32[1000]) sort(%input_values, %input_keys),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 1),
m::GetTupleElement(m::CustomCall(), 0))));
}
TEST_F(SortRewriterTest, NoRewriteManyTensors) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
%unused1 = f64[] parameter(2)
%unused2 = f64[] parameter(3)
%unused3 = u64[] parameter(4)
%unused4 = u64[] parameter(5)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input1 = f32[1000] parameter(0)
%input2 = f64[1000] parameter(1)
%input3 = u64[1000] parameter(2)
ROOT %sort = (f32[1000], f64[1000], u64[1000]) sort(%input1, %input2, %input3),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(SortRewriterTest, NoRewriteNonMinorSortDimension) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[1000,4] parameter(0)
ROOT %sort = f32[1000,4] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(SortRewriterTest, NoRewriteUnsupportedType) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = pred[] parameter(0)
%rhs = pred[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = pred[1000] parameter(0)
ROOT %sort = pred[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(SortRewriterTest, NoRewriteComplexComparer) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%lhs_scaled = f32[] multiply(%lhs, f32[] constant(2))
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs_scaled, %rhs), direction=LT
}
ENTRY %main {
%input = f32[1000] parameter(0)
ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(SortRewriterTest, NoRewriteMixedKeysValues) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs_key = u32[] parameter(0)
%rhs_key = u32[] parameter(1)
%lhs_value = u32[] parameter(2)
%rhs_value = u32[] parameter(3)
ROOT %mixed = pred[] compare(%rhs_key, %lhs_value), direction=LT
}
ENTRY %main {
%input_keys = u32[1000] parameter(0)
%input_values = u32[1000] parameter(1)
ROOT %sort = (u32[1000], u32[1000]) sort(%input_keys, %input_values),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(SortRewriterTest, NoRewriteSmallSize) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[100] parameter(0)
ROOT %sort = f32[100] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(SortRewriterTest, SortWithBatchDim) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[10,100] parameter(0)
ROOT %sort = f32[10,100] sort(%input), dimensions={1}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
false);
}
TEST_F(SortRewriterTest, SortWithMultipleBatchDims) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[10,10,10] parameter(0)
ROOT %sort = f32[10,10,10] sort(%input), dimensions={2}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
false);
}
TEST_F(SortRewriterTest, SortPairsIotaComparerSimple) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = u16[] parameter(0)
%rhs = u16[] parameter(1)
%lhs_index = s32[] parameter(2)
%rhs_index = s32[] parameter(3)
cmp_indices = pred[] compare(%lhs_index, %rhs_index), direction=LT
cmp_lr = pred[] compare(%lhs, %rhs), direction=GT
cmp_eq = pred[] compare(%lhs, %rhs), direction=EQ
ROOT %lt = pred[] select(cmp_eq, cmp_indices, cmp_lr)
}
ENTRY %main {
%inputs = u16[1000] parameter(0)
%iota = s32[1000] iota(), iota_dimension=0
ROOT %sort = (u16[1000], s32[1000]) sort(%inputs, %iota),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0),
m::GetTupleElement(m::CustomCall(), 1))));
}
TEST_F(SortRewriterTest, SortPairsIotaComparerLikeStableSortExpander) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = u16[] parameter(0)
%rhs = u16[] parameter(1)
%lhs_index = s32[] parameter(2)
%rhs_index = s32[] parameter(3)
cmp_indices = pred[] compare(%lhs_index, %rhs_index), direction=LT
cmp_lr = pred[] compare(%lhs, %rhs), direction=GT
cmp_rl = pred[] compare(%rhs, %lhs), direction=GT
cmp_eq = pred[] compare(cmp_lr, cmp_rl), direction=EQ
ROOT %lt = pred[] select(cmp_eq, cmp_indices, cmp_lr)
}
ENTRY %main {
%inputs = u16[1000] parameter(0)
%iota = s32[1000] iota(), iota_dimension=0
ROOT %sort = (u16[1000], s32[1000]) sort(%inputs, %iota),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0),
m::GetTupleElement(m::CustomCall(), 1))));
}
TEST_F(SortRewriterTest, SortSizeThresholdIsSet) {
EXPECT_EQ(SortRewriter::SortSizeThreshold(), 1000);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/sort_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/sort_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ec5e98fc-8ed2-4635-ad58-fbb594789502 | cpp | tensorflow/tensorflow | benchmark_model | tensorflow/lite/tools/benchmark/benchmark_model.cc | tensorflow/tools/benchmark/benchmark_model_test.cc | #include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include <cstdint>
#ifdef __linux__
#include <unistd.h>
#endif
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/profiling/time.h"
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace benchmark {
using tensorflow::Stat;
constexpr int kMemoryCheckIntervalMs = 50;
#ifdef __linux__
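// Reads /proc/self/statm (sizes in pages) and converts each field to MiB.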
void GetRssStats(size_t* vsize, size_t* rss, size_t* shared, size_t* code) {
FILE* fp = fopen("/proc/self/statm", "rt");
*vsize = 0;
*rss = 0;
*shared = 0;
*code = 0;
if (fp == nullptr) return;
(void)!fscanf(fp, "%zu %zu %zu %zu", vsize, rss, shared, code);
fclose(fp);
*vsize = *vsize * getpagesize() >> 20;
*rss = *rss * getpagesize() >> 20;
*shared = *shared * getpagesize() >> 20;
*code = *code * getpagesize() >> 20;
}
#endif
BenchmarkParams BenchmarkModel::DefaultParams() {
BenchmarkParams params;
params.AddParam("num_runs", BenchmarkParam::Create<int32_t>(50));
params.AddParam("min_secs", BenchmarkParam::Create<float>(1.0f));
params.AddParam("max_secs", BenchmarkParam::Create<float>(150.0f));
params.AddParam("run_delay", BenchmarkParam::Create<float>(-1.0f));
params.AddParam("run_frequency", BenchmarkParam::Create<float>(-1.0f));
params.AddParam("num_threads", BenchmarkParam::Create<int32_t>(-1));
params.AddParam("use_caching", BenchmarkParam::Create<bool>(false));
params.AddParam("benchmark_name", BenchmarkParam::Create<std::string>(""));
params.AddParam("output_prefix", BenchmarkParam::Create<std::string>(""));
params.AddParam("warmup_runs", BenchmarkParam::Create<int32_t>(1));
params.AddParam("warmup_min_secs", BenchmarkParam::Create<float>(0.5f));
params.AddParam("verbose", BenchmarkParam::Create<bool>(false));
params.AddParam("dry_run", BenchmarkParam::Create<bool>(false));
params.AddParam("report_peak_memory_footprint",
BenchmarkParam::Create<bool>(false));
params.AddParam("memory_footprint_check_interval_ms",
BenchmarkParam::Create<int32_t>(kMemoryCheckIntervalMs));
params.AddParam("gpu_invoke_loop_times", BenchmarkParam::Create<int32_t>(1));
return params;
}
BenchmarkModel::BenchmarkModel() : params_(DefaultParams()) {}
void BenchmarkLoggingListener::OnBenchmarkEnd(const BenchmarkResults& results) {
auto inference_us = results.inference_time_us();
auto init_us = results.startup_latency_us();
auto warmup_us = results.warmup_time_us();
auto init_mem_usage = results.init_mem_usage();
auto overall_mem_usage = results.overall_mem_usage();
TFLITE_LOG(INFO) << "Inference timings in us: "
<< "Init: " << init_us << ", "
<< "First inference: " << warmup_us.first() << ", "
<< "Warmup (avg): " << warmup_us.avg() << ", "
<< "Inference (avg): " << inference_us.avg();
if (!init_mem_usage.IsSupported()) return;
TFLITE_LOG(INFO)
<< "Note: as the benchmark tool itself affects memory footprint, the "
"following is only APPROXIMATE to the actual memory footprint of the "
"model at runtime. Take the information at your discretion.";
TFLITE_LOG(INFO) << "Memory footprint delta from the start of the tool (MB): "
<< "init=" << init_mem_usage.mem_footprint_kb / 1024.0
<< " overall="
<< overall_mem_usage.mem_footprint_kb / 1024.0;
auto peak_mem_mb = results.peak_mem_mb();
if (peak_mem_mb > 0) {
TFLITE_LOG(INFO)
<< "Overall peak memory footprint (MB) via periodic monitoring: "
<< peak_mem_mb;
#ifdef __linux__
size_t vsize, rss, shared, code;
GetRssStats(&vsize, &rss, &shared, &code);
TFLITE_LOG(INFO) << "Memory status at the end of exeution:";
TFLITE_LOG(INFO) << "- VmRSS : " << rss << " MB";
TFLITE_LOG(INFO) << "+ RssAnnon : " << rss - shared << " MB";
TFLITE_LOG(INFO) << "+ RssFile + RssShmem : " << shared << " MB";
#endif
}
}
std::vector<Flag> BenchmarkModel::GetFlags() {
return {
CreateFlag<int32_t>(
"num_runs", ¶ms_,
"expected number of runs, see also min_secs, max_secs"),
CreateFlag<float>(
"min_secs", ¶ms_,
"minimum number of seconds to rerun for, potentially making the "
"actual number of runs to be greater than num_runs"),
CreateFlag<float>(
"max_secs", ¶ms_,
"maximum number of seconds to rerun for, potentially making the "
"actual number of runs to be less than num_runs. Note if --max-secs "
"is exceeded in the middle of a run, the benchmark will continue to "
"the end of the run but will not start the next run."),
CreateFlag<float>("run_delay", ¶ms_, "delay between runs in seconds"),
CreateFlag<float>(
"run_frequency", ¶ms_,
"Execute at a fixed frequency, instead of a fixed delay."
"Note if the targeted rate per second cannot be reached, the "
"benchmark would start the next run immediately, trying its best to "
"catch up. If set, this will override run_delay."),
CreateFlag<int32_t>("num_threads", ¶ms_, "number of threads"),
CreateFlag<bool>(
"use_caching", ¶ms_,
"Enable caching of prepacked weights matrices in matrix "
"multiplication routines. Currently implies the use of the Ruy "
"library."),
CreateFlag<std::string>("benchmark_name", ¶ms_, "benchmark name"),
CreateFlag<std::string>("output_prefix", ¶ms_,
"benchmark output prefix"),
CreateFlag<int32_t>(
"warmup_runs", ¶ms_,
"minimum number of runs performed on initialization, to "
"allow performance characteristics to settle, see also "
"warmup_min_secs"),
CreateFlag<float>(
"warmup_min_secs", ¶ms_,
"minimum number of seconds to rerun for, potentially making the "
"actual number of warm-up runs to be greater than warmup_runs"),
CreateFlag<bool>("verbose", ¶ms_,
"Whether to log parameters whose values are not set. "
"By default, only log those parameters that are set by "
"parsing their values from the commandline flags."),
CreateFlag<bool>("dry_run", ¶ms_,
"Whether to run the tool just with simply loading the "
"model, allocating tensors etc. but without actually "
"invoking any op kernels."),
CreateFlag<bool>(
"report_peak_memory_footprint", ¶ms_,
"Report the peak memory footprint by periodically checking the "
"memory footprint. Internally, a separate thread will be spawned for "
"this periodic check. Therefore, the performance benchmark result "
"could be affected."),
CreateFlag<int32_t>("memory_footprint_check_interval_ms", ¶ms_,
"The interval in millisecond between two consecutive "
"memory footprint checks. This is only used when "
"--report_peak_memory_footprint is set to true."),
CreateFlag<int32_t>(
"gpu_invoke_loop_times", ¶ms_,
"Number of GPU delegate invoke loop iterations. If > 0 then reported "
"latency is divided by this number. Used only when "
"TFLITE_GPU_ENABLE_INVOKE_LOOP is defined.")};
}
void BenchmarkModel::LogParams() {
const bool verbose = params_.Get<bool>("verbose");
TFLITE_LOG(INFO) << "Log parameter values verbosely: [" << verbose << "]";
LOG_BENCHMARK_PARAM(int32_t, "num_runs", "Min num runs", verbose);
LOG_BENCHMARK_PARAM(float, "min_secs", "Min runs duration (seconds)",
verbose);
LOG_BENCHMARK_PARAM(float, "max_secs", "Max runs duration (seconds)",
verbose);
LOG_BENCHMARK_PARAM(float, "run_delay", "Inter-run delay (seconds)", verbose);
LOG_BENCHMARK_PARAM(float, "run_frequency",
"Number of prorated runs per second", verbose);
LOG_BENCHMARK_PARAM(int32_t, "num_threads", "Num threads", verbose);
LOG_BENCHMARK_PARAM(bool, "use_caching", "Use caching", verbose);
LOG_BENCHMARK_PARAM(std::string, "benchmark_name", "Benchmark name", verbose);
LOG_BENCHMARK_PARAM(std::string, "output_prefix", "Output prefix", verbose);
LOG_BENCHMARK_PARAM(int32_t, "warmup_runs", "Min warmup runs", verbose);
LOG_BENCHMARK_PARAM(float, "warmup_min_secs",
"Min warmup runs duration (seconds)", verbose);
LOG_BENCHMARK_PARAM(bool, "dry_run", "Run w/o invoking kernels", verbose);
LOG_BENCHMARK_PARAM(bool, "report_peak_memory_footprint",
"Report the peak memory footprint", verbose);
LOG_BENCHMARK_PARAM(int32_t, "memory_footprint_check_interval_ms",
"Memory footprint check interval (ms)", verbose);
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
LOG_BENCHMARK_PARAM(int32_t, "gpu_invoke_loop_times",
"Number of GPU delegate invoke loop iterations. Latency "
"will be divided by it.",
verbose);
#endif
}
TfLiteStatus BenchmarkModel::PrepareInputData() { return kTfLiteOk; }
TfLiteStatus BenchmarkModel::ResetInputsAndOutputs() { return kTfLiteOk; }
Stat<int64_t> BenchmarkModel::Run(int min_num_times, float min_secs,
float max_secs, RunType run_type,
TfLiteStatus* invoke_status) {
Stat<int64_t> run_stats;
TFLITE_LOG(INFO) << "Running benchmark for at least " << min_num_times
<< " iterations and at least " << min_secs << " seconds but"
<< " terminate if exceeding " << max_secs << " seconds.";
int64_t now_us = profiling::time::NowMicros();
int64_t min_finish_us = now_us + static_cast<int64_t>(min_secs * 1.e6f);
int64_t max_finish_us = now_us + static_cast<int64_t>(max_secs * 1.e6f);
*invoke_status = kTfLiteOk;
float inter_run_sleep_time = params_.Get<float>("run_delay");
auto run_frequency = params_.Get<float>("run_frequency");
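  // With run_frequency set, pace runs against absolute deadlines rather than
  // a fixed sleep, so a slow run is followed immediately by the next one.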
double manual_inter_run_gap = 1.0 / run_frequency;
double next_run_finish_time = now_us * 1e-6 + manual_inter_run_gap;
for (int run = 0; (run < min_num_times || now_us < min_finish_us) &&
now_us <= max_finish_us;
run++) {
ResetInputsAndOutputs();
listeners_.OnSingleRunStart(run_type);
int64_t start_us = profiling::time::NowMicros();
TfLiteStatus status = RunImpl();
int64_t end_us = profiling::time::NowMicros();
listeners_.OnSingleRunEnd();
int64_t run_duration_us = end_us - start_us;
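// When the GPU delegate executes an internal invoke loop, normalize the
// measured latency to a per-iteration value.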
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
int32_t gpu_invoke_loop_times = params_.Get<int>("gpu_invoke_loop_times");
if (gpu_invoke_loop_times > 0) {
run_duration_us = static_cast<int64_t>(
static_cast<double>(run_duration_us) / gpu_invoke_loop_times);
}
#endif
run_stats.UpdateStat(run_duration_us);
if (run_frequency > 0) {
inter_run_sleep_time =
next_run_finish_time - profiling::time::NowMicros() * 1e-6;
next_run_finish_time += manual_inter_run_gap;
}
util::SleepForSeconds(inter_run_sleep_time);
now_us = profiling::time::NowMicros();
if (status != kTfLiteOk) {
*invoke_status = status;
}
}
std::stringstream stream;
run_stats.OutputToStream(&stream);
TFLITE_LOG(INFO) << stream.str() << std::endl;
return run_stats;
}
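// Checks parameter consistency; an invalid memory footprint check interval
// is replaced with the default kMemoryCheckIntervalMs.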
TfLiteStatus BenchmarkModel::ValidateParams() {
if (params_.Get<bool>("report_peak_memory_footprint")) {
const int32_t interval =
params_.Get<int32_t>("memory_footprint_check_interval_ms");
if (interval <= 0) {
TFLITE_LOG(WARN) << "--memory_footprint_check_interval_ms is set to "
<< interval
<< " (ms), This value is invalid, and it will be set to "
"the default value "
<< kMemoryCheckIntervalMs << " (ms).";
params_.Set<int32_t>("memory_footprint_check_interval_ms",
kMemoryCheckIntervalMs);
}
}
return kTfLiteOk;
}
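// Parses command-line flags into params_ and then runs the full benchmark.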
TfLiteStatus BenchmarkModel::Run(int argc, char** argv) {
TF_LITE_ENSURE_STATUS(ParseFlags(argc, argv));
return Run();
}
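// Full benchmark pipeline: validate params, initialize the model, prepare
// inputs, run warmup and measurement phases, and report timing and memory
// metrics to the registered listeners.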
TfLiteStatus BenchmarkModel::Run() {
TF_LITE_ENSURE_STATUS(ValidateParams());
LogParams();
auto peak_memory_reporter = MayCreateMemoryUsageMonitor();
if (peak_memory_reporter != nullptr) peak_memory_reporter->Start();
const double model_size_mb = MayGetModelFileSize() / 1e6;
const auto start_mem_usage = profiling::memory::GetMemoryUsage();
int64_t initialization_start_us = profiling::time::NowMicros();
TF_LITE_ENSURE_STATUS(Init());
const auto init_end_mem_usage = profiling::memory::GetMemoryUsage();
int64_t initialization_end_us = profiling::time::NowMicros();
int64_t startup_latency_us = initialization_end_us - initialization_start_us;
const auto init_mem_usage = init_end_mem_usage - start_mem_usage;
if (model_size_mb > 0) {
TFLITE_LOG(INFO) << "The input model file size (MB): " << model_size_mb;
} else {
TFLITE_LOG(WARN) << "Failed to get the input model file size.";
}
TFLITE_LOG(INFO) << "Initialized session in " << startup_latency_us / 1e3
<< "ms.";
TF_LITE_ENSURE_STATUS(PrepareInputData());
TfLiteStatus status = kTfLiteOk;
uint64_t input_bytes = ComputeInputBytes();
if (params_.Get<bool>("dry_run")) {
params_.Set("warmup_runs", 0);
params_.Set("warmup_min_secs", -1.0f);
params_.Set("num_runs", 0);
params_.Set("min_secs", -1.0f);
}
listeners_.OnBenchmarkStart(params_);
Stat<int64_t> warmup_time_us =
Run(params_.Get<int32_t>("warmup_runs"),
params_.Get<float>("warmup_min_secs"), params_.Get<float>("max_secs"),
WARMUP, &status);
if (status != kTfLiteOk) {
return status;
}
Stat<int64_t> inference_time_us =
Run(params_.Get<int32_t>("num_runs"), params_.Get<float>("min_secs"),
params_.Get<float>("max_secs"), REGULAR, &status);
const auto overall_mem_usage =
profiling::memory::GetMemoryUsage() - start_mem_usage;
float peak_mem_mb = profiling::memory::MemoryUsageMonitor::kInvalidMemUsageMB;
if (peak_memory_reporter != nullptr) {
peak_memory_reporter->Stop();
peak_mem_mb = peak_memory_reporter->GetPeakMemUsageInMB();
}
listeners_.OnBenchmarkEnd({model_size_mb, startup_latency_us, input_bytes,
warmup_time_us, inference_time_us, init_mem_usage,
overall_mem_usage, peak_mem_mb});
return status;
}
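// Parses the known flags, printing usage and failing on --help or a parse
// error, and warns about any command-line arguments left unconsumed.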
TfLiteStatus BenchmarkModel::ParseFlags(int* argc, char** argv) {
auto flag_list = GetFlags();
const bool parse_result =
Flags::Parse(argc, const_cast<const char**>(argv), flag_list);
if (!parse_result ||
(params_.HasParam("help") && params_.Get<bool>("help"))) {
std::string usage = Flags::Usage(argv[0], flag_list);
TFLITE_LOG(ERROR) << usage;
return kTfLiteError;
}
std::string unconsumed_args =
Flags::ArgsToString(*argc, const_cast<const char**>(argv));
if (!unconsumed_args.empty()) {
TFLITE_LOG(WARN) << "Unconsumed cmdline flags: " << unconsumed_args;
}
return kTfLiteOk;
}
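// Creates a memory usage monitor only when peak memory reporting is enabled.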
std::unique_ptr<profiling::memory::MemoryUsageMonitor>
BenchmarkModel::MayCreateMemoryUsageMonitor() const {
if (!params_.Get<bool>("report_peak_memory_footprint")) return nullptr;
return std::make_unique<profiling::memory::MemoryUsageMonitor>(
params_.Get<int32_t>("memory_footprint_check_interval_ms"));
}
}
} | #include "tensorflow/tools/benchmark/benchmark_model.h"
#include <memory>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/stat_summarizer.h"
namespace tensorflow {
namespace {
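// Builds a small test graph: a Placeholder multiplied (MatMul) by a constant
// tensor filled with 3.0; used by both tests below.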
void CreateTestGraph(const ::tensorflow::Scope& root,
benchmark_model::InputLayerInfo* input,
string* output_name, GraphDef* graph_def) {
const int input_width = 400;
const int input_height = 10;
input->shape = TensorShape({input_width, input_height});
input->data_type = DT_FLOAT;
const TensorShape constant_shape({input_height, input_width});
Tensor constant_tensor(DT_FLOAT, constant_shape);
test::FillFn<float>(&constant_tensor, [](int) -> float { return 3.0; });
auto placeholder =
ops::Placeholder(root, DT_FLOAT, ops::Placeholder::Shape(input->shape));
input->name = placeholder.node()->name();
auto m = ops::MatMul(root, placeholder, constant_tensor);
*output_name = m.node()->name();
TF_ASSERT_OK(root.ToGraphDef(graph_def));
}
TEST(BenchmarkModelTest, InitializeAndRun) {
const string dir = testing::TmpDir();
const string filename_pb = io::JoinPath(dir, "graphdef.pb");
auto root = Scope::NewRootScope().ExitOnError();
benchmark_model::InputLayerInfo input;
string output_name;
GraphDef graph_def;
CreateTestGraph(root, &input, &output_name, &graph_def);
string graph_def_serialized;
graph_def.SerializeToString(&graph_def_serialized);
TF_ASSERT_OK(
WriteStringToFile(Env::Default(), filename_pb, graph_def_serialized));
std::unique_ptr<Session> session;
std::unique_ptr<GraphDef> loaded_graph_def;
TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_pb, &session,
&loaded_graph_def));
std::unique_ptr<StatSummarizer> stats;
stats =
std::make_unique<tensorflow::StatSummarizer>(*(loaded_graph_def.get()));
int64_t time;
int64_t num_runs = 0;
TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
&time, &num_runs));
ASSERT_EQ(num_runs, 10);
}
TEST(BenchmarkModeTest, TextProto) {
const string dir = testing::TmpDir();
const string filename_txt = io::JoinPath(dir, "graphdef.pb.txt");
auto root = Scope::NewRootScope().ExitOnError();
benchmark_model::InputLayerInfo input;
string output_name;
GraphDef graph_def;
CreateTestGraph(root, &input, &output_name, &graph_def);
TF_ASSERT_OK(WriteTextProto(Env::Default(), filename_txt, graph_def));
std::unique_ptr<Session> session;
std::unique_ptr<GraphDef> loaded_graph_def;
TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_txt, &session,
&loaded_graph_def));
std::unique_ptr<StatSummarizer> stats;
stats =
std::make_unique<tensorflow::StatSummarizer>(*(loaded_graph_def.get()));
int64_t time;
int64_t num_runs = 0;
TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
&time, &num_runs));
ASSERT_EQ(num_runs, 10);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/benchmark/benchmark_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0de25d93-901e-4158-b088-a4a0e872cc8c | cpp | tensorflow/tensorflow | toco_convert | tensorflow/lite/toco/toco_convert.cc | tensorflow/lite/toco/toco_convert_test.cc | #include <cstdio>
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_cmdline_flags.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/toco_tooling.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
namespace toco {
namespace {
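// Fails fast when the required --output_file flag is missing or not writable.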
void CheckOutputFilePermissions(const Arg<std::string>& output_file) {
QCHECK(output_file.specified()) << "Missing required flag --output_file.\n";
QCHECK(port::file::Writable(output_file.value()).ok())
<< "Specified output_file is not writable: " << output_file.value()
<< ".\n";
}
void CheckFrozenModelPermissions(const Arg<std::string>& input_file) {
QCHECK(input_file.specified()) << "Missing required flag --input_file.\n";
QCHECK(port::file::Exists(input_file.value(), port::file::Defaults()).ok())
<< "Specified input_file does not exist: " << input_file.value() << ".\n";
QCHECK(port::file::Readable(input_file.value(), port::file::Defaults()).ok())
<< "Specified input_file exists, but is not readable: "
<< input_file.value() << ".\n";
}
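// Reads the frozen model from --input_file after checking that it exists and
// is readable; SavedModel directories are rejected.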
void ReadInputData(const ParsedTocoFlags& parsed_toco_flags,
const ParsedModelFlags& parsed_model_flags,
std::string* graph_def_contents) {
port::CheckInitGoogleIsDone("InitGoogle is not done yet.\n");
QCHECK(!parsed_toco_flags.savedmodel_directory.specified())
<< "Use `tensorflow/lite/python/tflite_convert` script with "
<< "SavedModel directories.\n";
CheckFrozenModelPermissions(parsed_toco_flags.input_file);
CHECK(port::file::GetContents(parsed_toco_flags.input_file.value(),
graph_def_contents, port::file::Defaults())
.ok());
}
}
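// Converts a serialized graph into the requested output format, optionally
// reporting the model's arithmetic op count.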
tensorflow::Status Convert(const std::string& graph_def_contents,
const TocoFlags& toco_flags,
const ModelFlags& model_flags,
std::string* output_file_contents,
int64_t* arithmetic_ops_count = nullptr) {
std::unique_ptr<Model> model =
Import(toco_flags, model_flags, graph_def_contents);
TF_RETURN_IF_ERROR(TransformWithStatus(toco_flags, model.get()));
TF_RETURN_IF_ERROR(Export(toco_flags, *model, toco_flags.allow_custom_ops(),
output_file_contents));
if (arithmetic_ops_count != nullptr) {
*arithmetic_ops_count = model->ArithmeticOpsCount();
}
return absl::OkStatus();
}
tensorflow::Status Convert(const ParsedTocoFlags& parsed_toco_flags,
const ParsedModelFlags& parsed_model_flags) {
ModelFlags model_flags;
ReadModelFlagsFromCommandLineFlags(parsed_model_flags, &model_flags);
TocoFlags toco_flags;
ReadTocoFlagsFromCommandLineFlags(parsed_toco_flags, &toco_flags);
std::string graph_def_contents;
ReadInputData(parsed_toco_flags, parsed_model_flags, &graph_def_contents);
CheckOutputFilePermissions(parsed_toco_flags.output_file);
std::string output_file_contents;
TF_RETURN_IF_ERROR(Convert(graph_def_contents, toco_flags, model_flags,
&output_file_contents));
TF_RETURN_IF_ERROR(
port::file::SetContents(parsed_toco_flags.output_file.value(),
output_file_contents, port::file::Defaults()));
return tensorflow::Status();
}
} | #include "tensorflow/lite/toco/toco_convert.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
namespace {
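// The death tests below verify flag and format validation; the last two
// tests run successful end-to-end conversions.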
TEST(TocoTest, MissingInputFile) {
ParsedTocoFlags toco_flags;
ParsedModelFlags model_flags;
EXPECT_DEATH(EXPECT_TRUE(Convert(toco_flags, model_flags).ok()),
"Missing required flag --input_file");
}
TEST(TocoTest, BadInputFormat) {
TocoFlags toco_flags;
ModelFlags model_flags;
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Unhandled input_format='FILE_FORMAT_UNKNOWN'");
}
TEST(TocoTest, MissingOutputArrays) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"This model does not define output arrays, so a --output_arrays "
"flag must be given on the command-line");
}
TEST(TocoTest, BadOutputArray) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Specified output array .output1. is not produced by any op "
"in this graph. Is it a typo");
}
TEST(TocoTest, BadOutputFormat) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "output1"
input: "input1"
input: "input2"
op: "Sub"
attr { key: "T" value { type: DT_FLOAT } }
}
)GraphDef";
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Unhandled output_format='FILE_FORMAT_UNKNOWN'");
}
TEST(TocoTest, SimpleFloatModel) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
toco_flags.set_output_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "input1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "input2"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "output1"
input: "input1"
input: "input2"
op: "Sub"
attr { key: "T" value { type: DT_FLOAT } }
}
)GraphDef";
std::string output;
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok());
EXPECT_TRUE(!output.empty());
}
TEST(TocoTest, TransientStringTensors) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
toco_flags.set_output_format(TFLITE);
toco::InputArray* input_1 = model_flags.add_input_arrays();
input_1->set_name("input1");
toco::InputArray* indices_1 = model_flags.add_input_arrays();
indices_1->set_name("indices1");
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "input1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_STRING } }
attr { key: "shape" value { shape { dim { size:1 }}}}
}
node {
name: "indices1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "intermediate1"
op: "Gather"
input: "input1"
input: "indices1"
attr { key: "Tparams" value { type: DT_STRING } }
attr { key: "Tindices" value { type: DT_INT64 } }
}
node {
name: "output1"
op: "Gather"
input: "intermediate1"
input: "indices2"
attr { key: "Tparams" value { type: DT_STRING } }
attr { key: "Tindices" value { type: DT_INT64 } }
}
)GraphDef";
std::string output;
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok());
EXPECT_TRUE(!output.empty());
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_convert.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_convert_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be17faa3-3c5c-43a2-8fd7-e2e54af789ce | cpp | tensorflow/tensorflow | conv | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/conv.cc | tensorflow/lite/delegates/hexagon/builders/tests/conv_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/conv.h"
#include <cstdint>
#include <string>
#include <tuple>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/conv_util.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/op_util_common.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
using ::llvm::ArrayRef;
bool IsShapeFullyStatic(ArrayRef<int64_t> shape) {
return llvm::all_of(shape, [](int64_t d) { return d >= 0; });
}
bool NonBatchDimsFullyStatic(ArrayRef<int64_t> shape) {
return IsShapeFullyStatic(shape.drop_front());
}
bool AreShapesFullyStatic(const ConvView& data) {
return IsShapeFullyStatic(data.InputShape()) &&
IsShapeFullyStatic(data.KernelShape()) &&
IsShapeFullyStatic(data.OutputShape());
}
bool InputOutputNonBatchDimsFullyStatic(const ConvView& data) {
return NonBatchDimsFullyStatic(data.InputShape()) &&
IsShapeFullyStatic(data.KernelShape()) &&
NonBatchDimsFullyStatic(data.OutputShape());
}
bool IsPaddingSupported(const ConvView& data) {
return llvm::all_of(data.Padding(), [](const DimPadding& p) {
return p.Hi() == 0 && p.Lo() == 0;
});
}
bool IsInputDilationSupported(const ConvView& data) {
return llvm::all_of(data.InputDilations(), [](int64_t v) { return v == 1; });
}
bool IsBatchGroupSupported(const ConvView& data) {
return data.BatchGroupCount() == 1;
}
bool IsWindowReversalSupported(const ConvView& data) {
return llvm::all_of(data.WindowReversal(), [](bool b) { return !b; });
}
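// A convolution stays "legal" (i.e. is not rewritten) unless it is a
// standard, depthwise, or supported non-trivial conv with sufficiently
// static shapes, a TFL-native layout, batch group count 1, no window
// reversal, and, for the trivial cases, no padding or input dilation.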
bool IsConvLegal(mhlo::ConvolutionOp op) {
const ConvView data(op);
const bool supported_conv_type = IsStandardConv(data) ||
IsDepthwiseConv(data) ||
IsSupportedNonTrivialConv(data);
const bool is_non_supported_trivial_conv =
(!IsSupportedNonTrivialConv(data) &&
(!IsPaddingSupported(data) || !IsInputDilationSupported(data)));
const bool are_shapes_supported =
((IsStandardConv(data) || IsDepthwiseConv(data)) &&
InputOutputNonBatchDimsFullyStatic(data)) ||
AreShapesFullyStatic(data);
return !supported_conv_type || !IsBatchGroupSupported(data) ||
!are_shapes_supported || !IsTFLNativeLayout(data) ||
is_non_supported_trivial_conv || !IsWindowReversalSupported(data);
}
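// TFL conv ops require a bias operand, so build an all-zero constant sized
// to the number of output channels.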
arith::ConstantOp BuildEmptyBias(OpBuilder& b, Location loc,
const ConvView& data) {
auto bias_type = RankedTensorType::get(
{data.OutputLayout().SpecialDim2(data.OutputShape())},
data.ElementType());
auto bias_const_data = b.getZeroAttr(bias_type);
return b.create<arith::ConstantOp>(loc, bias_const_data);
}
class LegalizeConv2D : public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
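// Rewrites a standard 4-D mhlo.convolution into tfl.conv_2d with a zero bias.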
LogicalResult LegalizeConv2D::matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(op);
if (IsConvLegal(op) || !IsStandardConv(data) ||
data.InputLayout().Rank() != 4) {
return failure();
}
const auto& kernel_dilations = data.KernelDilations();
auto tfl_h_dilation = rewriter.getI32IntegerAttr(kernel_dilations[0]);
auto tfl_w_dilation = rewriter.getI32IntegerAttr(kernel_dilations[1]);
const auto& window_strides = data.Strides();
auto tfl_h_stride = rewriter.getI32IntegerAttr(window_strides[0]);
auto tfl_w_stride = rewriter.getI32IntegerAttr(window_strides[1]);
auto tfl_padding = rewriter.getStringAttr("VALID");
auto bias = BuildEmptyBias(rewriter, op->getLoc(), data);
auto tfl_faf_none = rewriter.getStringAttr("NONE");
rewriter.replaceOpWithNewOp<TFL::Conv2DOp>(
op, op.getResult().getType(), op.getLhs(), op.getRhs(), bias,
tfl_h_dilation, tfl_w_dilation, tfl_faf_none, tfl_padding, tfl_h_stride,
tfl_w_stride);
return success();
}
class LegalizeConvDepthwise : public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
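// Rewrites a depthwise mhlo.convolution into tfl.depthwise_conv_2d, deriving
// the depth multiplier from the output/input channel ratio.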
LogicalResult LegalizeConvDepthwise::matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(op);
if (IsConvLegal(op) || !IsDepthwiseConv(data)) {
return failure();
}
const auto& kernel_dilations = data.KernelDilations();
auto tfl_h_dilation = rewriter.getI32IntegerAttr(kernel_dilations[0]);
auto tfl_w_dilation = rewriter.getI32IntegerAttr(kernel_dilations[1]);
const auto& window_strides = data.Strides();
auto tfl_h_stride = rewriter.getI32IntegerAttr(window_strides[0]);
auto tfl_w_stride = rewriter.getI32IntegerAttr(window_strides[1]);
auto tfl_padding = rewriter.getStringAttr("VALID");
const int64_t out_channels =
data.OutputLayout().SpecialDim2(data.OutputShape());
const int64_t in_channels = data.InputLayout().SpecialDim2(data.InputShape());
const int32_t depth_multiplier = out_channels / in_channels;
auto depth_multiplier_attr = rewriter.getI32IntegerAttr(depth_multiplier);
auto bias = BuildEmptyBias(rewriter, op->getLoc(), data);
auto tfl_faf_none = rewriter.getStringAttr("NONE");
rewriter.replaceOpWithNewOp<TFL::DepthwiseConv2DOp>(
op, op.getResult().getType(), op.getLhs(), op.getRhs(), bias,
tfl_h_dilation, tfl_w_dilation, tfl_faf_none, tfl_padding, tfl_h_stride,
tfl_w_stride, depth_multiplier_attr);
return success();
}
class LegalizeConv3D : public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
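// Rewrites a standard 5-D mhlo.convolution into tfl.conv_3d with a zero bias.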
LogicalResult LegalizeConv3D::matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(op);
if (IsConvLegal(op) || !IsStandardConv(data) ||
data.InputLayout().Rank() != 5) {
return failure();
}
const auto& kernel_dilations = data.KernelDilations();
auto tfl_d_dilation = rewriter.getI32IntegerAttr(kernel_dilations[0]);
auto tfl_h_dilation = rewriter.getI32IntegerAttr(kernel_dilations[1]);
auto tfl_w_dilation = rewriter.getI32IntegerAttr(kernel_dilations[2]);
const auto& window_strides = data.Strides();
auto tfl_d_stride = rewriter.getI32IntegerAttr(window_strides[0]);
auto tfl_h_stride = rewriter.getI32IntegerAttr(window_strides[1]);
auto tfl_w_stride = rewriter.getI32IntegerAttr(window_strides[2]);
auto tfl_padding = rewriter.getStringAttr("VALID");
auto bias = BuildEmptyBias(rewriter, op->getLoc(), data);
auto tfl_faf_none = rewriter.getStringAttr("NONE");
rewriter.replaceOpWithNewOp<TFL::Conv3DOp>(
op, op.getResult().getType(), op.getLhs(), op.getRhs(), bias,
tfl_d_dilation, tfl_h_dilation, tfl_w_dilation, tfl_faf_none, tfl_padding,
tfl_d_stride, tfl_h_stride, tfl_w_stride);
return success();
}
class ConvertNonTrivialConvToResizeBilinearOp
: public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
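// Matches input-dilated convolutions that are equivalent to bilinear
// upsampling and rewrites them to tfl.resize_bilinear.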
LogicalResult ConvertNonTrivialConvToResizeBilinearOp::matchAndRewrite(
mhlo::ConvolutionOp conv_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(conv_op);
bool align_corners;
if (!MatchWithResizeBilinearOp(data, align_corners)) {
return rewriter.notifyMatchFailure(
conv_op, "op does not match with resize_bilinear op");
}
SmallVector<int32_t, 4> output_shape_i32;
for (int64_t spatial_dim : data.InputLayout().Spatials()) {
output_shape_i32.push_back(
static_cast<int32_t>(data.OutputShape()[spatial_dim]));
}
Value output_sizes_attr = rewriter.create<mlir::arith::ConstantOp>(
conv_op.getLoc(), rewriter.getI32TensorAttr(output_shape_i32));
rewriter.replaceOpWithNewOp<TFL::ResizeBilinearOp>(
conv_op, conv_op.getType(), conv_op.getLhs(), output_sizes_attr,
rewriter.getBoolAttr(align_corners),
rewriter.getBoolAttr(false));
return success();
}
class ConvertNonTrivialConvToTransposeConvOp
: public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
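// Rewrites input-dilated (transposed) convolutions into tfl.transpose_conv,
// reversing the kernel's spatial dimensions to match TFL semantics.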
LogicalResult ConvertNonTrivialConvToTransposeConvOp::matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(op);
if (!IsSupportedNonTrivialConv(data)) {
return rewriter.notifyMatchFailure(op, "Not a non-trivial convolution.");
}
if (op.getFeatureGroupCount() != 1) {
return rewriter.notifyMatchFailure(
op, "group or depthwise convolution is not supported");
}
auto strides = data.InputDilations();
auto tfl_h_stride = rewriter.getI32IntegerAttr(strides[0]);
auto tfl_w_stride = rewriter.getI32IntegerAttr(strides[1]);
std::string padding;
SmallVector<int64_t, 4> padding_array;
for (auto& padding : data.Padding()) {
padding_array.push_back(padding.Lo());
padding_array.push_back(padding.Hi());
}
if (IsTransposeConvPaddingValid(op, 2, strides,
padding_array)) {
padding = "VALID";
} else if (IsTransposeConvPaddingSame(op, 2, strides,
padding_array)) {
padding = "SAME";
} else {
return rewriter.notifyMatchFailure(op,
"requires padding to be SAME or VALID");
}
auto bias = BuildEmptyBias(rewriter, op->getLoc(), data);
auto tfl_faf_none = rewriter.getStringAttr("NONE");
SmallVector<int32_t> kernel_spatial_dims_i32(
data.KernelLayout().Spatials().begin(),
data.KernelLayout().Spatials().end());
Value axis = rewriter.create<arith::ConstantOp>(
op.getLoc(), rewriter.getI32TensorAttr(kernel_spatial_dims_i32));
auto filter = rewriter.create<TFL::ReverseV2Op>(
op.getLoc(), op.getRhs().getType(), op.getRhs(), axis);
SmallVector<int32_t, 4> output_shape_i32(data.OutputShape().begin(),
data.OutputShape().end());
auto output_sizes = rewriter.create<arith::ConstantOp>(
op.getLoc(), rewriter.getI32TensorAttr(output_shape_i32));
rewriter.replaceOpWithNewOp<TFL::TransposeConvOp>(
op, op.getResult().getType(), output_sizes,
filter, op.getLhs(), bias,
rewriter.getStringAttr(padding),
tfl_h_stride, tfl_w_stride,
tfl_faf_none);
return success();
}
class SliceDepthwiseTransposedConvolution
: public OpRewritePattern<mhlo::ConvolutionOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::ConvolutionOp op,
PatternRewriter& rewriter) const final;
};
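// TFL has no grouped transpose_conv, so split a depthwise transposed
// convolution into per-channel slices, convolve each slice independently,
// and concatenate the results along the output feature dimension.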
LogicalResult SliceDepthwiseTransposedConvolution::matchAndRewrite(
mhlo::ConvolutionOp conv_op, PatternRewriter& rewriter) const {
const ConvView data(conv_op);
if (!IsSupportedNonTrivialConv(data)) {
return rewriter.notifyMatchFailure(conv_op,
"Not a non-trivial convolution.");
}
mhlo::ConvDimensionNumbersAttr dnums = conv_op.getDimensionNumbers();
const int64_t input_feature_dimension = dnums.getInputFeatureDimension();
const int64_t input_channels =
mlir::cast<ShapedType>(conv_op.getLhs().getType())
.getDimSize(input_feature_dimension);
const int64_t feature_group_count = conv_op.getFeatureGroupCount();
const int64_t kernel_input_feature_dimension =
dnums.getKernelInputFeatureDimension();
const int64_t kernel_input_channels =
mlir::cast<ShapedType>(conv_op.getRhs().getType())
.getDimSize(kernel_input_feature_dimension);
const int64_t kernel_output_feature_dimension =
dnums.getKernelOutputFeatureDimension();
const int64_t kernel_output_channels =
mlir::cast<ShapedType>(conv_op.getRhs().getType())
.getDimSize(kernel_output_feature_dimension);
if (feature_group_count == 1) {
return rewriter.notifyMatchFailure(conv_op, "Not a depthwise convolution");
}
if (input_channels != feature_group_count) {
return rewriter.notifyMatchFailure(
conv_op, "Not a detphwise transposed convolution");
}
if (MatchWithResizeBilinearOp(data)) {
return rewriter.notifyMatchFailure(
conv_op, "Op will be legalized to ResizeBilinearOp");
}
if ((kernel_output_channels % feature_group_count != 0) ||
(kernel_input_channels != 1)) {
return rewriter.notifyMatchFailure(
conv_op, "Not a supported detphwise transposed convolution");
}
if ((kernel_output_channels / feature_group_count) != 1) {
return rewriter.notifyMatchFailure(
conv_op,
"Unsupported detphwise transpose convolution with non-1 channel "
"multiplier");
}
auto create_slice = [&](mlir::Value tensor, int64_t depth_idx,
int64_t channel_idx,
bool is_kernel = false) -> mlir::Value {
auto tensor_shape =
mlir::cast<ShapedType>(tensor.getType()).getShape().vec();
llvm::SmallVector<int64_t> start_indices(tensor_shape.size(), 0);
auto limit_indices = tensor_shape;
const llvm::SmallVector<int64_t> strides(tensor_shape.size(), 1);
start_indices[channel_idx] = depth_idx;
if (is_kernel) {
limit_indices[channel_idx] =
depth_idx + (kernel_output_channels / feature_group_count);
} else {
limit_indices[channel_idx] = depth_idx + 1;
}
return rewriter.create<mhlo::SliceOp>(
conv_op.getLoc(), tensor, rewriter.getI64TensorAttr(start_indices),
rewriter.getI64TensorAttr(limit_indices),
rewriter.getI64TensorAttr(strides));
};
llvm::SmallVector<mlir::Value> conv_results;
for (int i = 0; i < feature_group_count; ++i) {
auto sliced_input =
create_slice(conv_op.getLhs(), i, input_feature_dimension);
auto sliced_kernel = create_slice(conv_op.getRhs(), i,
kernel_output_feature_dimension, true);
auto output_type = mlir::cast<ShapedType>(conv_op->getResult(0).getType());
auto new_output_shape = output_type.getShape().vec();
new_output_shape[dnums.getOutputFeatureDimension()] /= feature_group_count;
auto new_output_type =
RankedTensorType::get(new_output_shape, output_type.getElementType());
auto conv_result = rewriter.create<mhlo::ConvolutionOp>(
conv_op.getLoc(), new_output_type, sliced_input, sliced_kernel,
conv_op.getWindowStridesAttr(), conv_op.getPaddingAttr(),
conv_op.getLhsDilationAttr(), conv_op.getRhsDilationAttr(),
conv_op.getWindowReversalAttr(), conv_op.getDimensionNumbers(),
1, 1,
conv_op.getPrecisionConfigAttr());
conv_results.push_back(conv_result);
}
auto final_output = rewriter.create<mhlo::ConcatenateOp>(
conv_op.getLoc(), conv_results,
rewriter.getI64IntegerAttr(dnums.getOutputFeatureDimension()));
rewriter.replaceOp(conv_op, final_output.getResult());
return success();
}
class Conv1DToConv2D : public OpRewritePattern<mhlo::ConvolutionOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::ConvolutionOp op,
PatternRewriter& rewriter) const final;
};
arith::ConstantOp ShapeToConst(PatternRewriter& rewriter,
ArrayRef<int64_t> shape, Location loc) {
auto attr_type = RankedTensorType::get({static_cast<int64_t>(shape.size())},
rewriter.getIntegerType(32));
auto casted_shape = llvm::map_range(shape, [](auto i64) -> int32_t {
return (i64 < 0) ? -1 : static_cast<int32_t>(i64);
});
auto attr =
DenseIntElementsAttr::get(attr_type, llvm::to_vector(casted_shape));
return rewriter.create<arith::ConstantOp>(loc, attr_type, attr);
}
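// Appends a trivial (size-1) spatial dimension to the given layout and shape
// so a 1-D conv can be handled by the 2-D path.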
std::tuple<llvm::SmallVector<int64_t>, Layout> InsertTrivialSpatialDim(
const Layout& layout, ArrayRef<int64_t> shape) {
const int64_t last_spatial = layout.Spatials()[layout.Rank() - 3];
const int64_t new_dim1 = (layout.SpecialDim1() > last_spatial)
? layout.SpecialDim1() + 1
: layout.SpecialDim1();
const int64_t new_dim2 = (layout.SpecialDim2() > last_spatial)
? layout.SpecialDim2() + 1
: layout.SpecialDim2();
llvm::SmallVector<int64_t> new_spatials(layout.Spatials());
const int64_t new_last_spatial = new_spatials.back() + 1;
new_spatials.push_back(new_last_spatial);
llvm::SmallVector<int64_t, 4> new_shape(shape.size() + 1, 1);
new_shape[new_dim1] = layout.SpecialDim1(shape);
new_shape[new_dim2] = layout.SpecialDim2(shape);
for (auto new_spatial : new_spatials) {
if (new_spatial == new_last_spatial) {
continue;
}
new_shape[new_spatial] = shape[new_spatial];
}
return std::tuple(new_shape, Layout(new_dim1, new_dim2, new_spatials));
}
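// Expands a 1-D convolution into an equivalent 2-D convolution by inserting
// a trivial spatial dimension into the operands and window attributes, then
// reshapes the result back to 1-D.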
LogicalResult Conv1DToConv2D::matchAndRewrite(mhlo::ConvolutionOp op,
PatternRewriter& rewriter) const {
const ConvView view(op);
if (view.InputLayout().Rank() != 3) {
return rewriter.notifyMatchFailure(op, "Not 1D conv.");
}
if (!IsInputDilationSupported(view)) {
return rewriter.notifyMatchFailure(op, "Expects trivial lhs dims.");
}
if (!InputOutputNonBatchDimsFullyStatic(view)) {
return rewriter.notifyMatchFailure(op, "Expects static dims.");
}
if (!IsWindowReversalSupported(view)) {
return rewriter.notifyMatchFailure(op, "Expects window reversal trivial.");
}
if (!view.InputLayout().AreSpatialsIota() ||
!view.KernelLayout().AreSpatialsIota() ||
!view.OutputLayout().AreSpatialsIota()) {
return rewriter.notifyMatchFailure(op,
"Expects well formed spatials dims.");
}
auto [lhs_new_shape, lhs_new_layout] =
InsertTrivialSpatialDim(view.InputLayout(), view.InputShape());
auto lhs_new_type = op.getLhs().getType().clone(lhs_new_shape);
auto new_lhs = rewriter.create<TFL::ReshapeOp>(
op.getLoc(), lhs_new_type, op.getLhs(),
ShapeToConst(rewriter, lhs_new_shape, op.getLoc()));
auto [rhs_new_shape, rhs_new_layout] =
InsertTrivialSpatialDim(view.KernelLayout(), view.KernelShape());
auto rhs_new_type = op.getRhs().getType().clone(rhs_new_shape);
auto new_rhs =
rewriter.create<mhlo::ReshapeOp>(op.getLoc(), rhs_new_type, op.getRhs());
auto [out_new_shape, out_new_layout] =
InsertTrivialSpatialDim(view.OutputLayout(), view.OutputShape());
auto out_new_type = op.getResult().getType().clone(out_new_shape);
llvm::SmallVector<int64_t, 2> strides_2d;
strides_2d.push_back(view.Strides()[0]);
strides_2d.push_back(1);
auto strides_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2}, rewriter.getI64Type()), strides_2d);
SmallVector<int64_t, 4> padding_2d;
const auto& dim_pad = view.Padding()[0];
padding_2d.push_back(dim_pad.Lo());
padding_2d.push_back(dim_pad.Hi());
padding_2d.push_back(0);
padding_2d.push_back(0);
auto padding_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2, 2}, rewriter.getI64Type()), padding_2d);
SmallVector<int64_t, 2> lhs_dilation_2d(2, 1);
auto lhs_dilation_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2}, rewriter.getI64Type()), lhs_dilation_2d);
SmallVector<int64_t, 2> rhs_dilation_2d;
rhs_dilation_2d.push_back(view.KernelDilations()[0]);
rhs_dilation_2d.push_back(1);
auto rhs_dilation_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2}, rewriter.getI64Type()), rhs_dilation_2d);
auto window_reversal_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2}, rewriter.getIntegerType(1)),
SmallVector<bool>({false, false}));
auto dnums_2d = mhlo::ConvDimensionNumbersAttr::get(
rewriter.getContext(), lhs_new_layout.SpecialDim1(),
lhs_new_layout.SpecialDim2(), lhs_new_layout.Spatials(),
rhs_new_layout.SpecialDim1(), rhs_new_layout.SpecialDim2(),
rhs_new_layout.Spatials(), out_new_layout.SpecialDim1(),
out_new_layout.SpecialDim2(), out_new_layout.Spatials());
auto conv2d_op = rewriter.create<mhlo::ConvolutionOp>(
op.getLoc(), out_new_type, new_lhs, new_rhs, strides_2d_attr,
padding_2d_attr, lhs_dilation_2d_attr, rhs_dilation_2d_attr,
window_reversal_2d_attr, dnums_2d, op.getFeatureGroupCount(),
op.getBatchGroupCount(), op.getPrecisionConfigAttr());
auto new_out_type = op.getResult().getType();
rewriter.replaceOpWithNewOp<TFL::ReshapeOp>(
op, new_out_type, conv2d_op.getResult(),
ShapeToConst(rewriter, new_out_type.getShape(), op.getLoc()));
return success();
}
}
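// Registers the conversion patterns above and marks mhlo.convolution as
// dynamically legal so only supported forms are rewritten.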
void PopulateLegalizeConvPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeConv2D, LegalizeConv3D, LegalizeConvDepthwise,
ConvertNonTrivialConvToResizeBilinearOp,
ConvertNonTrivialConvToTransposeConvOp>(ctx);
target.addDynamicallyLegalOp<mhlo::ConvolutionOp>(IsConvLegal);
}
void PopulatePrepareConvPatterns(MLIRContext* ctx,
RewritePatternSet& patterns) {
patterns.add<Conv1DToConv2D, SliceDepthwiseTransposedConvolution>(ctx);
}
} | #include <initializer_list>
#include <numeric>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
int NumElements(const std::vector<int>& dims) {
return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
}
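// Test harness that builds a quantized CONV_2D or DEPTHWISE_CONV_2D model,
// with per-tensor or per-channel filter quantization, for exercising the
// Hexagon delegate.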
class QuantizedConvolutionOpModel : public SingleOpModelWithHexagon {
public:
QuantizedConvolutionOpModel(BuiltinOperator type, const TensorData& input,
const TensorData& filter,
const TensorData& output, Padding padding_type,
int dilation_factor = 1, int stride_length = 1,
ActivationFunctionType fused_activation_function =
ActivationFunctionType_NONE) {
input_ = AddInput(input);
filter_ = AddInput(filter);
int bias_size = GetShape(filter_)[0];
if (type == BuiltinOperator_DEPTHWISE_CONV_2D) {
bias_size = GetShape(filter_)[3];
}
if (filter.per_channel_quantization) {
std::vector<float> bias_scale(
filter.per_channel_quantization_scales.size());
std::vector<int64_t> bias_zero_points(
filter.per_channel_quantization_scales.size());
for (size_t i = 0; i < filter.per_channel_quantization_scales.size();
++i) {
bias_scale[i] = input.scale * filter.per_channel_quantization_scales[i];
bias_zero_points[i] = 0;
}
TensorData bias{TensorType_INT32,
{bias_size},
0,
0,
0,
0,
true,
bias_scale,
bias_zero_points,
0};
bias_ = AddInput(bias);
} else {
auto bias_scale = GetScale(input_) * GetScale(filter_);
TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
if (type == BuiltinOperator_DEPTHWISE_CONV_2D) {
int input_depth = GetShape(input_)[3];
int output_depth = GetShape(filter_)[3];
int depth_mul = output_depth / input_depth;
SetBuiltinOp(
BuiltinOperator_DEPTHWISE_CONV_2D,
BuiltinOptions_DepthwiseConv2DOptions,
CreateDepthwiseConv2DOptions(
builder_, padding_type, stride_length, stride_length, depth_mul,
fused_activation_function, dilation_factor, dilation_factor)
.Union());
} else {
SetBuiltinOp(BuiltinOperator_CONV_2D, BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(builder_, padding_type, stride_length,
stride_length, fused_activation_function,
dilation_factor, dilation_factor)
.Union());
}
BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)});
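// Mark the filter as read-only so it is treated as a constant weight
// tensor, which the Hexagon delegate requires.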
auto* filter_tensor = interpreter_->tensor(filter_);
filter_tensor->allocation_type = kTfLiteMmapRo;
}
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(input_, data);
}
void SetFilter(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(filter_, data);
}
void SetBias(std::initializer_list<float> data) {
QuantizeAndPopulate<int>(bias_, data);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
void SetInt8Input(std::initializer_list<float> data) {
QuantizeAndPopulate<int8_t>(input_, data);
}
void SetInt8Input(const std::vector<float>& data) {
QuantizeAndPopulate<int8_t>(input_, data);
}
void SetPerChannelQuantizedFilter(std::initializer_list<float> data) {
PerChannelSymmetricQuantizeAndPopulate(filter_, data);
}
void SetPerChannelQuantizedFilter(const std::vector<float>& data) {
PerChannelSymmetricQuantizeAndPopulate(filter_, data);
}
void SetPerChannelQuantizedBias(std::initializer_list<float> data) {
PerChannelQuantizeBias(bias_, data);
}
void SetPerChannelQuantizedBias(const std::vector<float>& data) {
PerChannelQuantizeBias(bias_, data);
}
protected:
int input_;
int filter_;
int bias_;
int output_;
};
TEST(QuantizedConvolutionOpModel, SimpleConvTestNoActivation) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D, {TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128}, Padding_VALID, 1,
2);
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
18, 2, 5,
18, 2, 5,
17, 4, 3,
37, 4, 3,
},
1e-5)));
}
TEST(QuantizedConvolutionOpModel, SimpleConvTestReLU6Activation) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D, {TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128}, Padding_VALID, 1,
2, ActivationFunctionType_RELU6);
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
6, 2, 5,
6, 2, 5,
6, 4, 3,
6, 4, 3,
},
1e-5)));
}
TEST(QuantizedConvolutionOpModel,
SimpleConvTestReLU6Activation_NoRequantizeRequired) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D, {TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64}, {TensorType_UINT8, {}, 0, 6},
Padding_VALID, 1,
2, ActivationFunctionType_RELU6);
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
6, 2, 5,
6, 2, 5,
6, 4, 3,
6, 4, 3,
},
2e-2)));
}
TEST(QuantizedConvolutionOpModel, SimplePerTensor_Int8) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D,
{TensorType_INT8, {1, 2, 3, 2}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{2, 2, 2, 2},
0,
0,
0,
0,
true,
{1},
{0},
0},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
3, 2,
1, -1,
-2, -3,
4, 3,
2, -2,
-3, -4,
});
m.SetPerChannelQuantizedFilter(
{
1, 2,
3, 4,
3, 4,
5, 6,
7, 8,
5, 6,
3, 4,
1, 2,
});
m.SetPerChannelQuantizedBias({3, -2});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({31, 56, -57, -44}, 1e-5)));
}
TEST(QuantizedConvolutionOpModel, SimplePerChannel_Int8) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D,
{TensorType_INT8, {1, 2, 3, 2}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{2, 2, 2, 2},
0,
0,
0,
0,
true,
{1, 2},
{0, 0},
0},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
3, 2,
1, -1,
-2, -3,
4, 3,
2, -2,
-3, -4,
});
m.SetPerChannelQuantizedFilter(
{
1, 2,
3, 4,
3, 4,
5, 6,
7, 8,
5, 6,
3, 4,
1, 2,
});
m.SetPerChannelQuantizedBias({3, -2});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({31, 64, -57, -46}, 0.6f)));
}
TEST(QuantizedConvolutionOpModel, SimpleDilatedDepthwiseConvTestPaddingValid) {
const int depth = 1;
const int image_width = 9;
const int image_height = 9;
const int image_batch_count = 1;
const int filter_size = 3;
const int filter_count = 1;
const int dilation_factor = 3;
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_UINT8,
{image_batch_count, image_height, image_width, depth},
0,
255},
{TensorType_UINT8,
{depth, filter_size, filter_size, filter_count},
0,
255},
{TensorType_UINT8, {}, 0, 255}, Padding_VALID, dilation_factor);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5, 6, 7, 8, 9});
m.SetBias({0});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5, 5}));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConv5x5) {
QuantizedConvolutionOpModel m(BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_UINT8, {1, 6, 6, 2}, -63.5, 64},
{TensorType_UINT8, {1, 5, 5, 2}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128},
Padding_VALID);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2,
3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4,
5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<uint8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, 1e-5)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvWithMultiplier_InputDepth1) {
QuantizedConvolutionOpModel m(BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_UINT8, {1, 6, 6, 1}, -63.5, 64},
{TensorType_UINT8, {1, 5, 5, 3}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128},
Padding_VALID);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<uint8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, 1e-5)));
}
TEST(QuantizedConvolutionOpModel,
DepthwiseConvWithMultiplier_InputDepth1_RELU) {
QuantizedConvolutionOpModel m(BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_UINT8, {1, 6, 6, 1}, -63.5, 64},
{TensorType_UINT8, {1, 5, 5, 3}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128},
Padding_VALID, 1,
2, ActivationFunctionType_RELU6);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<uint8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, 1e-5)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvSimplePerTensor_Int8) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 2, 3, 1}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 2, 2, 4},
0,
0,
0,
0,
true,
{1},
{0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
3,
1,
-2,
4,
2,
-3,
});
m.SetPerChannelQuantizedFilter({
1, 2, 3, 4,
3, 4, 5, 6,
7, 8, 5, 6,
3, 4, 1, 2,
});
m.SetPerChannelQuantizedBias({3, -2, 4, 6});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({43, 48, 40, 52, 3, -4, 4, 4}, 0.6f)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvSimplePerTensor_Int8_RELU1) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 2, 3, 1}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 2, 2, 4},
0,
0,
0,
0,
true,
{0.1, 2, 3, 0.4},
{0, 0, 0, 0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID,
1,
1, ActivationFunctionType_RELU_N1_TO_1);
m.SetInt8Input({
3,
1,
-2,
4,
2,
-4,
});
m.SetPerChannelQuantizedFilter({
1, 2, 3, 4,
3, 4, 5, 6,
7, 8, 5, 6,
3, 4, 1, 2,
});
m.SetPerChannelQuantizedBias({3, -2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<int8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, 1e-2)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvSimplePerAxis_Int8) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 2, 3, 1}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 2, 2, 4},
0,
0,
0,
0,
true,
{0.1, 2, 3, 0.4},
{0, 0, 0, 0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
3,
1,
-2,
4,
2,
-4,
});
m.SetPerChannelQuantizedFilter({
1, 2, 3, 4,
3, 4, 5, 6,
7, 8, 5, 6,
3, 4, 1, 2,
});
m.SetPerChannelQuantizedBias({3, -2, 4, 6});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({43, 48, 42, 52, 0, -8, 6, 2}, 0.6f)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvPerChannel_3x3Filter) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 3, 3, 8}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 3, 3, 8},
0,
0,
0,
0,
true,
{0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1},
{0, 0, 0, 0, 0, 0, 0, 0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0});
m.SetPerChannelQuantizedFilter(
{
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8});
m.SetPerChannelQuantizedBias({0, 0, 0, 0, 0, 0, 0, 0});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({9, 18, 0, 0, 47, 54, 0, 0}, 0.6f)));
}
TEST(QuantizedConvolutionOpModel,
DepthwiseConvPerChannel_3x3FilterPaddingSame) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 3, 3, 8}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 3, 3, 8},
0,
0,
0,
0,
true,
{0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1},
{0, 0, 0, 0, 0, 0, 0, 0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_SAME);
m.SetInt8Input({
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0});
m.SetPerChannelQuantizedFilter(
{
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8});
m.SetPerChannelQuantizedBias({0, 0, 0, 0, 0, 0, 0, 0});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear(
{
4, 8, 0, 0, 21, 24, 0, 0, 6, 12, 0, 0, 31.5, 36, 0, 0,
4, 8, 0, 0, 21, 24, 0, 0, 6, 12, 0, 0, 31.5, 36, 0, 0,
9, 18, 0, 0, 47, 54, 0, 0, 6, 12, 0, 0, 31.5, 36, 0, 0,
4, 8, 0, 0, 21, 24, 0, 0, 6, 12, 0, 0, 31.5, 36, 0, 0,
4, 8, 0, 0, 21, 24, 0, 0,
},
0.6f)));
}
TEST(QuantizedConvolutionOpModel,
DepthwiseConvPerChannel_5x5Filt2x2Stride64Chan) {
std::vector<float> per_channel_quantization_scales = {
0.00053629, 0.00052256, 0.00051463, 0.00050993, 0.00050885, 0.00052403,
0.00053925, 0.00053854, 0.00053962, 0.00048332, 0.00053551, 0.00052817,
0.00052771, 0.00051854, 0.00053823, 0.000531, 0.000521, 0.00053908,
0.00053849, 0.0005063, 0.00052631, 0.00050862, 0.00050484, 0.00053353,
0.0005352, 0.00051084, 0.00052429, 0.00052653, 0.00051875, 0.0005391,
0.00050941, 0.00053934, 0.00049698, 0.00050956, 0.00053204, 0.00051116,
0.00052303, 0.00053624, 0.00053452, 0.00050418, 0.00048261, 0.00053418,
0.00053058, 0.0005359, 0.0005324, 0.00053648, 0.00053957, 0.00052388,
0.00053638, 0.00052164, 0.00052303, 0.00053624, 0.00053452, 0.00050418,
0.00048261, 0.00053418, 0.00053058, 0.0005359, 0.0005324, 0.00053648,
0.00053957, 0.00052388, 0.00053638, 0.00052164};
std::vector<int64_t> per_channel_quantization_offsets(64, 0);
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 5, 5, 64}, 0, 0, 1.8942945003509521, -6},
{TensorType_INT8,
{1, 5, 5, 64},
0,
0,
0,
0,
true,
per_channel_quantization_scales,
per_channel_quantization_offsets,
3},
{TensorType_INT8, {}, 0, 0, 0.2960677146911621, 7}, Padding_VALID,
1,
2);
std::vector<float> inputs;
std::vector<float> filter;
for (auto i = 0; i < 5 * 5 * 64; i++) {
inputs.push_back(UniformRandomFloat(-248, 234));
filter.push_back(UniformRandomFloat(-0.06, 0.06));
}
m.SetInt8Input(inputs);
m.SetPerChannelQuantizedFilter(filter);
std::vector<float> bias(64);
m.SetPerChannelQuantizedBias(bias);
m.Invoke();
auto interpreter_result = m.GetDequantizedOutput<int8_t>();
m.ApplyDelegateAndInvoke();
auto delegate_result = m.GetDequantizedOutput<int8_t>();
EXPECT_THAT(delegate_result,
ElementsAreArray(ArrayFloatNear(interpreter_result, 0.6f)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1417bb56-2ee3-4b23-9a08-1dd6220d0e60 | cpp | tensorflow/tensorflow | reduction_layout_normalizer | third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer.cc | third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer_test.cc | #include "xla/service/gpu/transforms/reduction_layout_normalizer.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
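// Visitor that bitcasts each reduce input so its dimensions are listed in
// physical (major-to-minor) order with the default layout, remapping the
// reduced dimensions and the output layout to match.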
class EnforceMinorToMajorReduceOpVisitor : public DfsHloRewriteVisitor {
absl::Status HandleReduce(HloInstruction *hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
VLOG(5) << "Input: " << reduce->ToString();
int operand_idx = -1;
absl::InlinedVector<HloInstruction *, 2> canonical_reduce_inputs;
absl::InlinedVector<Shape, 2> new_reduce_shapes;
DimensionVector out_reduce_dimensions;
const Shape &first_instruction_shape = reduce->inputs()[0]->shape();
for (HloInstruction *operand : reduce->inputs()) {
operand_idx++;
if (operand_idx != 0 &&
operand->shape().layout() != first_instruction_shape.layout()) {
return FailedPrecondition(
"Layout assignment should have assigned the same layout to all "
"reduce inputs");
}
const Shape &operand_shape = operand->shape();
const Layout &operand_layout = operand_shape.layout();
const Shape &reduce_shape =
reduce->shape().IsTuple() ? reduce->shape().tuple_shapes(operand_idx)
: reduce->shape();
DimensionVector new_reduce_dimensions;
DimensionVector new_operand_shape_data;
DimensionVector new_reduce_shape_data;
DimensionVector new_reduce_shape_layout(reduce_shape.rank());
std::vector<int64_t> reduce_shape_logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(reduce_shape.layout());
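// Maps an operand logical dimension to the corresponding output (reduce
// result) logical dimension by discounting the reduced dimensions before it.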
auto to_reduce_logical_dim = [&](int64_t op_logical_dim) {
return op_logical_dim -
absl::c_count_if(reduce->dimensions(), [&](int64_t dim) {
CHECK(dim != op_logical_dim);
return dim < op_logical_dim;
});
};
for (int i = 0; i < operand_shape.rank(); i++) {
int64_t major_to_minor_dim_idx = operand_shape.rank() - i - 1;
int64_t logical_dim =
operand_layout.minor_to_major(major_to_minor_dim_idx);
int64_t dim_size = operand_shape.dimensions(logical_dim);
VLOG(5) << "Processing logical dimension " << logical_dim << " of size "
<< dim_size;
new_operand_shape_data.push_back(dim_size);
if (absl::c_linear_search(reduce->dimensions(), logical_dim)) {
new_reduce_dimensions.push_back(i);
} else {
new_reduce_shape_data.push_back(dim_size);
int64_t logical_reduce_dim = to_reduce_logical_dim(logical_dim);
int64_t physical_reduce_dim =
reduce_shape_logical_to_physical[logical_reduce_dim];
VLOG(5) << "logical_reduce_dim = " << logical_reduce_dim << ", "
<< "physical_reduce_dim = " << physical_reduce_dim;
new_reduce_shape_layout[reduce_shape.rank() - physical_reduce_dim -
1] = new_reduce_shape_data.size() - 1;
}
}
Shape new_operand_shape = ShapeUtil::MakeShape(
operand_shape.element_type(), new_operand_shape_data);
Shape new_reduce_shape = ShapeUtil::MakeShapeWithDenseLayout(
reduce_shape.element_type(), new_reduce_shape_data,
new_reduce_shape_layout);
if (new_operand_shape == operand_shape && reduce->inputs().size() == 1) {
return absl::OkStatus();
}
HloInstruction *canonical_reduce_input =
new_operand_shape != operand_shape
? reduce->parent()->AddInstruction(
HloInstruction::CreateBitcast(new_operand_shape, operand))
: operand;
canonical_reduce_input->set_metadata(operand->metadata());
VLOG(5) << "Reduction input: " << canonical_reduce_input->ToString();
new_reduce_shapes.push_back(new_reduce_shape);
canonical_reduce_inputs.push_back(canonical_reduce_input);
if (out_reduce_dimensions.empty()) {
out_reduce_dimensions = new_reduce_dimensions;
} else {
TF_RET_CHECK(out_reduce_dimensions == new_reduce_dimensions);
}
}
Shape new_reduce_shape = ShapeUtil::MakeMaybeTupleShape(new_reduce_shapes);
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
new_reduce_shape, canonical_reduce_inputs, reduce->init_values(),
out_reduce_dimensions, reduce->to_apply());
VLOG(5) << "Generated new reduction: " << new_reduce->ToString();
const Shape &orig_reduce_shape = reduce->shape();
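    // Restore the caller-visible shape: bitcast a single result back, or wrap
    // every element of a variadic reduce's tuple in its own bitcast.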
if (new_reduce_shape != orig_reduce_shape) {
HloInstruction *wrapped_reduce =
reduce->parent()->AddInstruction(std::move(new_reduce));
if (!new_reduce_shape.IsTuple()) {
new_reduce =
HloInstruction::CreateBitcast(reduce->shape(), wrapped_reduce);
} else {
absl::InlinedVector<HloInstruction *, 2> out;
for (int oidx = 0; oidx < reduce->input_count(); oidx++) {
HloInstruction *gte = reduce->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(wrapped_reduce, oidx));
out.push_back(
reduce->parent()->AddInstruction(HloInstruction::CreateBitcast(
orig_reduce_shape.tuple_shapes(oidx), gte)));
}
new_reduce = HloInstruction::CreateTuple(out);
}
}
VLOG(5) << "Generated output: " << new_reduce->ToString();
return ReplaceWithNewInstruction(reduce, std::move(new_reduce));
}
};
absl::StatusOr<bool> ReductionLayoutNormalizer::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed,
EnforceMinorToMajorReduceOpVisitor().RunOnModule(
module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/transforms/reduction_layout_normalizer.h"
#include <optional>
#include <utility>
#include <gmock/gmock.h>
#include "absl/strings/string_view.h"
#include "xla/error_spec.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class ReductionLayoutNormalizerTest : public HloTestBase {
public:
void CheckReductionLayoutNormalizer(
absl::string_view hlo, std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, ReductionLayoutNormalizer{}, expected);
}
};
TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTest) {
const char* hlo = R"(
HloModule ReduceWithLayoutChange
add {
x0 = f32[] parameter(0)
y0 = f32[] parameter(1)
ROOT add0 = f32[] add(x0, y0)
}
ENTRY main {
arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0)
constant0 = f32[] constant(0)
ROOT reduce0 = f32[4,5,16,12,12]{4,3,2,1,0} reduce(arg0, constant0),
dimensions={1,6,7}, to_apply=add
}
)";
CheckReductionLayoutNormalizer(hlo,
R"(
)");
}
TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTestVariadic) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadic
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0)
idxs = u32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[4,5,16,12,12]{4,3,2,1,0},
u32[4,5,16,12,12]{4,3,2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={1,6,7}, to_apply=argmax
}
)";
CheckReductionLayoutNormalizer(hlo,
R"(
)");
}
TEST_F(ReductionLayoutNormalizerTest,
LayoutCanonicalizerTestVariadicDifferentLayouts) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,7]{2,1,0,3} parameter(0)
idxs = u32[2,3,4,7]{3,2,1,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{2,1,0},
u32[2,3,4]{2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
auto cloned_module = module->Clone();
ReductionLayoutNormalizer normalizer;
EXPECT_THAT(normalizer.Run(module.get()),
StatusIs(tsl::error::FAILED_PRECONDITION,
HasSubstr("Layout assignment")));
EXPECT_TRUE(RunAndCompare(std::move(cloned_module), ErrorSpec{1e-5, 1e-5}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c01a02e-63c2-4cb8-9d8e-94ac3c67739d | cpp | tensorflow/tensorflow | batch_input_task | tensorflow/core/kernels/batching_util/batch_input_task.h | tensorflow/core/kernels/batching_util/batch_input_task_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_INPUT_TASK_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_INPUT_TASK_H_
#include <algorithm>
#include <atomic>
#include <functional>
#include <memory>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/container/fixed_array.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/concat_split_util.h"
#include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/util/incremental_barrier.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class BatchInputTaskHandleTestAccess;
template <typename TaskType>
class BatchInputTaskTestAccess;
template <typename TaskType>
class BatchInputTask;
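// A BatchTask that lazily materializes one split of a shared BatchInputTask.
// GetSplitTask() hands out the split at most once; later calls return nullptr.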
template <typename TaskType>
class BatchInputTaskHandle : public BatchTask {
public:
BatchInputTaskHandle(
std::shared_ptr<BatchInputTask<TaskType>> batch_input_task, int split_id,
size_t task_size);
std::unique_ptr<TaskType> GetSplitTask();
size_t size() const override { return task_size_; }
private:
template <typename T>
friend class internal::BatchInputTaskHandleTestAccess;
int split_id() const { return split_id_; }
std::shared_ptr<BatchInputTask<TaskType>> batch_input_task_;
const int split_id_;
const size_t task_size_;
std::atomic<bool> once_{false};
};
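// Owns an oversized input task plus the metadata describing how to split it;
// the split itself is deferred until a handle first asks for its share.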
template <typename TaskType>
class BatchInputTask
: public std::enable_shared_from_this<BatchInputTask<TaskType>> {
public:
using SplitInputFunc = std::function<Status(
std::unique_ptr<TaskType>* input_task, int first_output_task_size,
int input_batch_size_limit,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>;
BatchInputTask(std::unique_ptr<TaskType> input_task,
int open_batch_remaining_slot, int batch_size_limit,
SplitInputFunc split_input_func);
void ToTaskHandles(
std::vector<std::unique_ptr<BatchInputTaskHandle<TaskType>>>*
output_task_handles);
private:
friend class BatchInputTaskHandle<TaskType>;
template <typename T>
friend class internal::BatchInputTaskTestAccess;
std::unique_ptr<TaskType> GetSplitTask(int split_id);
Status SplitBatches(std::vector<std::unique_ptr<TaskType>>* output_tasks);
std::unique_ptr<TaskType> input_task_;
const int input_task_size_ = 0;
const int open_batch_remaining_slot_;
const int batch_size_limit_;
const SplitInputFunc split_func_;
const InputSplitMetadata input_split_metadata_;
mutable absl::once_flag once_;
std::vector<std::unique_ptr<TaskType>> task_splits_;
Status split_status_;
};
template <typename TaskType>
BatchInputTaskHandle<TaskType>::BatchInputTaskHandle(
std::shared_ptr<BatchInputTask<TaskType>> batch_input_task, int split_id,
size_t task_size)
: batch_input_task_(batch_input_task),
split_id_(split_id),
task_size_(task_size) {}
template <typename TaskType>
std::unique_ptr<TaskType> BatchInputTaskHandle<TaskType>::GetSplitTask() {
if (once_.load(std::memory_order_acquire)) {
return nullptr;
}
once_.store(true, std::memory_order_release);
return batch_input_task_->GetSplitTask(split_id_);
}
template <typename TaskType>
BatchInputTask<TaskType>::BatchInputTask(std::unique_ptr<TaskType> input_task,
int open_batch_remaining_slot,
int batch_size_limit,
SplitInputFunc split_input_func)
: input_task_(std::move(input_task)),
input_task_size_(input_task_->size()),
open_batch_remaining_slot_(open_batch_remaining_slot),
batch_size_limit_(batch_size_limit),
split_func_(split_input_func),
input_split_metadata_(input_task_size_, open_batch_remaining_slot,
batch_size_limit) {}
template <typename TaskType>
void BatchInputTask<TaskType>::ToTaskHandles(
std::vector<std::unique_ptr<BatchInputTaskHandle<TaskType>>>*
task_handles) {
const absl::FixedArray<int>& task_sizes = input_split_metadata_.task_sizes();
task_handles->resize(task_sizes.size());
for (int i = 0; i < task_handles->size(); i++) {
(*task_handles)[i] = std::make_unique<BatchInputTaskHandle<TaskType>>(
this->shared_from_this(), i, task_sizes[i]);
}
}
template <typename TaskType>
std::unique_ptr<TaskType> BatchInputTask<TaskType>::GetSplitTask(int split_id) {
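  // Split the input lazily and exactly once; all handles share the result.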
absl::call_once(once_,
[this]() { split_status_ = SplitBatches(&task_splits_); });
if (!split_status_.ok()) {
    LOG_EVERY_N_SEC(WARNING, 60 /*seconds*/)
<< "Split task with error: " << split_status_ << " split metadata is "
<< input_split_metadata_.DebugString();
return nullptr;
}
if (split_id >= 0 && split_id < task_splits_.size()) {
return std::move(task_splits_[split_id]);
}
return nullptr;
}
template <typename TaskType>
Status BatchInputTask<TaskType>::SplitBatches(
std::vector<std::unique_ptr<TaskType>>* output_tasks) {
return split_func_(&input_task_, open_batch_remaining_slot_,
batch_size_limit_, output_tasks);
}
}
}
}
#endif | #include "tensorflow/core/kernels/batching_util/batch_input_task.h"
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_properties.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h"
#include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class BatchInputTaskHandleTestAccess {
public:
explicit BatchInputTaskHandleTestAccess(
BatchInputTaskHandle<TaskType>* handle)
: handle_(handle) {}
int split_id() const { return handle_->split_id(); }
private:
BatchInputTaskHandle<TaskType>* const handle_;
};
namespace {
using TensorMatrix = std::vector<std::vector<Tensor>>;
using SplitFunc = std::function<Status(
std::unique_ptr<BatchResourceBase::BatchTask>* input_task,
int first_output_task_size, int input_batch_size_limit,
std::vector<std::unique_ptr<BatchResourceBase::BatchTask>>* output_tasks)>;
template <typename T>
static Tensor CreateTensor(const TensorShape& input_shape,
gtl::ArraySlice<T> input_data) {
Tensor tensor(DataTypeToEnum<T>::value, input_shape);
test::FillValues<T>(&tensor, input_data);
return tensor;
}
NodeDef CreateBatchKernelNodeDef() {
NodeDef batch_kernel_node_def;
NodeDefBuilder batch_function_builder("BatchTPUInput", "BatchFunction");
batch_function_builder.Attr("max_batch_size", 128);
batch_function_builder.Attr("num_batch_threads", 8);
batch_function_builder.Attr("allowed_batch_sizes", {2, 4, 8});
batch_function_builder.Attr("batch_timeout_micros", 1000);
batch_function_builder.Attr("max_enqueued_batches", 100);
batch_function_builder.Attr("enable_large_batch_splitting", true);
std::vector<DataType> input_dtypes({DataType::DT_INT64, DataType::DT_INT64});
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.resize(2);
inputs[0] = NodeDefBuilder::NodeOut({"n1", 0, DataType::DT_INT64});
inputs[1] = NodeDefBuilder::NodeOut({"n2", 1, DataType::DT_INT64});
batch_function_builder.Attr("Tin", input_dtypes);
batch_function_builder.Input(inputs);
batch_function_builder.Attr("Tcaptured",
std::vector<DataType>{DataType::DT_INT64});
batch_function_builder.Input(std::vector<NodeDefBuilder::NodeOut>{
NodeDefBuilder::NodeOut({"n3", 1, DataType::DT_INT64})});
batch_function_builder.Attr("Tout",
std::vector<DataType>(4, DataType::DT_INT64));
NameAttrList f;
f.set_name("func_to_batch");
batch_function_builder.Attr("f", f);
TF_CHECK_OK(batch_function_builder.Finalize(&batch_kernel_node_def));
return batch_kernel_node_def;
}
class BatchInputTaskTest : public ::testing::Test {
protected:
BatchInputTaskTest() {
device_ = DeviceFactory::NewDevice("CPU", SessionOptions{},
"/job:a/replica:0/task:0");
Status op_kernel_creation_status;
batch_kernel_ = CreateOpKernel(
DEVICE_CPU, device_.get(), device_->GetAllocator(AllocatorAttributes{}),
CreateBatchKernelNodeDef(), TF_GRAPH_DEF_VERSION,
&op_kernel_creation_status);
TF_CHECK_OK(op_kernel_creation_status);
EXPECT_NE(batch_kernel_, nullptr);
op_kernel_context_params_.device = device_.get();
op_kernel_context_params_.op_kernel = batch_kernel_.get();
op_kernel_context_ = std::make_unique<OpKernelContext>(
        &op_kernel_context_params_, 4 /*num_outputs*/);
}
OpKernelContext* op_kernel_context() const {
return op_kernel_context_.get();
}
private:
std::unique_ptr<Device> device_;
std::unique_ptr<OpKernel> batch_kernel_;
OpKernelContext::Params op_kernel_context_params_;
std::unique_ptr<OpKernelContext> op_kernel_context_;
};
TEST_F(BatchInputTaskTest, BatchInputToSplitTasks) {
auto batch_task = std::make_unique<BatchResourceBase::BatchTask>();
batch_task->inputs.push_back(CreateTensor<int64_t>(
TensorShape({5, 2, 1}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}));
batch_task->inputs.push_back(CreateTensor<int64_t>(
TensorShape({5, 1, 2}), {11, 12, 13, 14, 15, 16, 17, 18, 19, 20}));
batch_task->captured_inputs.push_back(
CreateTensor<int64_t>(TensorShape{1}, {0}));
batch_task->context = op_kernel_context();
bool batch_task_done_callback_executed = false;
batch_task->output = std::make_shared<TensorMatrix>();
batch_task->status = std::make_shared<ThreadSafeStatus>();
batch_task->done_callback = [&batch_task_done_callback_executed]() {
batch_task_done_callback_executed = true;
};
auto batch_input_task =
std::make_shared<BatchInputTask<BatchResourceBase::BatchTask>>(
          std::move(batch_task), /*open_batch_remaining_slot=*/1,
          /*batch_size_limit=*/3, BatchResourceBase::SplitInputTask);
std::vector<
std::unique_ptr<BatchInputTaskHandle<BatchResourceBase::BatchTask>>>
output_tasks;
batch_input_task->ToTaskHandles(&output_tasks);
ASSERT_FALSE(batch_task_done_callback_executed);
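  // An input of size 5 with one free slot in the open batch and a batch size
  // limit of 3 should split into tasks of sizes 1, 3, and 1.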
const std::vector<int> expected_task_sizes{1, 3, 1};
for (int i = 0; i < output_tasks.size(); i++) {
EXPECT_EQ(
internal::BatchInputTaskHandleTestAccess<BatchResourceBase::BatchTask>(
output_tasks[i].get())
.split_id(),
i);
auto batch_task = output_tasks[i]->GetSplitTask();
ASSERT_NE(batch_task, nullptr);
EXPECT_EQ(batch_task->size(), expected_task_sizes[i]);
batch_task->done_callback();
EXPECT_EQ(output_tasks[i]->GetSplitTask(), nullptr);
}
ASSERT_TRUE(batch_task_done_callback_executed);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_input_task.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_input_task_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e819cfb7-a025-47d3-82e8-885348ab9d1c | cpp | abseil/abseil-cpp | scoped_set_env | absl/base/internal/scoped_set_env.cc | absl/base/internal/scoped_set_env_test.cc | #include "absl/base/internal/scoped_set_env.h"
#ifdef _WIN32
#include <windows.h>
#endif
#include <cstdlib>
#include "absl/base/internal/raw_logging.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
#ifdef _WIN32
const int kMaxEnvVarValueSize = 1024;
#endif
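// Sets the variable, or unsets it when `value` is nullptr, on both POSIX and
// Windows.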
void SetEnvVar(const char* name, const char* value) {
#ifdef _WIN32
SetEnvironmentVariableA(name, value);
#else
if (value == nullptr) {
::unsetenv(name);
} else {
::setenv(name, value, 1);
}
#endif
}
}
ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
: var_name_(var_name), was_unset_(false) {
#ifdef _WIN32
char buf[kMaxEnvVarValueSize];
auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
if (get_res == 0) {
was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
} else {
old_value_.assign(buf, get_res);
}
SetEnvironmentVariableA(var_name_.c_str(), new_value);
#else
const char* val = ::getenv(var_name_.c_str());
if (val == nullptr) {
was_unset_ = true;
} else {
old_value_ = val;
}
#endif
SetEnvVar(var_name_.c_str(), new_value);
}
ScopedSetEnv::~ScopedSetEnv() {
SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
}
}
ABSL_NAMESPACE_END
} | #ifdef _WIN32
#include <windows.h>
#endif
#include "gtest/gtest.h"
#include "absl/base/internal/scoped_set_env.h"
namespace {
using absl::base_internal::ScopedSetEnv;
std::string GetEnvVar(const char* name) {
#ifdef _WIN32
char buf[1024];
auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf));
if (get_res >= sizeof(buf)) {
return "TOO_BIG";
}
if (get_res == 0) {
return "UNSET";
}
return std::string(buf, get_res);
#else
const char* val = ::getenv(name);
if (val == nullptr) {
return "UNSET";
}
return val;
#endif
}
TEST(ScopedSetEnvTest, SetNonExistingVarToString) {
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
{
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
}
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
TEST(ScopedSetEnvTest, SetNonExistingVarToNull) {
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
{
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
TEST(ScopedSetEnvTest, SetExistingVarToString) {
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
{
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value");
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value");
}
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
}
TEST(ScopedSetEnvTest, SetExistingVarToNull) {
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
{
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/scoped_set_env.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/scoped_set_env_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
a27f2e13-ab33-42ea-891a-e76f26a75708 | cpp | tensorflow/tensorflow | c_api_unified_experimental | tensorflow/c/eager/c_api_unified_experimental.cc | tensorflow/c/eager/c_api_unified_experimental_test.cc | #include "tensorflow/c/eager/c_api_unified_experimental.h"
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
using tensorflow::string;
namespace tensorflow {
namespace tracing {
typedef absl::flat_hash_map<std::string, tracing::FactoryFunction> FactoriesMap;
static FactoriesMap& GetFactories() {
static FactoriesMap* factories = new FactoriesMap;
return *factories;
}
static tracing::FactoryFunction default_factory;
void RegisterTracingEngineFactory(const string& name, FactoryFunction factory) {
  assert(((!GetFactories().count(name)) ||
          (GetFactories()[name] == factory)) &&
         "Duplicate tracing factory registration");
GetFactories()[name] = factory;
}
Status SetDefaultTracingEngine(const char* name) {
auto entry = GetFactories().find(name);
if (entry != GetFactories().end()) {
default_factory = GetFactories().find(name)->second;
return absl::OkStatus();
}
string msg = absl::StrCat(
"No tracing engine factory has been registered with the key '", name,
"' (available: ");
std::set<string> factories_sorted;
for (const auto& factory : GetFactories())
factories_sorted.insert(factory.first);
const char* comma = "";
for (const string& factory : factories_sorted) {
msg += comma + factory;
comma = ", ";
}
msg += ")";
return errors::InvalidArgument(msg.c_str());
}
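// Returns a tracing context from the default factory, or sets a
// FailedPrecondition status when no default tracing engine was registered.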
static TracingContext* CreateTracingExecutionContext(const char* fn_name,
TF_Status* s) {
if (default_factory) {
return default_factory(fn_name, s);
}
tsl::Set_TF_Status_from_Status(
s, errors::FailedPrecondition("default_factory is nullptr"));
return nullptr;
}
}
}
using tensorflow::AbstractFunction;
using tensorflow::AbstractTensorHandle;
using tensorflow::DataType;
using tensorflow::dyn_cast;
using tensorflow::OutputList;
using tensorflow::Status;
using tensorflow::unwrap;
using tensorflow::wrap;
using tensorflow::tracing::CreateTracingExecutionContext;
using tensorflow::tracing::SetDefaultTracingEngine;
using tensorflow::tracing::TracingContext;
using tensorflow::tracing::TracingOperation;
using tensorflow::tracing::TracingTensorHandle;
void TF_SetTracingImplementation(const char* name, TF_Status* s) {
tsl::Set_TF_Status_from_Status(s, SetDefaultTracingEngine(name));
}
TF_ExecutionContext* TF_CreateFunction(const char* fn_name, TF_Status* s) {
return wrap(CreateTracingExecutionContext(fn_name, s));
}
TF_AbstractFunction* TF_FinalizeFunction(TF_ExecutionContext* ctx,
TF_OutputList* outputs, TF_Status* s) {
AbstractFunction* func;
TracingContext* tracing_ctx = dyn_cast<TracingContext>(unwrap(ctx));
if (!tracing_ctx) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"Only TracingContext can be converted into a function."));
return nullptr;
}
tsl::Set_TF_Status_from_Status(s,
tracing_ctx->Finalize(unwrap(outputs), &func));
TF_DeleteExecutionContext(ctx);
return wrap(func);
}
TF_AbstractTensor* TF_AddFunctionParameter(TF_ExecutionContext* func,
TF_DataType dtype, TF_Shape shape,
TF_Status* s) {
DCHECK_GE(shape.num_dims, -1);
TracingTensorHandle* t;
TracingContext* tracing_ctx = dyn_cast<TracingContext>(unwrap(func));
if (!tracing_ctx) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"TF_AddFunctionParameter must be called on a TracingContext."));
return nullptr;
}
tensorflow::PartialTensorShape partial_shape;
if (shape.num_dims != -1) {
DCHECK(shape.dim_sizes != nullptr);
Status status = tensorflow::PartialTensorShape::MakePartialShape(
reinterpret_cast<int64_t*>(shape.dim_sizes), shape.num_dims,
&partial_shape);
if (!status.ok()) {
tsl::Set_TF_Status_from_Status(s, status);
return nullptr;
}
}
tsl::Set_TF_Status_from_Status(
s, tracing_ctx->AddParameter(static_cast<DataType>(dtype), partial_shape,
&t));
return wrap(t);
}
void TF_DeleteExecutionContext(TF_ExecutionContext* c) { unwrap(c)->Release(); }
TF_AbstractOp* TF_NewAbstractOp(TF_ExecutionContext* c) {
return wrap((unwrap(c)->CreateOperation()));
}
void TF_DeleteAbstractOp(TF_AbstractOp* op) { unwrap(op)->Release(); }
void TF_DeleteAbstractTensor(TF_AbstractTensor* t) { unwrap(t)->Unref(); }
TF_OutputList* TF_NewOutputList() { return wrap(new OutputList); }
void TF_DeleteOutputList(TF_OutputList* o) { delete unwrap(o); }
void TF_OutputListSetNumOutputs(TF_OutputList* o, int num_outputs,
TF_Status* s) {
unwrap(o)->expected_num_outputs = num_outputs;
unwrap(o)->outputs.clear();
unwrap(o)->outputs.resize(num_outputs);
}
int TF_OutputListNumOutputs(TF_OutputList* o) {
return unwrap(o)->outputs.size();
}
TF_AbstractTensor* TF_OutputListGet(TF_OutputList* o, int i) {
return wrap(unwrap(o)->outputs[i]);
}
void TF_OutputListPushBack(TF_OutputList* o, TF_AbstractTensor* tensor,
TF_Status* s) {
unwrap(o)->outputs.push_back(unwrap(tensor));
}
void TF_AbstractOpSetOpType(TF_AbstractOp* op, const char* const op_type,
TF_Status* s) {
tsl::Set_TF_Status_from_Status(
      s, unwrap(op)->Reset(op_type, /*raw_device_name=*/nullptr));
}
void TF_AbstractOpSetOpName(TF_AbstractOp* op, const char* const op_name,
TF_Status* s) {
TracingOperation* tracing_op = dyn_cast<TracingOperation>(unwrap(op));
if (!tracing_op) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"TF_AbstractOpSetOpName must be called on a TracingOperation."));
return;
}
tsl::Set_TF_Status_from_Status(s, tracing_op->SetOpName(op_name));
}
void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name,
TF_DataType value, TF_Status* s) {
Status status =
unwrap(op)->SetAttrType(attr_name, static_cast<DataType>(value));
TF_SetStatus(s, static_cast<TF_Code>(status.code()),
absl::StatusMessageAsCStr(status));
}
void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs,
TF_AbstractTensor* const* inputs, TF_OutputList* o,
TF_Status* s) {
for (int i = 0; i < num_inputs; i++) {
tsl::Set_TF_Status_from_Status(s, unwrap(op)->AddInput(unwrap(inputs[i])));
if (TF_GetCode(s) != TF_OK) {
return;
}
}
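  // Execute into the caller-provided output list, honoring its expected
  // output count.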
int num_outputs = unwrap(o)->expected_num_outputs;
tsl::Set_TF_Status_from_Status(
s, unwrap(op)->Execute(
absl::MakeSpan(reinterpret_cast<AbstractTensorHandle**>(
unwrap(o)->outputs.data()),
unwrap(o)->outputs.size()),
&num_outputs));
}
void TF_DeleteAbstractFunction(TF_AbstractFunction* func) {
unwrap(func)->Unref();
}
void TF_ExecutionContextRegisterFunction(TF_ExecutionContext* ctx,
TF_AbstractFunction* func,
TF_Status* s) {
tsl::Set_TF_Status_from_Status(s,
unwrap(ctx)->RegisterFunction(unwrap(func)));
} | #include "tensorflow/c/eager/c_api_unified_experimental.h"
#include <memory>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
using tensorflow::Status;
using tensorflow::string;
using tensorflow::TF_StatusPtr;
namespace tensorflow {
namespace {
class UnifiedCAPI
: public ::testing::TestWithParam<std::tuple<const char*, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();
}
};
TEST_P(UnifiedCAPI, TestBasicEager) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* t = TestScalarTensorHandle(eager_ctx, 2.0f);
TF_AbstractTensor* at =
TF_CreateAbstractTensorFromEagerTensor(t, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at, at};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float* result_value = static_cast<float*>(TF_TensorData(result_tensor));
EXPECT_EQ(*result_value, 4.0);
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals[] = {0.0f, 0.0f, 0.0f, 0.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t =
TestMatrixTensorHandleWithInput(eager_ctx, vals, dims, num_dims);
TF_AbstractTensor* at = TF_CreateAbstractTensorFromEagerTensor(
t, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at, at};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], 0);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatMul2) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals1[] = {1.0f, 2.0f, 3.0f, 4.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t1 =
TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
TF_AbstractTensor* at1 = TF_CreateAbstractTensorFromEagerTensor(
t1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float vals2[] = {5.0f, 6.0f, 7.0f, 8.0f};
TFE_TensorHandle* t2 =
TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
TF_AbstractTensor* at2 = TF_CreateAbstractTensorFromEagerTensor(
t2, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at1, at2};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at1);
TF_DeleteAbstractTensor(at2);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
float e_vals[] = {19.0f, 22.0f, 43.0f, 50.0f};
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], e_vals[i]);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatAdd) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals1[] = {1.0f, 2.0f, 3.0f, 4.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t1 =
TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
TF_AbstractTensor* at1 = TF_CreateAbstractTensorFromEagerTensor(
t1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float vals2[] = {5.0f, 6.0f, 7.0f, 8.0f};
TFE_TensorHandle* t2 =
TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
TF_AbstractTensor* at2 = TF_CreateAbstractTensorFromEagerTensor(
t2, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at1, at2};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at1);
TF_DeleteAbstractTensor(at2);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
float e_vals[] = {6.0f, 8.0f, 10.0f, 12.0f};
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], e_vals[i]);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicGraph) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
string fn_name = "double";
TF_ExecutionContext* graph_ctx =
TF_CreateFunction(fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_t =
TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(add_op, "my_add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto outs = unwrap(add_outputs);
auto h = outs->outputs[0];
ASSERT_NE(h, nullptr);
ASSERT_EQ(h->FullType().type_id(), TFT_UNSET);
ASSERT_EQ(unwrap(inputs[0])->FullType().type_id(), TFT_UNSET);
TF_DeleteAbstractOp(add_op);
TF_AbstractFunction* func =
TF_FinalizeFunction(graph_ctx, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractTensor(TF_OutputListGet(add_outputs, 0));
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f);
TF_AbstractTensor* input_t =
TF_CreateAbstractTensorFromEagerTensor(input_eager, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(fn_op, 1, &input_t, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(1, TF_OutputListNumOutputs(add_outputs));
TF_AbstractTensor* final_result = TF_OutputListGet(add_outputs, 0);
TFE_TensorHandle* final =
TF_AbstractTensorGetEagerTensor(final_result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* f_t = TFE_TensorHandleResolve(final, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float* f_value = static_cast<float*>(TF_TensorData(f_t));
ASSERT_EQ(*f_value, 4.0);
TF_DeleteOutputList(add_outputs);
TF_DeleteAbstractOp(fn_op);
TF_DeleteAbstractTensor(input_t);
TF_DeleteAbstractTensor(final_result);
TF_DeleteAbstractTensor(placeholder_t);
TF_DeleteTensor(f_t);
TF_DeleteAbstractFunction(func);
TF_DeleteExecutionContext(eager_execution_ctx);
}
TEST_P(UnifiedCAPI, TestBasicGraphMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
string fn_name = "matrix_multiply";
TF_ExecutionContext* graph_ctx =
TF_CreateFunction(fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_t =
TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* matmul_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(matmul_op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(matmul_op, "my_matmul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t};
TF_OutputList* mm_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(matmul_op, 2, inputs, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(matmul_op);
TF_AbstractFunction* func =
TF_FinalizeFunction(graph_ctx, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
float vals[] = {1.0f, 1.0f, 1.0f, 1.0f};
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
TFE_TensorHandle* input_eager =
TestMatrixTensorHandleWithInput(eager_ctx, vals, dims, num_dims);
TF_AbstractTensor* input_t =
TF_CreateAbstractTensorFromEagerTensor(input_eager, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(fn_op, 1, &input_t, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(1, TF_OutputListNumOutputs(mm_outputs));
TF_AbstractTensor* final_result = TF_OutputListGet(mm_outputs, 0);
TFE_TensorHandle* final =
TF_AbstractTensorGetEagerTensor(final_result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* f_t = TFE_TensorHandleResolve(final, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(f_t), TF_TensorByteSize(f_t));
int data_len = 4;
for (int i = 0; i < data_len; i++) {
ASSERT_EQ(result_data[i], 2.0f);
}
TF_DeleteAbstractTensor(final_result);
TF_DeleteOutputList(mm_outputs);
TF_DeleteAbstractTensor(placeholder_t);
TF_DeleteAbstractOp(fn_op);
TF_DeleteAbstractTensor(input_t);
TF_DeleteTensor(f_t);
TF_DeleteAbstractFunction(func);
TF_DeleteExecutionContext(eager_execution_ctx);
}
TEST_P(UnifiedCAPI, TestMultiOutputGraph) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_Status* s = status.get();
string fn_name = "two_adds";
TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg0 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg1 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* add_output1;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg0, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output1 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* add_output2;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg1, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output2 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_DeleteAbstractTensor(arg0);
TF_DeleteAbstractTensor(arg1);
TF_AbstractFunction* func;
{
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListPushBack(func_outputs, add_output1, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, add_output2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
func = TF_FinalizeFunction(graph_ctx, func_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractTensor(add_output1);
TF_DeleteAbstractTensor(add_output2);
TF_DeleteOutputList(func_outputs);
}
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
std::vector<TF_AbstractTensor*> func_args;
{
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
input_eager = TestScalarTensorHandle(eager_ctx, 3.0f);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
}
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(func_outputs, 2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(fn_op, func_args.size(), func_args.data(), func_outputs,
s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(fn_op);
for (TF_AbstractTensor* t : func_args) TF_DeleteAbstractTensor(t);
ASSERT_EQ(2, TF_OutputListNumOutputs(func_outputs));
float results[2];
for (int idx = 0; idx < 2; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TFE_TensorHandle* handle = TF_AbstractTensorGetEagerTensor(result, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_Tensor* f_t = TFE_TensorHandleResolve(handle, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
results[idx] = *static_cast<float*>(TF_TensorData(f_t));
TF_DeleteTensor(f_t);
}
ASSERT_EQ(results[0], 5.0);
ASSERT_EQ(results[1], 6.0);
for (int idx = 0; idx < 2; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TF_DeleteAbstractTensor(result);
}
TF_DeleteOutputList(func_outputs);
TF_DeleteExecutionContext(eager_execution_ctx);
TF_DeleteAbstractFunction(func);
}
TEST_P(UnifiedCAPI, TestMultiOutputGraphMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_Status* s = status.get();
string fn_name = "two_adds_and_matmul";
TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg0 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg1 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* add_output1;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add1", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg0, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output1 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* add_output2;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add2", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg1, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output2 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* mm_output;
{
auto* mm_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(mm_op, "MatMul", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(mm_op, "mm", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {add_output1, add_output2};
TF_OutputList* mm_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(mm_op, 2, inputs, mm_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(mm_op);
mm_output = TF_OutputListGet(mm_outputs, 0);
TF_DeleteOutputList(mm_outputs);
}
TF_AbstractFunction* func;
{
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListPushBack(func_outputs, add_output1, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, add_output2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, mm_output, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
func = TF_FinalizeFunction(graph_ctx, func_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteOutputList(func_outputs);
}
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
std::vector<TF_AbstractTensor*> func_args;
{
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, s);
float vals1[] = {0.0f, 1.0f, 1.0f, 0.0f};
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
TFE_TensorHandle* input_eager =
TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
float vals2[] = {1.0f, 0.0f, 0.0f, 1.0f};
input_eager =
TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
}
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(func_outputs, 3, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(fn_op, func_args.size(), func_args.data(), func_outputs,
s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(fn_op);
for (TF_AbstractTensor* t : func_args) TF_DeleteAbstractTensor(t);
ASSERT_EQ(3, TF_OutputListNumOutputs(func_outputs));
float expected_outputs[3][4] = {{1.0f, 1.0f, 1.0f, 1.0f},
{2.0f, 0.0f, 0.0f, 2.0f},
{2.0f, 2.0f, 2.0f, 2.0f}};
float result_data[4];
for (int idx = 0; idx < 3; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TFE_TensorHandle* handle = TF_AbstractTensorGetEagerTensor(result, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_Tensor* f_t = TFE_TensorHandleResolve(handle, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
memcpy(&result_data[0], TF_TensorData(f_t), TF_TensorByteSize(f_t));
for (int j = 0; j < 4; j++) {
ASSERT_EQ(result_data[j], expected_outputs[idx][j]);
}
TF_DeleteTensor(f_t);
}
for (int idx = 0; idx < 3; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TF_DeleteAbstractTensor(result);
}
TF_DeleteOutputList(func_outputs);
TF_DeleteExecutionContext(eager_execution_ctx);
TF_DeleteAbstractFunction(func);
}
TEST_P(UnifiedCAPI, TF_ExecutionContextToFunctionWithEagerContextRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TF_AbstractFunction* f = TF_FinalizeFunction(ctx, nullptr, status.get());
ASSERT_EQ(nullptr, f);
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TF_AbstractOpSetOpTypeAfterFinishingOpBuildingRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get());
ASSERT_EQ(TF_FAILED_PRECONDITION, TF_GetCode(status.get()));
TF_DeleteAbstractOp(placeholder_op);
TF_DeleteExecutionContext(graph_ctx);
}
TEST_P(UnifiedCAPI, TF_AbstractOpSetOpNameAfterFinishingOpBuildingRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get());
ASSERT_EQ(TF_FAILED_PRECONDITION, TF_GetCode(status.get()));
TF_DeleteAbstractOp(placeholder_op);
TF_DeleteExecutionContext(graph_ctx);
}
TEST_P(UnifiedCAPI, TF_AbstractTensorGetEagerTensorOnGraphTensorRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto placeholder_t =
TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
TF_AbstractTensorGetEagerTensor(placeholder_t, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
TF_DeleteAbstractTensor(placeholder_t);
TF_DeleteExecutionContext(graph_ctx);
}
TEST_P(UnifiedCAPI, TF_ExecutionContextGetTFEContextFromFunctionContextRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecutionContextGetTFEContext(graph_ctx, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
TF_DeleteExecutionContext(graph_ctx);
}
INSTANTIATE_TEST_SUITE_P(Tracing, UnifiedCAPI,
::testing::Combine(::testing::Values("graphdef",
"mlir"),
::testing::Values(false)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_unified_experimental.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_unified_experimental_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
daee26e5-7dff-4070-9fef-3d31284ec703 | cpp | google/quiche | hpack_example | quiche/http2/test_tools/hpack_example.cc | quiche/http2/test_tools/hpack_example_test.cc | #include "quiche/http2/test_tools/hpack_example.h"
#include <ctype.h>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace test {
namespace {
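// Decodes a textual HPACK example into raw bytes: pairs of hex digits become
// bytes, whitespace is skipped, and '|' starts a comment that runs to the end
// of the line.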
void HpackExampleToStringOrDie(absl::string_view example, std::string* output) {
while (!example.empty()) {
const char c0 = example[0];
if (isxdigit(c0)) {
QUICHE_CHECK_GT(example.size(), 1u) << "Truncated hex byte?";
const char c1 = example[1];
QUICHE_CHECK(isxdigit(c1)) << "Found half a byte?";
std::string byte;
QUICHE_CHECK(absl::HexStringToBytes(example.substr(0, 2), &byte))
<< "Can't parse hex byte";
absl::StrAppend(output, byte);
example.remove_prefix(2);
continue;
}
if (isspace(c0)) {
example.remove_prefix(1);
continue;
}
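// A '|' begins a comment; skip everything up to the end of the line.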
if (c0 == '|') {
auto pos = example.find('\n');
if (pos == absl::string_view::npos) {
break;
}
example.remove_prefix(pos + 1);
continue;
}
QUICHE_BUG(http2_bug_107_1)
<< "Can't parse byte " << static_cast<int>(c0)
<< absl::StrCat(" (0x", absl::Hex(c0), ")") << "\nExample: " << example;
}
QUICHE_CHECK_LT(0u, output->size()) << "Example is empty.";
}
}
std::string HpackExampleToStringOrDie(absl::string_view example) {
std::string output;
HpackExampleToStringOrDie(example, &output);
return output;
}
}
} | #include "quiche/http2/test_tools/hpack_example.h"
#include <string>
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
TEST(HpackExampleToStringOrDie, GoodInput) {
std::string bytes = HpackExampleToStringOrDie(R"(
40 | == Literal never indexed ==
| Blank lines are OK in example:
08 | Literal name (len = 8)
7061 7373 776f 7264 | password
06 | Literal value (len = 6)
7365 6372 6574 | secret
| -> password: secret
)");
const char kExpected[] = {
0x40,
0x08,
0x70, 0x61, 0x73, 0x73,
0x77, 0x6f, 0x72, 0x64,
0x06,
0x73, 0x65, 0x63, 0x72,
0x65, 0x74,
};
EXPECT_EQ(absl::string_view(kExpected, sizeof kExpected), bytes);
}
#ifdef GTEST_HAS_DEATH_TEST
TEST(HpackExampleToStringOrDie, InvalidInput) {
EXPECT_QUICHE_DEATH(HpackExampleToStringOrDie("4"), "Truncated");
EXPECT_QUICHE_DEATH(HpackExampleToStringOrDie("4x"), "half");
EXPECT_QUICHE_DEATH(HpackExampleToStringOrDie(""), "empty");
}
#endif
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/hpack_example.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/hpack_example_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
0c3d69ed-c6ad-4d0e-8604-67b24a8d7b7c | cpp | google/arolla | multi_loader | arolla/codegen/io/multi_loader.cc | arolla/codegen/io/multi_loader_test.cc | #include "arolla/codegen/io/multi_loader.h"
#include <algorithm>
#include <cstddef>
#include <optional>
#include <vector>
#include "absl/log/check.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
namespace arolla::codegen::io {
namespace multi_loader_internal {
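// Fills per-leaf frame offsets and per-intermediate-node "requested" flags.
// `tree` lists each node's children; a child id is always smaller than its
// parent's (checked below), so one forward pass over the nodes suffices.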
void CreateHierarchicalRequestedInputs(
const std::vector<std::optional<TypedSlot>>& leaf_slots,
const std::vector<std::vector<size_t>>& tree,
HierarchicalRequestedInputsDataView output) {
CHECK_LT(leaf_slots.size(), 1 << 16)
<< "Too many input leaves for generated code";
std::vector<size_t> leaf_frame_offsets;
std::vector<char> node_requested;
node_requested.resize(tree.size(), false);
for (size_t node_id = 0; node_id != tree.size(); ++node_id) {
const std::vector<size_t>& children = tree[node_id];
if (children.empty()) {
size_t leaf_id = leaf_frame_offsets.size();
const std::optional<TypedSlot>& slot = leaf_slots[leaf_id];
size_t offset = slot.has_value() ? slot->byte_offset() : kSkippedOffset;
leaf_frame_offsets.push_back(offset);
node_requested[node_id] = slot.has_value();
} else {
node_requested[node_id] = false;
for (size_t child : children) {
CHECK_LT(child, node_id);
node_requested[node_id] |= node_requested[child];
}
}
}
std::copy(leaf_frame_offsets.begin(), leaf_frame_offsets.end(),
output.leaf_frame_offsets.begin());
size_t intermediate_id = 0;
for (size_t i = 0; i != tree.size(); ++i) {
if (tree[i].empty()) {
continue;
}
CHECK_LT(intermediate_id, output.node_requested.size());
output.node_requested[intermediate_id++] = node_requested[i];
}
}
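// Single-value variant: additionally records, per node, half-open ranges of
// presence and size offsets that must be cleared when the node is missing.
// Presence offsets come first in `requested_offsets`, then size offsets, and
// the remaining entries are padded with kSkippedOffset.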
void CreateHierarchicalSingleValueRequestedInputs(
const std::vector<std::optional<TypedSlot>>& leaf_slots,
const std::vector<size_t>& size_leaves,
const std::vector<std::vector<size_t>>& tree,
HierarchicalSingleValueRequestedInputsDataView output) {
CHECK_LT(leaf_slots.size(), 1 << 16)
<< "Too many input leaves for generated code";
std::vector<HierarchicalSingleValueClearInfo> node_optional_clear_infos;
std::vector<HierarchicalSingleValueClearInfo> node_size_clear_infos;
std::vector<size_t> presence_offsets;
std::vector<size_t> size_offsets;
node_optional_clear_infos.resize(tree.size(),
HierarchicalSingleValueClearInfo{});
node_size_clear_infos.resize(tree.size(), HierarchicalSingleValueClearInfo{});
size_t leaf_id = 0;
for (size_t node_id = 0; node_id != tree.size(); ++node_id) {
const std::vector<size_t>& children = tree[node_id];
auto& node_optional_clear_info = node_optional_clear_infos[node_id];
auto& node_size_clear_info = node_size_clear_infos[node_id];
if (children.empty()) {
const std::optional<TypedSlot>& slot = leaf_slots[leaf_id];
size_t offset = slot.has_value() ? slot->byte_offset() : kSkippedOffset;
node_optional_clear_info.range_begin = presence_offsets.size();
node_size_clear_info.range_begin = size_offsets.size();
if (offset != kSkippedOffset) {
if (std::binary_search(size_leaves.begin(), size_leaves.end(),
leaf_id)) {
size_offsets.push_back(offset);
} else if (::arolla::IsOptionalQType(slot->GetType())) {
presence_offsets.push_back(offset);
}
}
node_optional_clear_info.range_end = presence_offsets.size();
node_size_clear_info.range_end = size_offsets.size();
++leaf_id;
} else {
node_optional_clear_info.range_begin =
node_optional_clear_infos[children.front()].range_begin;
node_optional_clear_info.range_end =
node_optional_clear_infos[children.back()].range_end;
node_size_clear_info.range_begin =
node_size_clear_infos[children.front()].range_begin;
node_size_clear_info.range_end =
node_size_clear_infos[children.back()].range_end;
}
}
CHECK_GE(output.requested_offsets.size(),
presence_offsets.size() + size_offsets.size());
std::copy(presence_offsets.begin(), presence_offsets.end(),
output.requested_offsets.begin());
std::copy(size_offsets.begin(), size_offsets.end(),
output.requested_offsets.begin() + presence_offsets.size());
std::fill(output.requested_offsets.begin() + presence_offsets.size() +
size_offsets.size(),
output.requested_offsets.end(), kSkippedOffset);
size_t leaf_count = 0;
for (size_t i = 0; i != tree.size(); ++i) {
if (tree[i].empty()) {
++leaf_count;
continue;
}
size_t intermediate_id = i - leaf_count;
output.node_optional_clear_infos[intermediate_id] =
node_optional_clear_infos[i];
output.node_size_clear_infos[intermediate_id] = node_size_clear_infos[i];
output.node_size_clear_infos[intermediate_id].range_begin +=
presence_offsets.size();
output.node_size_clear_infos[intermediate_id].range_end +=
presence_offsets.size();
}
}
}
} | #include "arolla/codegen/io/multi_loader.h"
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/proto/testing/test.pb.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla::codegen::io {
namespace {
using ::testing::ElementsAre;
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsTrivialAllRequested) {
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OptionalValue<int>>();
auto b_slot = layout_builder.AddSlot<OptionalValue<int>>();
auto c_slot = layout_builder.AddSlot<OptionalValue<int>>();
HierarchicalSingleValueRequestedInputsData<3, 4> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{TypedSlot::FromSlot(a_slot), TypedSlot::FromSlot(b_slot),
TypedSlot::FromSlot(c_slot)},
{1}, {{}, {}, {}, {0, 1, 2}}, &inputs);
EXPECT_THAT(inputs.common.leaf_frame_offsets,
ElementsAre(a_slot.byte_offset(), b_slot.byte_offset(),
c_slot.byte_offset()));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(true));
EXPECT_THAT(inputs.requested_offsets,
ElementsAre(a_slot.byte_offset(), c_slot.byte_offset(),
b_slot.byte_offset()));
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(HierarchicalSingleValueClearInfo{0, 2}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(HierarchicalSingleValueClearInfo{2, 3}));
}
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsTrivialNothingRequested) {
HierarchicalSingleValueRequestedInputsData<3, 4> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{std::nullopt, std::nullopt, std::nullopt},
{1}, {{}, {}, {}, {0, 1, 2}}, &inputs);
EXPECT_THAT(inputs.common.leaf_frame_offsets,
ElementsAre(kSkippedOffset, kSkippedOffset, kSkippedOffset));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(false));
EXPECT_THAT(inputs.requested_offsets,
ElementsAre(kSkippedOffset, kSkippedOffset, kSkippedOffset));
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(HierarchicalSingleValueClearInfo{0, 0}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(HierarchicalSingleValueClearInfo{0, 0}));
}
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsTrivialSizeRequested) {
FrameLayout::Builder layout_builder;
auto b_slot = layout_builder.AddSlot<OptionalValue<int>>();
HierarchicalSingleValueRequestedInputsData<3, 4> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{std::nullopt, TypedSlot::FromSlot(b_slot), std::nullopt},
{1}, {{}, {}, {}, {0, 1, 2}}, &inputs);
EXPECT_THAT(
inputs.common.leaf_frame_offsets,
ElementsAre(kSkippedOffset, b_slot.byte_offset(), kSkippedOffset));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(true));
EXPECT_THAT(
inputs.requested_offsets,
ElementsAre(b_slot.byte_offset(), kSkippedOffset, kSkippedOffset));
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(HierarchicalSingleValueClearInfo{0, 0}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(HierarchicalSingleValueClearInfo{0, 1}));
}
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsTrivialOptionalRequested) {
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OptionalValue<int>>();
HierarchicalSingleValueRequestedInputsData<3, 4> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{TypedSlot::FromSlot(a_slot), std::nullopt, std::nullopt},
{1}, {{}, {}, {}, {0, 1, 2}}, &inputs);
EXPECT_THAT(
inputs.common.leaf_frame_offsets,
ElementsAre(a_slot.byte_offset(), kSkippedOffset, kSkippedOffset));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(true));
EXPECT_THAT(
inputs.requested_offsets,
ElementsAre(a_slot.byte_offset(), kSkippedOffset, kSkippedOffset));
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(HierarchicalSingleValueClearInfo{0, 1}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(HierarchicalSingleValueClearInfo{1, 1}));
}
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsHierarchyAllRequested) {
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OptionalValue<int>>();
auto b_slot = layout_builder.AddSlot<DenseArrayShape>();
auto c_slot = layout_builder.AddSlot<OptionalValue<int>>();
auto d_slot = layout_builder.AddSlot<OptionalValue<int>>();
HierarchicalSingleValueRequestedInputsData<4, 7> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{TypedSlot::FromSlot(a_slot), TypedSlot::FromSlot(b_slot),
TypedSlot::FromSlot(c_slot), TypedSlot::FromSlot(d_slot)},
{1}, {{}, {}, {0, 1}, {}, {3}, {}, {2, 4, 5}},
&inputs);
EXPECT_THAT(inputs.common.leaf_frame_offsets,
ElementsAre(a_slot.byte_offset(), b_slot.byte_offset(),
c_slot.byte_offset(), d_slot.byte_offset()));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(true, true, true));
EXPECT_THAT(inputs.requested_offsets,
ElementsAre(a_slot.byte_offset(), c_slot.byte_offset(),
d_slot.byte_offset(), b_slot.byte_offset()));
using CI = HierarchicalSingleValueClearInfo;
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(CI{0, 1}, CI{1, 2}, CI{0, 3}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(CI{3, 4}, CI{4, 4}, CI{3, 4}));
}
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsAFewRequestedWithFullValue) {
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OptionalValue<int>>();
auto c_slot = layout_builder.AddSlot<int>();
HierarchicalSingleValueRequestedInputsData<4, 7> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{TypedSlot::FromSlot(a_slot), std::nullopt, TypedSlot::FromSlot(c_slot),
std::nullopt},
{1}, {{}, {}, {0, 1}, {}, {3}, {}, {2, 4, 5}},
&inputs);
EXPECT_THAT(inputs.common.leaf_frame_offsets,
ElementsAre(a_slot.byte_offset(), kSkippedOffset,
c_slot.byte_offset(), kSkippedOffset));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(true, true, true));
EXPECT_THAT(inputs.requested_offsets,
ElementsAre(a_slot.byte_offset(), kSkippedOffset, kSkippedOffset,
kSkippedOffset));
using CI = HierarchicalSingleValueClearInfo;
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(CI{0, 1}, CI{1, 1}, CI{0, 1}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(CI{1, 1}, CI{1, 1}, CI{1, 1}));
}
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsAllRequestedWithFullValue) {
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OptionalValue<int>>();
auto b_slot = layout_builder.AddSlot<DenseArrayShape>();
auto c_slot = layout_builder.AddSlot<int>();
auto d_slot = layout_builder.AddSlot<OptionalValue<int>>();
HierarchicalSingleValueRequestedInputsData<4, 7> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{TypedSlot::FromSlot(a_slot), TypedSlot::FromSlot(b_slot),
TypedSlot::FromSlot(c_slot), TypedSlot::FromSlot(d_slot)},
{1}, {{}, {}, {0, 1}, {}, {3}, {}, {2, 4, 5}},
&inputs);
EXPECT_THAT(inputs.common.leaf_frame_offsets,
ElementsAre(a_slot.byte_offset(), b_slot.byte_offset(),
c_slot.byte_offset(), d_slot.byte_offset()));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(true, true, true));
EXPECT_THAT(inputs.requested_offsets,
ElementsAre(a_slot.byte_offset(), d_slot.byte_offset(),
b_slot.byte_offset(), kSkippedOffset));
using CI = HierarchicalSingleValueClearInfo;
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(CI{0, 1}, CI{1, 1}, CI{0, 2}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(CI{2, 3}, CI{3, 3}, CI{2, 3}));
}
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsHierarchySizeRequested) {
FrameLayout::Builder layout_builder;
auto b_slot = layout_builder.AddSlot<OptionalValue<int>>();
HierarchicalSingleValueRequestedInputsData<4, 7> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{std::nullopt, TypedSlot::FromSlot(b_slot), std::nullopt, std::nullopt},
{1}, {{}, {}, {0, 1}, {}, {3}, {}, {2, 4}},
&inputs);
EXPECT_THAT(inputs.common.leaf_frame_offsets,
ElementsAre(kSkippedOffset, b_slot.byte_offset(), kSkippedOffset,
kSkippedOffset));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(true, false, true));
EXPECT_THAT(inputs.requested_offsets,
ElementsAre(b_slot.byte_offset(), kSkippedOffset, kSkippedOffset,
kSkippedOffset));
using CI = HierarchicalSingleValueClearInfo;
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(CI{0, 0}, CI{0, 0}, CI{0, 0}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(CI{0, 1}, CI{1, 1}, CI{0, 1}));
}
TEST(SingleValueTest,
CreateHierarchicalSingleValueRequestedInputsHierarchyOptionalRequested) {
FrameLayout::Builder layout_builder;
auto c_slot = layout_builder.AddSlot<OptionalValue<int>>();
HierarchicalSingleValueRequestedInputsData<4, 7> inputs;
CreateHierarchicalSingleValueRequestedInputs(
{std::nullopt, std::nullopt, TypedSlot::FromSlot(c_slot), std::nullopt},
{1}, {{}, {}, {0, 1}, {}, {3}, {}, {2, 4}},
&inputs);
EXPECT_THAT(inputs.common.leaf_frame_offsets,
ElementsAre(kSkippedOffset, kSkippedOffset, c_slot.byte_offset(),
kSkippedOffset));
EXPECT_THAT(inputs.common.node_requested, ElementsAre(false, true, true));
EXPECT_THAT(inputs.requested_offsets,
ElementsAre(c_slot.byte_offset(), kSkippedOffset, kSkippedOffset,
kSkippedOffset));
using CI = HierarchicalSingleValueClearInfo;
EXPECT_THAT(inputs.node_optional_clear_infos,
ElementsAre(CI{0, 0}, CI{0, 1}, CI{0, 1}));
EXPECT_THAT(inputs.node_size_clear_infos,
ElementsAre(CI{1, 1}, CI{1, 1}, CI{1, 1}));
}
TEST(ResizeRepeatedProtoFieldTest, MessageResize) {
testing_namespace::Root root;
ResizeRepeatedProtoField(root.mutable_inners(), 5);
EXPECT_EQ(root.inners_size(), 5);
EXPECT_FALSE(root.inners(0).has_a());
root.mutable_inners(0)->set_a(13);
ResizeRepeatedProtoField(root.mutable_inners(), 7);
EXPECT_EQ(root.inners_size(), 7);
EXPECT_TRUE(root.inners(0).has_a());
EXPECT_EQ(root.inners(0).a(), 13);
ResizeRepeatedProtoField(root.mutable_inners(), 3);
EXPECT_EQ(root.inners_size(), 3);
EXPECT_TRUE(root.inners(0).has_a());
EXPECT_EQ(root.inners(0).a(), 13);
ResizeRepeatedProtoField(root.mutable_inners(), 3);
EXPECT_EQ(root.inners_size(), 3);
EXPECT_TRUE(root.inners(0).has_a());
EXPECT_EQ(root.inners(0).a(), 13);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/io/multi_loader.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/io/multi_loader_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
7c414036-30e3-4213-8071-6e2396c36412 | cpp | google/libaddressinput | address_field_util | cpp/src/address_field_util.cc | cpp/test/address_field_util_test.cc | #include "address_field_util.h"
#include <libaddressinput/address_field.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>
#include "format_element.h"
namespace i18n {
namespace addressinput {
namespace {
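// Maps a single format token character (e.g. 'R', 'Z') to its AddressField.
// Returns false for characters that do not name a field.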
bool ParseFieldToken(char c, AddressField* field) {
assert(field != nullptr);
static const struct { char c; AddressField field; } kTokenMap[] = {
{ 'R', COUNTRY },
{ 'S', ADMIN_AREA },
{ 'C', LOCALITY },
{ 'D', DEPENDENT_LOCALITY },
{ 'X', SORTING_CODE },
{ 'Z', POSTAL_CODE },
{ 'A', STREET_ADDRESS },
{ 'O', ORGANIZATION },
{ 'N', RECIPIENT },
};
for (const auto& entry : kTokenMap) {
if (c == entry.c) {
*field = entry.field;
return true;
}
}
return false;
}
}
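// Parses a format string such as "%O%n%N%n%A" into literal and field
// elements: "%n" yields a newline element, unrecognized "%x" tokens are
// dropped, and text between tokens is kept as literal elements.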
void ParseFormatRule(const std::string& format,
std::vector<FormatElement>* elements) {
assert(elements != nullptr);
elements->clear();
std::string::const_iterator prev = format.begin();
for (std::string::const_iterator next = format.begin();
next != format.end(); prev = ++next) {
if ((next = std::find(next, format.end(), '%')) == format.end()) {
break;
}
if (prev < next) {
elements->emplace_back(std::string(prev, next));
}
if ((prev = ++next) == format.end()) {
break;
}
AddressField field;
if (*next == 'n') {
elements->emplace_back();
} else if (ParseFieldToken(*next, &field)) {
elements->emplace_back(field);
}
}
if (prev != format.end()) {
elements->emplace_back(std::string(prev, format.end()));
}
}
void ParseAddressFieldsRequired(const std::string& required,
std::vector<AddressField>* fields) {
assert(fields != nullptr);
fields->clear();
for (char c : required) {
AddressField field;
if (ParseFieldToken(c, &field)) {
fields->push_back(field);
}
}
}
}
} | #include "address_field_util.h"
#include <libaddressinput/address_field.h>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "format_element.h"
namespace {
using i18n::addressinput::AddressField;
using i18n::addressinput::FormatElement;
using i18n::addressinput::ParseAddressFieldsRequired;
using i18n::addressinput::ParseFormatRule;
using i18n::addressinput::COUNTRY;
using i18n::addressinput::LOCALITY;
using i18n::addressinput::POSTAL_CODE;
using i18n::addressinput::STREET_ADDRESS;
using i18n::addressinput::ORGANIZATION;
using i18n::addressinput::RECIPIENT;
TEST(AddressFieldUtilTest, FormatParseNewline) {
std::vector<FormatElement> actual;
ParseFormatRule("%O%n%N%n%A%nAX-%Z %C%nÅLAND", &actual);
const std::vector<FormatElement> expected{
FormatElement{ORGANIZATION},
FormatElement{},
FormatElement{RECIPIENT},
FormatElement{},
FormatElement{STREET_ADDRESS},
FormatElement{},
FormatElement{"AX-"},
FormatElement{POSTAL_CODE},
FormatElement{" "},
FormatElement{LOCALITY},
FormatElement{},
FormatElement{"ÅLAND"},
};
EXPECT_EQ(expected, actual);
}
TEST(AddressFieldUtilTest, FormatUnknownTokenIsIgnored) {
std::vector<FormatElement> actual;
ParseFormatRule("%1%R", &actual);
const std::vector<FormatElement> expected{FormatElement{COUNTRY}};
EXPECT_EQ(expected, actual);
}
TEST(AddressFieldUtilTest, FormatPrefixWithoutTokenIsIgnored) {
std::vector<FormatElement> actual;
ParseFormatRule("%", &actual);
EXPECT_TRUE(actual.empty());
}
TEST(AddressFieldUtilTest, FormatEmptyString) {
std::vector<FormatElement> fields;
ParseFormatRule(std::string(), &fields);
EXPECT_TRUE(fields.empty());
}
TEST(AddressFieldUtilTest, RequiredParseDefault) {
std::vector<AddressField> actual;
ParseAddressFieldsRequired("AC", &actual);
const std::vector<AddressField> expected{
STREET_ADDRESS,
LOCALITY,
};
EXPECT_EQ(expected, actual);
}
TEST(AddressFieldUtilTest, RequiredEmptyString) {
std::vector<AddressField> fields;
ParseAddressFieldsRequired(std::string(), &fields);
EXPECT_TRUE(fields.empty());
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_field_util.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_field_util_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
7db001ef-4305-4c62-9979-270514aeb36e | cpp | google/tensorstore | chunk_encoding | tensorstore/driver/neuroglancer_precomputed/chunk_encoding.cc | tensorstore/driver/neuroglancer_precomputed/chunk_encoding_test.cc | #include "tensorstore/driver/neuroglancer_precomputed/chunk_encoding.h"
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/neuroglancer_precomputed/metadata.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include "tensorstore/internal/data_type_endian_conversion.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/internal/image/jpeg_reader.h"
#include "tensorstore/internal/image/jpeg_writer.h"
#include "tensorstore/internal/image/png_reader.h"
#include "tensorstore/internal/image/png_writer.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_neuroglancer_precomputed {
using ::tensorstore::internal_image::ImageInfo;
using ::tensorstore::internal_image::JpegWriterOptions;
using ::tensorstore::internal_image::PngWriterOptions;
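// Decodes a chunk stored with the "raw" encoding. The buffer holds
// little-endian values; a full interior chunk may alias the cord directly,
// while a partial boundary chunk is copied into a zero-initialized
// full-size array.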
Result<SharedArray<const void>> DecodeRawChunk(
DataType dtype, span<const Index, 4> shape,
StridedLayoutView<4> chunk_layout, absl::Cord buffer) {
const Index expected_bytes = ProductOfExtents(shape) * dtype.size();
if (expected_bytes != static_cast<Index>(buffer.size())) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected chunk length to be ", expected_bytes,
", but received ", buffer.size(), " bytes"));
}
auto flat_buffer = buffer.Flatten();
if (absl::c_equal(shape, chunk_layout.shape())) {
auto decoded_array = internal::TryViewCordAsArray(
buffer, 0, dtype, endian::little, chunk_layout);
if (decoded_array.valid()) return {std::in_place, decoded_array};
}
Array<const void, 4> source(
{static_cast<const void*>(flat_buffer.data()), dtype}, shape);
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
value_init, dtype),
chunk_layout);
ArrayView<void> partial_decoded_array(
full_decoded_array.element_pointer(),
StridedLayoutView<>{shape, chunk_layout.byte_strides()});
internal::DecodeArray(source, endian::little, partial_decoded_array);
return full_decoded_array;
}
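// Shared decode path for the "jpeg" and "png" encodings: the chunk is stored
// as a 2-D image whose total pixel count must equal the product of the
// spatial dimensions and whose component count must equal the channel
// dimension.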
template <typename ImageReader>
Result<SharedArray<const void>> DecodeImageChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
auto array = AllocateArray(
{partial_shape[1], partial_shape[2], partial_shape[3], partial_shape[0]},
c_order, default_init, dtype);
{
riegeli::CordReader<> cord_reader(&encoded_input);
ImageReader reader;
TENSORSTORE_RETURN_IF_ERROR(reader.Initialize(&cord_reader));
auto info = reader.GetImageInfo();
const Index num_elements = ProductOfExtents(partial_shape.subspan<1>());
size_t total_pixels;
if (internal::MulOverflow(static_cast<size_t>(info.width),
static_cast<size_t>(info.height),
&total_pixels) ||
num_elements == std::numeric_limits<Index>::max() ||
static_cast<Index>(total_pixels) != num_elements ||
static_cast<Index>(info.num_components) != partial_shape[0]) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Image dimensions (", info.width, ", ", info.height, ", ",
info.num_components,
") are not compatible with expected chunk shape ", partial_shape));
}
TENSORSTORE_RETURN_IF_ERROR(reader.Decode(
tensorstore::span(reinterpret_cast<unsigned char*>(array.data()),
ImageRequiredBytes(info))));
if (!cord_reader.Close()) {
return cord_reader.status();
}
}
if (partial_shape[0] == 1 &&
absl::c_equal(partial_shape, chunk_layout.shape())) {
return SharedArray<const void>(array.element_pointer(), chunk_layout);
}
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
default_init, dtype),
chunk_layout);
Array<void, 4> partial_decoded_array(
full_decoded_array.element_pointer(),
StridedLayout<4>(
{partial_shape[1], partial_shape[2], partial_shape[3],
partial_shape[0]},
{chunk_layout.byte_strides()[1], chunk_layout.byte_strides()[2],
chunk_layout.byte_strides()[3], chunk_layout.byte_strides()[0]}));
CopyArray(array, partial_decoded_array);
return full_decoded_array;
}
Result<SharedArray<const void>> DecodeJpegChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
return DecodeImageChunk<internal_image::JpegReader>(
dtype, partial_shape, chunk_layout, std::move(encoded_input));
}
Result<SharedArray<const void>> DecodePngChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
return DecodeImageChunk<internal_image::PngReader>(
dtype, partial_shape, chunk_layout, std::move(encoded_input));
}
Result<SharedArray<const void>> DecodeCompressedSegmentationChunk(
DataType dtype, span<const Index, 4> shape,
StridedLayoutView<4> chunk_layout, std::array<Index, 3> block_size,
absl::Cord buffer) {
auto flat_buffer = buffer.Flatten();
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
default_init, dtype),
chunk_layout);
std::ptrdiff_t output_shape_ptrdiff_t[4] = {shape[0], shape[1], shape[2],
shape[3]};
std::ptrdiff_t block_shape_ptrdiff_t[3] = {block_size[2], block_size[1],
block_size[0]};
std::ptrdiff_t output_byte_strides[4] = {
chunk_layout.byte_strides()[0], chunk_layout.byte_strides()[1],
chunk_layout.byte_strides()[2], chunk_layout.byte_strides()[3]};
bool success = false;
switch (dtype.id()) {
case DataTypeId::uint32_t:
success = neuroglancer_compressed_segmentation::DecodeChannels(
flat_buffer, block_shape_ptrdiff_t, output_shape_ptrdiff_t,
output_byte_strides,
static_cast<uint32_t*>(full_decoded_array.data()));
break;
case DataTypeId::uint64_t:
success = neuroglancer_compressed_segmentation::DecodeChannels(
flat_buffer, block_shape_ptrdiff_t, output_shape_ptrdiff_t,
output_byte_strides,
static_cast<uint64_t*>(full_decoded_array.data()));
break;
default:
ABSL_UNREACHABLE();
}
if (!success) {
return absl::InvalidArgumentError(
"Corrupted Neuroglancer compressed segmentation");
}
return full_decoded_array;
}
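// Computes the shape of the chunk at `chunk_indices`, clipping the full
// chunk shape against the scale's bounds for chunks on the upper boundary.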
void GetChunkShape(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata, size_t scale_index,
span<const Index, 4> full_chunk_shape,
span<Index, 4> partial_chunk_shape) {
const auto& scale = metadata.scales[scale_index];
partial_chunk_shape[0] = full_chunk_shape[0];
for (int i = 0; i < 3; ++i) {
const Index full_size = full_chunk_shape[3 - i];
partial_chunk_shape[3 - i] = std::min(
scale.box.shape()[i] - chunk_indices[i] * full_size, full_size);
}
}
Result<SharedArray<const void>> DecodeChunk(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata,
size_t scale_index,
StridedLayoutView<4> chunk_layout,
absl::Cord buffer) {
const auto& scale_metadata = metadata.scales[scale_index];
std::array<Index, 4> chunk_shape;
GetChunkShape(chunk_indices, metadata, scale_index, chunk_layout.shape(),
chunk_shape);
switch (scale_metadata.encoding) {
case ScaleMetadata::Encoding::raw:
return DecodeRawChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::png:
return DecodePngChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::jpeg:
return DecodeJpegChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::compressed_segmentation:
return DecodeCompressedSegmentationChunk(
metadata.dtype, chunk_shape, chunk_layout,
scale_metadata.compressed_segmentation_block_size, std::move(buffer));
}
ABSL_UNREACHABLE();
}
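// Encodes a (possibly partial) chunk as little-endian raw bytes.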
absl::Cord EncodeRawChunk(DataType dtype, span<const Index, 4> shape,
const SharedArrayView<const void>& array) {
ArrayView<const void> partial_source(
array.element_pointer(),
StridedLayoutView<>(shape, array.byte_strides()));
internal::FlatCordBuilder buffer(ProductOfExtents(shape) * dtype.size());
Array<void, 4> encoded_array({static_cast<void*>(buffer.data()), dtype},
shape);
internal::EncodeArray(partial_source, encoded_array, endian::little);
return std::move(buffer).Build();
}
template <typename ImageWriter, typename Options>
Result<absl::Cord> EncodeImageChunk(Options options, DataType dtype,
span<const Index, 4> shape,
ArrayView<const void> array) {
Array<const void, 4> partial_source(
array.element_pointer(),
StridedLayout<4>({shape[1], shape[2], shape[3], shape[0]},
{array.byte_strides()[1], array.byte_strides()[2],
array.byte_strides()[3], array.byte_strides()[0]}));
auto contiguous_array = MakeCopy(partial_source, c_order);
absl::Cord buffer;
{
ImageWriter writer;
riegeli::CordWriter<> cord_writer(&buffer);
TENSORSTORE_RETURN_IF_ERROR(writer.Initialize(&cord_writer, options));
ImageInfo info{static_cast<int32_t>(shape[3]),
static_cast<int32_t>(shape[1] * shape[2]),
static_cast<int32_t>(shape[0]),
dtype};
TENSORSTORE_RETURN_IF_ERROR(writer.Encode(
info, tensorstore::span(reinterpret_cast<const unsigned char*>(
contiguous_array.data()),
contiguous_array.num_elements() *
contiguous_array.dtype().size())));
TENSORSTORE_RETURN_IF_ERROR(writer.Done());
}
return buffer;
}
Result<absl::Cord> EncodeJpegChunk(DataType dtype, int quality,
span<const Index, 4> shape,
ArrayView<const void> array) {
internal_image::JpegWriterOptions options;
options.quality = quality;
return EncodeImageChunk<internal_image::JpegWriter>(options, dtype, shape,
array);
}
Result<absl::Cord> EncodePngChunk(DataType dtype, int compression_level,
span<const Index, 4> shape,
ArrayView<const void> array) {
internal_image::PngWriterOptions options;
options.compression_level = compression_level;
return EncodeImageChunk<internal_image::PngWriter>(options, dtype, shape,
array);
}
Result<absl::Cord> EncodeCompressedSegmentationChunk(
DataType dtype, span<const Index, 4> shape, ArrayView<const void> array,
std::array<Index, 3> block_size) {
std::ptrdiff_t input_shape_ptrdiff_t[4] = {shape[0], shape[1], shape[2],
shape[3]};
std::ptrdiff_t block_shape_ptrdiff_t[3] = {block_size[2], block_size[1],
block_size[0]};
std::string out;
std::ptrdiff_t input_byte_strides[4] = {
array.byte_strides()[0], array.byte_strides()[1], array.byte_strides()[2],
array.byte_strides()[3]};
switch (dtype.id()) {
case DataTypeId::uint32_t:
neuroglancer_compressed_segmentation::EncodeChannels(
static_cast<const uint32_t*>(array.data()), input_shape_ptrdiff_t,
input_byte_strides, block_shape_ptrdiff_t, &out);
break;
case DataTypeId::uint64_t:
neuroglancer_compressed_segmentation::EncodeChannels(
static_cast<const uint64_t*>(array.data()), input_shape_ptrdiff_t,
input_byte_strides, block_shape_ptrdiff_t, &out);
break;
default:
ABSL_UNREACHABLE();
}
return absl::Cord(std::move(out));
}
Result<absl::Cord> EncodeChunk(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata,
size_t scale_index,
const SharedArrayView<const void>& array) {
const auto& scale_metadata = metadata.scales[scale_index];
std::array<Index, 4> partial_chunk_shape;
GetChunkShape(chunk_indices, metadata, scale_index,
span<const Index, 4>(array.shape().data(), 4),
partial_chunk_shape);
switch (scale_metadata.encoding) {
case ScaleMetadata::Encoding::raw:
return EncodeRawChunk(metadata.dtype, partial_chunk_shape, array);
case ScaleMetadata::Encoding::jpeg:
return EncodeJpegChunk(metadata.dtype, scale_metadata.jpeg_quality,
partial_chunk_shape, array);
case ScaleMetadata::Encoding::png:
return EncodePngChunk(metadata.dtype, scale_metadata.png_level,
partial_chunk_shape, array);
case ScaleMetadata::Encoding::compressed_segmentation:
return EncodeCompressedSegmentationChunk(
metadata.dtype, partial_chunk_shape, array,
scale_metadata.compressed_segmentation_block_size);
}
ABSL_UNREACHABLE();
}
}
} | #include "tensorstore/driver/neuroglancer_precomputed/chunk_encoding.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/neuroglancer_precomputed/metadata.h"
#include "tensorstore/index.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_neuroglancer_precomputed::DecodeChunk;
using ::tensorstore::internal_neuroglancer_precomputed::EncodeChunk;
using ::tensorstore::internal_neuroglancer_precomputed::MultiscaleMetadata;
struct P {
::nlohmann::json metadata_json;
tensorstore::DataType dtype;
bool compare = true;
bool truncate = true;
};
class ChunkEncodingTest : public testing::TestWithParam<P> {
public:
template <typename T>
tensorstore::SharedArray<void> AllocateArrayImpl(Index num_channels) {
auto array = tensorstore::AllocateArray<T>({num_channels, 5, 4, 3});
for (Index i = 0, n = array.num_elements(); i < n; ++i) {
array.data()[i] = static_cast<T>(i);
}
return array;
}
tensorstore::SharedArray<void> GetArrayForDType(tensorstore::DataTypeId id,
Index num_channels) {
switch (id) {
case tensorstore::DataTypeId::uint8_t:
return AllocateArrayImpl<uint8_t>(num_channels);
case tensorstore::DataTypeId::uint16_t:
return AllocateArrayImpl<uint16_t>(num_channels);
case tensorstore::DataTypeId::uint32_t:
return AllocateArrayImpl<uint32_t>(num_channels);
case tensorstore::DataTypeId::uint64_t:
return AllocateArrayImpl<uint64_t>(num_channels);
default:
ABSL_UNREACHABLE();
}
}
};
TEST_P(ChunkEncodingTest, Roundtrip) {
auto metadata_json = GetParam().metadata_json;
auto dtype = GetParam().dtype;
metadata_json["data_type"] = dtype.name();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
MultiscaleMetadata::FromJson(metadata_json));
auto array = GetArrayForDType(dtype.id(), metadata.num_channels);
std::vector<Index> chunk_indices{0, 0, 0};
const size_t scale_index = 0;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord out, EncodeChunk(chunk_indices, metadata, scale_index, array));
tensorstore::StridedLayout chunk_layout(tensorstore::c_order, dtype.size(),
{metadata.num_channels, 5, 4, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decode_result,
DecodeChunk(chunk_indices, metadata, scale_index, chunk_layout, out));
if (!out.empty() && GetParam().truncate) {
auto corrupt = out.Subcord(0, out.size() - 1);
EXPECT_THAT(
DecodeChunk(chunk_indices, metadata, scale_index, chunk_layout,
corrupt),
testing::AnyOf(MatchesStatus(absl::StatusCode::kDataLoss),
MatchesStatus(absl::StatusCode::kInvalidArgument)));
}
if (GetParam().compare) {
EXPECT_THAT(decode_result, array);
}
}
std::vector<P> GenerateParams() {
std::vector<P> result;
for (const int num_channels : {1, 2, 3, 4}) {
P param;
param.metadata_json =
::nlohmann::json{{"@type", "neuroglancer_multiscale_volume"},
{"num_channels", num_channels},
{"scales",
{{{"chunk_sizes", {{3, 4, 5}}},
{"encoding", "raw"},
{"key", "k"},
{"resolution", {5, 6, 7}},
{"size", {10, 11, 12}}}}},
{"type", "image"}};
param.dtype = tensorstore::dtype_v<uint16_t>;
result.push_back(param);
param.truncate = false;
if (num_channels >= 1 && num_channels <= 4) {
param.metadata_json["scales"][0]["encoding"] = "png";
param.dtype = tensorstore::dtype_v<uint8_t>;
result.push_back(param);
if (num_channels == 1) {
param.dtype = tensorstore::dtype_v<uint16_t>;
result.push_back(param);
}
}
param.truncate = true;
param.compare = false;
if (num_channels == 1 || num_channels == 3) {
param.metadata_json["scales"][0]["encoding"] = "jpeg";
param.dtype = tensorstore::dtype_v<uint8_t>;
result.push_back(param);
}
param.compare = true;
param.metadata_json["scales"][0]["encoding"] = "compressed_segmentation";
param.metadata_json["scales"][0]["compressed_segmentation_block_size"] = {
2, 3, 4};
param.dtype = tensorstore::dtype_v<uint32_t>;
result.push_back(param);
param.dtype = tensorstore::dtype_v<uint64_t>;
result.push_back(param);
}
return result;
}
INSTANTIATE_TEST_SUITE_P(
All, ChunkEncodingTest, testing::ValuesIn(GenerateParams()),
[](const testing::TestParamInfo<P>& info) {
const auto& p = info.param;
auto encoding =
p.metadata_json["scales"][0]["encoding"].get<std::string>();
return tensorstore::StrCat(encoding, "_", p.metadata_json["num_channels"],
"_", p.dtype.name());
});
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/neuroglancer_precomputed/chunk_encoding.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/neuroglancer_precomputed/chunk_encoding_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
acbd2b8c-12d1-4da6-b36c-77c6289018e6 | cpp | tensorflow/tensorflow | ifrt_ir_program_serdes | third_party/xla/xla/python/ifrt/ir/ifrt_ir_program_serdes.cc | third_party/xla/xla/python/ifrt/ir/ifrt_ir_program_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Bytecode/BytecodeWriter.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/python/ifrt/ir/ifrt_ir_program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/support/module_parsing.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
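// SerDes for IfrtIRProgram: serializes the MLIR module to MLIR bytecode and
// deserializes by re-parsing the bytes into a freshly created MLIRContext.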
class IfrtIRProgramSerDes
: public llvm::RTTIExtends<IfrtIRProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::IfrtIRProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const auto& program = llvm::cast<IfrtIRProgram>(serializable);
if (program.mlir_module == nullptr) {
return absl::InvalidArgumentError("Unable to serialize null MLIR module");
}
std::string serialized;
llvm::raw_string_ostream out(serialized);
mlir::BytecodeWriterConfig config;
mlir::BaseScopedDiagnosticHandler diagnostic_handler(
program.mlir_module->getContext());
if (mlir::failed(
mlir::writeBytecodeToFile(program.mlir_module, out, config))) {
return absl::InvalidArgumentError(
absl::StrFormat("Failed to serialize IFRT IR module string: %s",
diagnostic_handler.ConsumeStatus().message()));
}
return serialized;
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions>) override {
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSIGN_OR_RETURN(auto module,
support::ParseMlirModuleString(serialized, *context));
return std::make_unique<IfrtIRProgram>(std::move(context),
std::move(module));
}
static char ID;
};
char IfrtIRProgramSerDes::ID = 0;
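// Registers the SerDes at static-initialization time.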
bool register_ifrt_ir_program_serdes = ([]() {
RegisterSerDes<IfrtIRProgram>(std::make_unique<IfrtIRProgramSerDes>());
}(), true);
}
}
} | #include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "xla/python/ifrt/ir/ifrt_ir_program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/support/module_parsing.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
using ::tsl::testing::StatusIs;
std::string PrintModule(mlir::ModuleOp module) {
std::string module_str;
llvm::raw_string_ostream os(module_str);
module->print(os, mlir::OpPrintingFlags().enableDebugInfo());
return module_str;
}
TEST(IfrtIRProgramSerDesTest, RoundTrip) {
static constexpr absl::string_view kMlirModuleStr = R"(
!array = !ifrt.array<tensor<2xi32>, #ifrt.sharding_param<1 to [0] on 1>, [0]>
module {
func.func @main(%arg0: !array) -> !array attributes {ifrt.function} {
%0, %ctrl_0 = ifrt.Call @add_one::@main(%arg0) on devices [0]
: (!array) -> !array
return %0 : !array
}
module @add_one {
func.func @main(%arg0: tensor<2xi32>) -> tensor<2xi32> {
%0 = mhlo.constant dense<1> : tensor<2xi32>
%1 = mhlo.add %arg0, %0 : tensor<2xi32>
return %1 : tensor<2xi32>
}
}
}
)";
Serialized serialized;
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
support::ParseMlirModuleString(kMlirModuleStr, *context));
auto initial_program =
std::make_unique<IfrtIRProgram>(std::move(context), std::move(module));
TF_ASSERT_OK_AND_ASSIGN(serialized, Serialize(*initial_program));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<IfrtIRProgram> deserialized_program,
Deserialize<IfrtIRProgram>(serialized, nullptr));
EXPECT_EQ(PrintModule(initial_program->mlir_module),
PrintModule(deserialized_program->mlir_module));
}
TEST(IfrtIRProgramSerDesTest, DeserializationError) {
static constexpr absl::string_view kMlirModuleStr = R"(
!array = !ifrt.array<tensor<2xi32>, #ifrt.sharding_param<1 to [0] on 1>, [0]>
module {
func.func @main(%arg0: !array) -> !array attributes {ifrt.function} {
%0, %ctrl_0 = ifrt.Call @add_one::@main(%arg0) on devices [0]
: (!array) -> !array
return %0 : !array
}
module @add_one {
func.func @main(%arg0: tensor<2xi32>) -> tensor<2xi32> {
%0 = mhlo.constant dense<1> : tensor<2xi32>
%1 = mhlo.add %arg0, %0 : tensor<2xi32>
return %1 : tensor<2xi32>
}
}
}
)";
Serialized serialized;
{
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
support::ParseMlirModuleString(kMlirModuleStr, *context));
auto program =
std::make_unique<IfrtIRProgram>(std::move(context), std::move(module));
TF_ASSERT_OK_AND_ASSIGN(serialized, Serialize(*program));
}
serialized.set_data("invalid data");
EXPECT_THAT(Deserialize<IfrtIRProgram>(serialized, nullptr),
StatusIs(Not(absl::StatusCode::kOk),
HasSubstr("Failed to parse IFRT IR module string")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/ir/ifrt_ir_program_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/ir/ifrt_ir_program_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c0ad76b-c7c3-4d04-9d52-d416bdef5a84 | cpp | tensorflow/tensorflow | mutex | third_party/xla/third_party/tsl/tsl/platform/default/mutex.cc | third_party/xla/third_party/tsl/tsl/platform/mutex_test.cc | #include "tsl/platform/mutex.h"
#include <time.h>
#include <cstdint>
#include "nsync_cv.h"
#include "nsync_mu.h"
#include "nsync_mu_wait.h"
#include "nsync_time.h"
namespace tsl {
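// tsl::mutex and tsl::condition_variable are thin wrappers around nsync.
// The static_asserts below verify that the opaque MuData/CVData storage is
// large enough to hold the corresponding nsync types.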
static_assert(sizeof(nsync::nsync_mu) <= sizeof(internal::MuData),
"tsl::internal::MuData needs to be bigger");
static inline nsync::nsync_mu *mu_cast(internal::MuData *mu) {
return reinterpret_cast<nsync::nsync_mu *>(mu);
}
static inline const nsync::nsync_mu *mu_cast(const internal::MuData *mu) {
return reinterpret_cast<const nsync::nsync_mu *>(mu);
}
mutex::mutex() { nsync::nsync_mu_init(mu_cast(&mu_)); }
void mutex::lock() { nsync::nsync_mu_lock(mu_cast(&mu_)); }
bool mutex::try_lock() { return nsync::nsync_mu_trylock(mu_cast(&mu_)) != 0; }
void mutex::unlock() { nsync::nsync_mu_unlock(mu_cast(&mu_)); }
void mutex::assert_held() const TF_ASSERT_EXCLUSIVE_LOCK() {
nsync::nsync_mu_assert_held(mu_cast(&mu_));
}
void mutex::lock_shared() { nsync::nsync_mu_rlock(mu_cast(&mu_)); }
bool mutex::try_lock_shared() {
return nsync::nsync_mu_rtrylock(mu_cast(&mu_)) != 0;
}
void mutex::unlock_shared() { nsync::nsync_mu_runlock(mu_cast(&mu_)); }
void mutex::assert_held_shared() const TF_ASSERT_SHARED_LOCK() {
nsync::nsync_mu_rassert_held(mu_cast(&mu_));
}
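// Adapts tsl::Condition to the C-style predicate signature nsync expects.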
static int EvaluateCondition(const void *vcond) {
return static_cast<int>(static_cast<const Condition *>(vcond)->Eval());
}
void mutex::Await(const Condition &cond) {
nsync::nsync_mu_wait(mu_cast(&mu_), &EvaluateCondition, &cond, nullptr);
}
bool mutex::AwaitWithDeadline(const Condition &cond, uint64_t abs_deadline_ns) {
time_t seconds = abs_deadline_ns / (1000 * 1000 * 1000);
nsync::nsync_time abs_time = nsync::nsync_time_s_ns(
seconds, abs_deadline_ns - seconds * (1000 * 1000 * 1000));
return nsync::nsync_mu_wait_with_deadline(mu_cast(&mu_), &EvaluateCondition,
&cond, nullptr, abs_time,
nullptr) == 0;
}
static_assert(sizeof(nsync::nsync_cv) <= sizeof(internal::CVData),
"tsl::internal::CVData needs to be bigger");
static inline nsync::nsync_cv *cv_cast(internal::CVData *cv) {
return reinterpret_cast<nsync::nsync_cv *>(cv);
}
condition_variable::condition_variable() {
nsync::nsync_cv_init(cv_cast(&cv_));
}
void condition_variable::wait(mutex_lock &lock) {
nsync::nsync_cv_wait(cv_cast(&cv_), mu_cast(&lock.mutex()->mu_));
}
void condition_variable::notify_one() { nsync::nsync_cv_signal(cv_cast(&cv_)); }
void condition_variable::notify_all() {
nsync::nsync_cv_broadcast(cv_cast(&cv_));
}
namespace internal {
std::cv_status wait_until_system_clock(
CVData *cv_data, MuData *mu_data,
const std::chrono::system_clock::time_point timeout_time) {
int r = nsync::nsync_cv_wait_with_deadline(cv_cast(cv_data), mu_cast(mu_data),
timeout_time, nullptr);
return r ? std::cv_status::timeout : std::cv_status::no_timeout;
}
}
} | #include "tsl/platform/mutex.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace {
class MutexTest : public ::testing::Test {
protected:
mutex_lock GetLock() TF_NO_THREAD_SAFETY_ANALYSIS {
return mutex_lock{mu_};
}
tf_shared_lock GetSharedLock() TF_NO_THREAD_SAFETY_ANALYSIS {
return tf_shared_lock{mu_};
}
bool test_try_lock() {
bool test = mu_.try_lock();
if (test) mu_.unlock();
return test;
}
bool test_try_lock_shared() {
bool test = mu_.try_lock_shared();
if (test) mu_.unlock_shared();
return test;
}
mutex mu_;
};
TEST_F(MutexTest, MovableMutexLockTest) {
EXPECT_TRUE(test_try_lock());
{
mutex_lock lock = GetLock();
EXPECT_FALSE(test_try_lock());
EXPECT_FALSE(test_try_lock_shared());
}
EXPECT_TRUE(test_try_lock());
}
TEST_F(MutexTest, SharedMutexLockTest) {
EXPECT_TRUE(test_try_lock());
{
tf_shared_lock lock = GetSharedLock();
EXPECT_FALSE(test_try_lock());
EXPECT_TRUE(test_try_lock_shared());
}
EXPECT_TRUE(test_try_lock());
}
TEST(ConditionVariableTest, WaitWithPredicate) {
constexpr int kNumThreads = 4;
mutex mu;
condition_variable cv;
bool ready = false;
int count = 0;
tsl::thread::ThreadPool pool(Env::Default(),
"condition_variable_test_wait_with_predicate",
kNumThreads);
for (int i = 0; i < kNumThreads; ++i) {
pool.Schedule([&mu, &cv, &ready, &count]() {
mutex_lock lock(mu);
cv.wait(lock, [&ready] { return ready; });
++count;
cv.notify_one();
});
}
{
mutex_lock lock(mu);
EXPECT_EQ(count, 0);
}
{
mutex_lock lock(mu);
ready = true;
cv.notify_all();
}
{
mutex_lock lock(mu);
cv.wait(lock, [&count, kNumThreads] { return count == kNumThreads; });
EXPECT_EQ(count, kNumThreads);
}
}
TEST(ConditionVariableTest, WaitWithTruePredicateDoesntBlock) {
mutex mu;
mutex_lock lock(mu);
condition_variable cv;
cv.wait(lock, [] { return true; });
EXPECT_TRUE(static_cast<bool>(lock));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/mutex.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/mutex_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
67ce4b54-0758-4b23-907b-85eb35a6a2ae | cpp | tensorflow/tensorflow | convolution_group_converter | third_party/xla/xla/service/convolution_group_converter.cc | third_party/xla/xla/service/convolution_group_converter_test.cc | #include "xla/service/convolution_group_converter.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
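// Rewrites convolutions whose batch_group_count or feature_group_count is
// greater than one into equivalent convolutions without groups.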
class ConvolutionVisitor : public DfsHloVisitorWithDefault {
public:
absl::Status DefaultAction(HloInstruction* ) override {
return absl::OkStatus();
}
absl::Status HandleConvolution(HloInstruction* convolution) override;
absl::Status HandleBatchGroupCount(HloInstruction* convolution);
static bool Run(HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion);
bool changed() const { return changed_; }
~ConvolutionVisitor() override = default;
private:
explicit ConvolutionVisitor(
HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion)
: computation_(computation),
filter_expansion_(filter_expansion),
convert_batch_groups_only_(convert_batch_groups_only),
should_expand_(should_expand),
is_cost_viable_(is_cost_viable) {}
HloComputation* computation_;
bool changed_ = false;
bool filter_expansion_;
bool convert_batch_groups_only_;
std::function<bool(HloInstruction*)> should_expand_;
std::function<bool(HloInstruction*)> is_cost_viable_;
};
bool ConvolutionVisitor::Run(
HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion) {
ConvolutionVisitor visitor(computation, should_expand, is_cost_viable,
convert_batch_groups_only, filter_expansion);
TF_CHECK_OK(computation->Accept(&visitor));
return visitor.changed_;
}
Shape ExpandedFilterShape(const Shape& shape, int64_t group_count,
int64_t input_feature_dim) {
int64_t num_dims = shape.dimensions_size();
CHECK_GE(num_dims, 2);
Shape expanded_shape = shape;
expanded_shape.set_dimensions(
input_feature_dim, shape.dimensions(input_feature_dim) * group_count);
return expanded_shape;
}
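// Returns a vector of group ids, each repeated `group_size` times:
// [0, ..., 0, 1, ..., 1, ...].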
std::vector<int32_t> GetMaskIds(int64_t group_size, int64_t group_count) {
std::vector<int32_t> values;
values.reserve(group_count * group_size);
for (int i = 0; i < group_count; ++i) {
for (int j = 0; j < group_size; ++j) {
values.push_back(i);
}
}
return values;
}
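// Builds a boolean mask over the expanded filter that is true exactly where
// an input feature and an output feature belong to the same group.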
HloInstruction* GetExpandedFilterMask(
const Shape& filter_shape, int64_t kernel_input_feature_dim,
int64_t kernel_output_feature_dim, int64_t group_count,
const std::function<HloInstruction*(std::unique_ptr<HloInstruction>)>&
add_instruction) {
Shape expanded_filter_shape =
ExpandedFilterShape(filter_shape, group_count, kernel_input_feature_dim);
Shape mask_shape =
ShapeUtil::MakeShape(S32, expanded_filter_shape.dimensions());
int64_t output_feature = filter_shape.dimensions(kernel_output_feature_dim);
int64_t group_size = filter_shape.dimensions(kernel_input_feature_dim);
const std::vector<int32_t> input_feature_filter_mask =
GetMaskIds(group_size, group_count);
const std::vector<int32_t> output_feature_filter_mask =
GetMaskIds(output_feature / group_count, group_count);
auto mask1 = add_instruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int32_t>(input_feature_filter_mask)));
auto broadcasted_mask1 = add_instruction(HloInstruction::CreateBroadcast(
mask_shape, mask1, {kernel_input_feature_dim}));
auto mask2 = add_instruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int32_t>(output_feature_filter_mask)));
auto broadcasted_mask2 = add_instruction(HloInstruction::CreateBroadcast(
mask_shape, mask2, {kernel_output_feature_dim}));
Shape predicate_shape =
ShapeUtil::MakeShape(PRED, expanded_filter_shape.dimensions());
return add_instruction(HloInstruction::CreateCompare(
predicate_shape, broadcasted_mask1, broadcasted_mask2,
ComparisonDirection::kEq));
}
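// Lowers a convolution with batch_group_count > 1. When the input batch or
// output feature count differs from the group count, the groups are split
// out into an extra spatial dimension; otherwise an ungrouped convolution
// is masked and reduced over the batch dimension instead.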
absl::Status ConvolutionVisitor::HandleBatchGroupCount(
HloInstruction* convolution) {
auto dim_numbers = convolution->convolution_dimension_numbers();
auto activation = convolution->mutable_operand(0);
auto filter = convolution->mutable_operand(1);
int64_t batch_group_count = convolution->batch_group_count();
if (batch_group_count == 1 ||
(should_expand_ && !should_expand_(convolution))) {
return absl::OkStatus();
}
VLOG(2) << "Dealing with batch_group_count " << batch_group_count
<< " for convolution " << convolution->ToString() << "\n";
auto add = [&](std::unique_ptr<HloInstruction> inst) {
return computation_->AddInstruction(std::move(inst));
};
int64_t input_batch_dimension = dim_numbers.input_batch_dimension();
const int64_t input_feature_dimension = dim_numbers.input_feature_dimension();
int64_t output_batch_dimension = dim_numbers.output_batch_dimension();
int64_t output_feature_dimension = dim_numbers.output_feature_dimension();
const int64_t kernel_input_feature_dimension =
dim_numbers.kernel_input_feature_dimension();
const int64_t kernel_output_feature_dimension =
dim_numbers.kernel_output_feature_dimension();
const int64_t input_batch =
activation->shape().dimensions(input_batch_dimension);
const int64_t output_feature =
filter->shape().dimensions(kernel_output_feature_dimension);
if (output_feature != batch_group_count || input_batch != batch_group_count) {
std::vector<int64_t> input_sizes(activation->shape().dimensions().begin(),
activation->shape().dimensions().end());
input_sizes[input_batch_dimension] /= batch_group_count;
input_sizes.insert(input_sizes.begin() + input_batch_dimension,
batch_group_count);
activation = MakeReshapeHlo(input_sizes, activation).value();
for (auto& d : *dim_numbers.mutable_input_spatial_dimensions()) {
if (d > input_batch_dimension) {
++d;
}
}
dim_numbers.add_input_spatial_dimensions(input_batch_dimension);
dim_numbers.set_input_batch_dimension(input_batch_dimension + 1);
if (input_feature_dimension > input_batch_dimension) {
dim_numbers.set_input_feature_dimension(input_feature_dimension + 1);
}
std::vector<int64_t> kernel_sizes(filter->shape().dimensions().begin(),
filter->shape().dimensions().end());
kernel_sizes[kernel_output_feature_dimension] /= batch_group_count;
kernel_sizes.insert(kernel_sizes.begin() + kernel_output_feature_dimension,
batch_group_count);
filter = MakeReshapeHlo(kernel_sizes, filter).value();
for (auto& d : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (d > kernel_output_feature_dimension) {
++d;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dimension);
dim_numbers.set_kernel_output_feature_dimension(
kernel_output_feature_dimension + 1);
if (kernel_input_feature_dimension > kernel_output_feature_dimension) {
dim_numbers.set_kernel_input_feature_dimension(
kernel_input_feature_dimension + 1);
}
for (auto& d : *dim_numbers.mutable_output_spatial_dimensions()) {
if (d > output_feature_dimension) {
++d;
}
}
dim_numbers.add_output_spatial_dimensions(output_feature_dimension);
dim_numbers.set_output_feature_dimension(output_feature_dimension + 1);
if (output_batch_dimension > output_feature_dimension) {
dim_numbers.set_output_batch_dimension(output_batch_dimension + 1);
}
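// A new degenerate spatial dimension (base dilation and window size of
// batch_group_count, stride batch_group_count - 1) pairs each input group
// only with the matching kernel group, so the groups never mix.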
Window window = convolution->window();
auto window_dim = window.add_dimensions();
window_dim->set_base_dilation(batch_group_count);
window_dim->set_size(batch_group_count);
window_dim->set_stride(batch_group_count - 1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_reversal(false);
window_dim->set_window_dilation(1);
HloInstruction* new_convolution =
MakeConvolveHlo(
activation, filter, convolution->feature_group_count(),
1, window, dim_numbers,
convolution->precision_config(),
convolution->shape().element_type())
.value();
convolution->SetupDerivedInstruction(new_convolution);
TF_CHECK_OK(computation_->ReplaceInstruction(
convolution,
MakeReshapeHlo(convolution->shape(), new_convolution).value()));
changed_ = true;
return absl::OkStatus();
}
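// Fallback: run the convolution ungrouped, zero out cross-group products
// with a select on the group mask, then sum the surviving terms using a
// strided reduce-window over the output batch dimension.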
VLOG(2) << "is_cost_viable_ " << is_cost_viable_(convolution);
const bool cost_too_high = !is_cost_viable_(convolution);
if (cost_too_high || filter_expansion_) {
HloInstruction* filter_mask =
GetExpandedFilterMask(convolution->shape(), output_batch_dimension,
output_feature_dimension, batch_group_count, add);
auto expanded_filter_shape = ExpandedFilterShape(
convolution->shape(), batch_group_count, output_batch_dimension);
VLOG(2) << "output_batch_dimension " << output_batch_dimension;
VLOG(2) << "New output shape of convolution "
<< expanded_filter_shape.ToString();
auto new_convolution = add(HloInstruction::CreateConvolve(
expanded_filter_shape, activation, filter,
1, 1,
convolution->window(), dim_numbers, convolution->precision_config()));
VLOG(2) << "Expanded convolution " << new_convolution->ToString();
auto zero = add(HloInstruction::CreateConstant(
LiteralUtil::Zero(expanded_filter_shape.element_type())));
auto zero_filter =
add(HloInstruction::CreateBroadcast(expanded_filter_shape, zero, {}));
auto new_filter = add(HloInstruction::CreateTernary(
expanded_filter_shape, HloOpcode::kSelect, filter_mask, new_convolution,
zero_filter));
PrimitiveType reduce_type = new_filter->shape().element_type();
auto reduce_window_shape = new_convolution->shape();
reduce_window_shape.set_dimensions(output_batch_dimension, 1);
if (primitive_util::BitWidth(reduce_type) < primitive_util::BitWidth(F32)) {
reduce_type = F32;
reduce_window_shape.set_element_type(F32);
Shape convert_shape = new_filter->shape();
convert_shape.set_element_type(F32);
new_filter =
add(HloInstruction::CreateConvert(convert_shape, new_filter));
}
auto zero_literal = LiteralUtil::Zero(reduce_type);
auto zero_scalar =
add(HloInstruction::CreateConstant(std::move(zero_literal)));
auto reduce_function = [&]() -> HloComputation* {
HloComputation::Builder b("add_computation");
Shape shape = ShapeUtil::MakeShape(reduce_type, {});
auto lhs =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "lhs"));
auto rhs =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "rhs"));
auto scalar_op = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, lhs, rhs));
return computation_->parent()->AddEmbeddedComputation(b.Build(scalar_op));
};
Window window;
for (int64_t i = 0; i < new_convolution->shape().dimensions_size(); ++i) {
auto* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
if (i == output_batch_dimension) {
dim->set_stride(batch_group_count);
dim->set_size(batch_group_count);
} else {
dim->set_stride(1);
dim->set_size(1);
}
}
auto reduce_window = add(HloInstruction::CreateReduceWindow(
reduce_window_shape, new_filter, zero_scalar, window,
reduce_function()));
Shape convert_back_shape = reduce_window->shape();
convert_back_shape.set_element_type(activation->shape().element_type());
auto reduce_window_converted =
HloInstruction::CreateConvert(convert_back_shape, reduce_window);
TF_CHECK_OK(computation_->ReplaceWithNewInstruction(
convolution, std::move(reduce_window_converted)));
changed_ = true;
}
return absl::OkStatus();
}
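// Lowers feature_group_count > 1. Depthwise convolutions (group size 1) are
// left alone, expanded into a masked ungrouped convolution, or reshaped so
// the depth multiplier becomes a spatial dimension; other grouped
// convolutions are reshaped with the groups split into an extra spatial
// dimension.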
absl::Status ConvolutionVisitor::HandleConvolution(
HloInstruction* convolution) {
if (convert_batch_groups_only_) {
return HandleBatchGroupCount(convolution);
}
auto add = [&](std::unique_ptr<HloInstruction> inst) {
return computation_->AddInstruction(std::move(inst));
};
int64_t group_count = convolution->feature_group_count();
if (group_count == 1 || (should_expand_ && !should_expand_(convolution))) {
return absl::OkStatus();
}
changed_ = true;
ConvolutionDimensionNumbers dim_numbers =
convolution->convolution_dimension_numbers();
auto filter = convolution->mutable_operand(1);
int64_t kernel_input_feature_dim =
dim_numbers.kernel_input_feature_dimension();
int64_t group_size = filter->shape().dimensions(kernel_input_feature_dim);
int64_t kernel_output_feature_dim =
dim_numbers.kernel_output_feature_dimension();
auto expanded_filter_shape = ExpandedFilterShape(filter->shape(), group_count,
kernel_input_feature_dim);
HloInstruction* filter_mask =
GetExpandedFilterMask(filter->shape(), kernel_input_feature_dim,
kernel_output_feature_dim, group_count, add);
HloInstruction* expanded_filter;
if (group_size == 1) {
bool depthwise_separable =
(group_count == filter->shape().dimensions(kernel_output_feature_dim));
if (!filter_expansion_ && depthwise_separable) {
changed_ = false;
return absl::OkStatus();
}
VLOG(2) << "is_cost_viable_ " << is_cost_viable_(convolution);
if (!is_cost_viable_(convolution) || filter_expansion_) {
Shape reshaped_filter_shape =
ShapeUtil::DeleteDimension(kernel_input_feature_dim, filter->shape());
auto reshaped_filter =
add(HloInstruction::CreateReshape(reshaped_filter_shape, filter));
std::vector<int64_t> broadcast_dims;
for (int64_t i = 0; i < filter->shape().dimensions_size(); ++i) {
if (i == kernel_input_feature_dim) {
continue;
}
broadcast_dims.push_back(i);
}
expanded_filter = add(HloInstruction::CreateBroadcast(
expanded_filter_shape, reshaped_filter, broadcast_dims));
auto zero = add(HloInstruction::CreateConstant(
LiteralUtil::Zero(expanded_filter_shape.element_type())));
auto zero_filter =
add(HloInstruction::CreateBroadcast(expanded_filter_shape, zero, {}));
auto new_filter = add(HloInstruction::CreateTernary(
expanded_filter_shape, HloOpcode::kSelect, filter_mask,
expanded_filter, zero_filter));
auto new_convolution = HloInstruction::CreateConvolve(
convolution->shape(), convolution->mutable_operand(0), new_filter,
1, 1,
convolution->window(), dim_numbers, convolution->precision_config());
return computation_->ReplaceWithNewInstruction(
convolution, std::move(new_convolution));
}
std::vector<int64_t> new_filter_dimension;
new_filter_dimension.reserve(filter->shape().rank() + 1);
const int64_t depthwise_multiplier =
filter->shape().dimensions(kernel_output_feature_dim) / group_count;
for (int64_t i = 0; i < filter->shape().rank(); ++i) {
if (i == kernel_output_feature_dim) {
new_filter_dimension.push_back(group_count);
new_filter_dimension.push_back(depthwise_multiplier);
} else {
new_filter_dimension.push_back(filter->shape().dimensions(i));
}
}
if (kernel_input_feature_dim > kernel_output_feature_dim) {
dim_numbers.set_kernel_input_feature_dimension(kernel_input_feature_dim +
1);
}
for (auto& dim : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (dim > kernel_output_feature_dim) {
++dim;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dim + 1);
HloInstruction* new_filter =
computation_->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(filter->shape().element_type(),
new_filter_dimension),
filter));
auto new_activation_shape = convolution->operand(0)->shape();
dim_numbers.add_input_spatial_dimensions(new_activation_shape.rank());
ShapeUtil::AppendMajorDimension(1, &new_activation_shape);
HloInstruction* new_activation =
computation_->AddInstruction(HloInstruction::CreateReshape(
new_activation_shape, convolution->mutable_operand(0)));
auto new_window = convolution->window();
auto new_dim = new_window.add_dimensions();
new_dim->set_size(depthwise_multiplier);
new_dim->set_window_reversal(true);
new_dim->set_padding_low(depthwise_multiplier - 1);
new_dim->set_padding_high(depthwise_multiplier - 1);
new_dim->set_stride(1);
new_dim->set_window_dilation(1);
new_dim->set_base_dilation(1);
std::vector<int64_t> new_output_dimension;
new_output_dimension.reserve(convolution->shape().rank() + 1);
for (int64_t i = 0; i < convolution->shape().rank(); ++i) {
if (i == dim_numbers.output_feature_dimension()) {
new_output_dimension.push_back(group_count);
new_output_dimension.push_back(depthwise_multiplier);
} else {
new_output_dimension.push_back(convolution->shape().dimensions(i));
}
}
if (dim_numbers.output_batch_dimension() >
dim_numbers.output_feature_dimension()) {
dim_numbers.set_output_batch_dimension(
dim_numbers.output_batch_dimension() + 1);
}
for (auto& dim : *dim_numbers.mutable_output_spatial_dimensions()) {
if (dim > dim_numbers.output_feature_dimension()) {
++dim;
}
}
dim_numbers.add_output_spatial_dimensions(
dim_numbers.output_feature_dimension() + 1);
auto new_convolution_output_shape = ShapeUtil::MakeShape(
convolution->shape().element_type(), new_output_dimension);
HloInstruction* new_convolution =
computation_->AddInstruction(HloInstruction::CreateConvolve(
new_convolution_output_shape, new_activation, new_filter,
group_count, 1,
new_window, dim_numbers, convolution->precision_config()));
return computation_->ReplaceWithNewInstruction(
convolution,
HloInstruction::CreateReshape(convolution->shape(), new_convolution));
}
HloInstruction* activation = convolution->mutable_operand(0);
std::vector<int64_t> input_sizes(activation->shape().dimensions().begin(),
activation->shape().dimensions().end());
const int64_t input_feature_dimension = dim_numbers.input_feature_dimension();
input_sizes[input_feature_dimension] /= group_count;
input_sizes.insert(input_sizes.begin() + input_feature_dimension,
group_count);
activation = MakeReshapeHlo(input_sizes, activation).value();
for (auto& d : *dim_numbers.mutable_input_spatial_dimensions()) {
if (d > input_feature_dimension) {
++d;
}
}
dim_numbers.add_input_spatial_dimensions(input_feature_dimension);
dim_numbers.set_input_feature_dimension(input_feature_dimension + 1);
if (dim_numbers.input_batch_dimension() > input_feature_dimension) {
dim_numbers.set_input_batch_dimension(dim_numbers.input_batch_dimension() +
1);
}
std::vector<int64_t> kernel_sizes(filter->shape().dimensions().begin(),
filter->shape().dimensions().end());
const int64_t kernel_output_feature_dimension =
dim_numbers.kernel_output_feature_dimension();
kernel_sizes[kernel_output_feature_dimension] /= group_count;
kernel_sizes.insert(kernel_sizes.begin() + kernel_output_feature_dimension,
group_count);
filter = MakeReshapeHlo(kernel_sizes, filter).value();
for (auto& d : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (d > kernel_output_feature_dimension) {
++d;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dimension);
dim_numbers.set_kernel_output_feature_dimension(
kernel_output_feature_dimension + 1);
if (dim_numbers.kernel_input_feature_dimension() >
kernel_output_feature_dimension) {
dim_numbers.set_kernel_input_feature_dimension(
dim_numbers.kernel_input_feature_dimension() + 1);
}
const int64_t output_feature_dimension =
dim_numbers.output_feature_dimension();
for (auto& d : *dim_numbers.mutable_output_spatial_dimensions()) {
if (d > output_feature_dimension) {
++d;
}
}
dim_numbers.add_output_spatial_dimensions(output_feature_dimension);
dim_numbers.set_output_feature_dimension(output_feature_dimension + 1);
if (dim_numbers.output_batch_dimension() > output_feature_dimension) {
dim_numbers.set_output_batch_dimension(
dim_numbers.output_batch_dimension() + 1);
}
Window window = convolution->window();
auto window_dim = window.add_dimensions();
window_dim->set_base_dilation(group_count);
window_dim->set_size(group_count);
window_dim->set_stride(group_count - 1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_reversal(false);
window_dim->set_window_dilation(1);
HloInstruction* new_convolution =
MakeConvolveHlo(
activation, filter, 1,
1, window, dim_numbers,
convolution->precision_config(),
convolution->shape().element_type())
.value();
convolution->SetupDerivedInstruction(new_convolution);
changed_ = true;
return computation_->ReplaceInstruction(
convolution,
MakeReshapeHlo(convolution->shape(), new_convolution).value());
}
}
absl::StatusOr<bool> ConvolutionGroupConverter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "ConvolutionGroupConverter::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
if (ConvolutionVisitor::Run(comp, should_expand_, is_cost_viable_,
convert_batch_groups_only_,
filter_expansion_)) {
changed = true;
}
}
XLA_VLOG_LINES(
2, "ConvolutionGroupConverter::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/convolution_group_converter.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
using ConvolutionGroupConverterTest = HloTestBase;
namespace op = testing::opcode_matchers;
TEST_F(ConvolutionGroupConverterTest,
ConvertFeatureGroupCountEqualToInputFeatureDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,2], filter: f32[1,1,2]) -> f32[1,2,2] {
%input = f32[1,2,2]{2,1,0} parameter(0)
%copy = f32[1,2,2]{2,0,1} copy(f32[1,2,2]{2,1,0} %input)
%filter = f32[1,1,2]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,2]{2,0,1} convolution(f32[1,2,2]{2,0,1} %copy, f32[1,1,2]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, feature_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return true; };
ConvolutionGroupConverter converter(should_expand, cost_model,
false);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->feature_group_count(), 1);
EXPECT_THAT(root->operand(1),
op::Select(op::Eq(op::Broadcast(op::Constant()),
op::Broadcast(op::Constant())),
op::Broadcast(op::Reshape(op::Parameter())),
op::Broadcast(op::Constant())));
}
TEST_F(ConvolutionGroupConverterTest,
ConvertFeatureGroupCountDivisorOfInputFeatureDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,4], filter: f32[1,2,2]) -> f32[1,2,2] {
%input = f32[1,2,4]{2,1,0} parameter(0)
%copy = f32[1,2,4]{2,0,1} copy(f32[1,2,4]{2,1,0} %input)
%filter = f32[1,2,2]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,2]{2,0,1} convolution(f32[1,2,4]{2,0,1} %copy, f32[1,2,2]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, feature_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return true; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
false);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->operand(0)->feature_group_count(), 1);
EXPECT_EQ(root->operand(0)->shape().rank(), 4);
}
TEST_F(ConvolutionGroupConverterTest,
ConvertBatchGroupCountEqualToInputBatchDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[16,19,19,512]{3,2,1,0}, filter: f32[16,19,19,512]{3,2,1,0}) -> f32[3,3,512,1]{3,2,1,0} {
%input = f32[16,19,19,512]{3,2,1,0} parameter(0)
%filter = f32[16,19,19,512]{3,2,1,0} parameter(1)
ROOT %convolution = f32[3,3,512,1]{3,2,1,0} convolution(f32[16,19,19,512]{3,2,1,0} %input, f32[16,19,19,512]{3,2,1,0} %filter), window={size=19x19 pad=1_1x1_1}, dim_labels=f01b_i01o->01fb, batch_group_count=512
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return false; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
true);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvert);
EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kReduceWindow);
}
TEST_F(ConvolutionGroupConverterTest,
ConvertBatchGroupCountNotEqualToInputBatchDim) {
std::string hlo_string = R"(HloModule m
ENTRY main {
%input = f32[1,1,1,4] parameter(0)
%filter = f32[1,1,1,2] parameter(1)
ROOT %convolution = f32[1,1,2,2] convolution(%input,%filter),
window={size=1x1}, dim_labels=f01b_i01o->01fb, batch_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return false; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
true);
ASSERT_TRUE(converter.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_group_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_group_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
44971ec1-3e87-4c7a-9137-ad2e8869f3ef | cpp | google/quiche | hybrid_slow_start | quiche/quic/core/congestion_control/hybrid_slow_start.cc | quiche/quic/core/congestion_control/hybrid_slow_start_test.cc | #include "quiche/quic/core/congestion_control/hybrid_slow_start.h"
#include <algorithm>
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
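// HyStart tuning constants: exit checks are suppressed below a 16-packet
// congestion window, each round takes 8 RTT samples before the delay test
// fires, and the delay-increase threshold is min_rtt/8 clamped to
// [4ms, 16ms].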
const int64_t kHybridStartLowWindow = 16;
const uint32_t kHybridStartMinSamples = 8;
const int kHybridStartDelayFactorExp = 3;
const int64_t kHybridStartDelayMinThresholdUs = 4000;
const int64_t kHybridStartDelayMaxThresholdUs = 16000;
HybridSlowStart::HybridSlowStart()
: started_(false),
hystart_found_(NOT_FOUND),
rtt_sample_count_(0),
current_min_rtt_(QuicTime::Delta::Zero()) {}
void HybridSlowStart::OnPacketAcked(QuicPacketNumber acked_packet_number) {
if (IsEndOfRound(acked_packet_number)) {
started_ = false;
}
}
void HybridSlowStart::OnPacketSent(QuicPacketNumber packet_number) {
last_sent_packet_number_ = packet_number;
}
void HybridSlowStart::Restart() {
started_ = false;
hystart_found_ = NOT_FOUND;
}
void HybridSlowStart::StartReceiveRound(QuicPacketNumber last_sent) {
QUIC_DVLOG(1) << "Reset hybrid slow start @" << last_sent;
end_packet_number_ = last_sent;
current_min_rtt_ = QuicTime::Delta::Zero();
rtt_sample_count_ = 0;
started_ = true;
}
bool HybridSlowStart::IsEndOfRound(QuicPacketNumber ack) const {
return !end_packet_number_.IsInitialized() || end_packet_number_ <= ack;
}
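// HyStart delay-increase detection: track the minimum of the first
// kHybridStartMinSamples RTTs in this round and exit slow start once that
// minimum exceeds min_rtt by the clamped threshold above.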
bool HybridSlowStart::ShouldExitSlowStart(QuicTime::Delta latest_rtt,
QuicTime::Delta min_rtt,
QuicPacketCount congestion_window) {
if (!started_) {
StartReceiveRound(last_sent_packet_number_);
}
if (hystart_found_ != NOT_FOUND) {
return true;
}
rtt_sample_count_++;
if (rtt_sample_count_ <= kHybridStartMinSamples) {
if (current_min_rtt_.IsZero() || current_min_rtt_ > latest_rtt) {
current_min_rtt_ = latest_rtt;
}
}
if (rtt_sample_count_ == kHybridStartMinSamples) {
int64_t min_rtt_increase_threshold_us =
min_rtt.ToMicroseconds() >> kHybridStartDelayFactorExp;
min_rtt_increase_threshold_us = std::min(min_rtt_increase_threshold_us,
kHybridStartDelayMaxThresholdUs);
QuicTime::Delta min_rtt_increase_threshold =
QuicTime::Delta::FromMicroseconds(std::max(
min_rtt_increase_threshold_us, kHybridStartDelayMinThresholdUs));
if (current_min_rtt_ > min_rtt + min_rtt_increase_threshold) {
hystart_found_ = DELAY;
}
}
return congestion_window >= kHybridStartLowWindow &&
hystart_found_ != NOT_FOUND;
}
} | #include "quiche/quic/core/congestion_control/hybrid_slow_start.h"
#include <memory>
#include <utility>
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
class HybridSlowStartTest : public QuicTest {
protected:
HybridSlowStartTest()
: one_ms_(QuicTime::Delta::FromMilliseconds(1)),
rtt_(QuicTime::Delta::FromMilliseconds(60)) {}
void SetUp() override { slow_start_ = std::make_unique<HybridSlowStart>(); }
const QuicTime::Delta one_ms_;
const QuicTime::Delta rtt_;
std::unique_ptr<HybridSlowStart> slow_start_;
};
TEST_F(HybridSlowStartTest, Simple) {
QuicPacketNumber packet_number(1);
QuicPacketNumber end_packet_number(3);
slow_start_->StartReceiveRound(end_packet_number);
EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number));
EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
end_packet_number = QuicPacketNumber(20);
slow_start_->StartReceiveRound(end_packet_number);
while (packet_number < end_packet_number) {
EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
}
EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
}
TEST_F(HybridSlowStartTest, Delay) {
const int kHybridStartMinSamples = 8;
QuicPacketNumber end_packet_number(1);
slow_start_->StartReceiveRound(end_packet_number++);
for (int n = 0; n < kHybridStartMinSamples; ++n) {
EXPECT_FALSE(slow_start_->ShouldExitSlowStart(
rtt_ + QuicTime::Delta::FromMilliseconds(n), rtt_, 100));
}
slow_start_->StartReceiveRound(end_packet_number++);
for (int n = 1; n < kHybridStartMinSamples; ++n) {
EXPECT_FALSE(slow_start_->ShouldExitSlowStart(
rtt_ + QuicTime::Delta::FromMilliseconds(n + 10), rtt_, 100));
}
EXPECT_TRUE(slow_start_->ShouldExitSlowStart(
rtt_ + QuicTime::Delta::FromMilliseconds(10), rtt_, 100));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/congestion_control/hybrid_slow_start.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/congestion_control/hybrid_slow_start_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
965ee7f5-f994-44d4-af96-e70540dd901a | cpp | google/cel-cpp | dyn_type | common/types/dyn_type.h | common/types/dyn_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_DYN_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_DYN_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
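// `dyn` is CEL's dynamic type: it matches any value and carries no state,
// so every DynType instance compares equal and hashes identically.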
class DynType final {
public:
static constexpr TypeKind kKind = TypeKind::kDyn;
static constexpr absl::string_view kName = "dyn";
DynType() = default;
DynType(const DynType&) = default;
DynType(DynType&&) = default;
DynType& operator=(const DynType&) = default;
DynType& operator=(DynType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(DynType&) noexcept {}
};
inline constexpr void swap(DynType& lhs, DynType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(DynType, DynType) { return true; }
inline constexpr bool operator!=(DynType lhs, DynType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, DynType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const DynType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(DynType, Kind) {
EXPECT_EQ(DynType().kind(), DynType::kKind);
EXPECT_EQ(Type(DynType()).kind(), DynType::kKind);
}
TEST(DynType, Name) {
EXPECT_EQ(DynType().name(), DynType::kName);
EXPECT_EQ(Type(DynType()).name(), DynType::kName);
}
TEST(DynType, DebugString) {
{
std::ostringstream out;
out << DynType();
EXPECT_EQ(out.str(), DynType::kName);
}
{
std::ostringstream out;
out << Type(DynType());
EXPECT_EQ(out.str(), DynType::kName);
}
}
TEST(DynType, Hash) {
EXPECT_EQ(absl::HashOf(DynType()), absl::HashOf(DynType()));
}
TEST(DynType, Equal) {
EXPECT_EQ(DynType(), DynType());
EXPECT_EQ(Type(DynType()), DynType());
EXPECT_EQ(DynType(), Type(DynType()));
EXPECT_EQ(Type(DynType()), Type(DynType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/dyn_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/dyn_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
112ffa20-b592-45ff-bca5-897f16c2a502 | cpp | tensorflow/tensorflow | embedding_lookup | tensorflow/lite/kernels/embedding_lookup.cc | tensorflow/lite/kernels/embedding_lookup_test.cc | #include <stdint.h>
#include <cstring>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace embedding_lookup {
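// Looks up rows of the value tensor (input 1) by the int32 ids in the
// lookup tensor (input 0). Float and plain quantized tables are copied
// row-by-row; uint8/int8/int4 tables with a float output are dequantized
// on the fly (hybrid mode), optionally with a per-row scale.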
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1);
TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32);
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &value));
TF_LITE_ENSURE(context, NumDimensions(value) >= 2);
if (value->quantization.type == kTfLiteAffineQuantization) {
const auto qparams = static_cast<const TfLiteAffineQuantization*>(
value->quantization.params);
TF_LITE_ENSURE(context, qparams->scale != nullptr);
TF_LITE_ENSURE(context, qparams->zero_point != nullptr);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
if ((value->type == kTfLiteUInt8 || value->type == kTfLiteInt8 ||
value->type == kTfLiteInt4) &&
(output->type == kTfLiteFloat32)) {
TF_LITE_ENSURE(context, qparams->zero_point->data[0] == 0);
}
if (qparams->scale->size > 1 || qparams->zero_point->size > 1) {
TF_LITE_ENSURE(context, value->type == kTfLiteUInt8 ||
value->type == kTfLiteInt8 ||
value->type == kTfLiteInt4);
TF_LITE_ENSURE(context, output->type == kTfLiteFloat32);
TF_LITE_ENSURE(context, qparams->quantized_dimension == 0);
const int row_size = SizeOfDimension(value, 0);
TF_LITE_ENSURE(context, qparams->scale->size == row_size);
TF_LITE_ENSURE(context, qparams->zero_point->size == row_size);
}
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteIntArray* output_size = TfLiteIntArrayCreate(NumDimensions(value));
output_size->data[0] = SizeOfDimension(lookup, 0);
output_size->data[1] = SizeOfDimension(value, 1);
for (int i = 2; i < NumDimensions(value); i++) {
output_size->data[i] = SizeOfDimension(value, i);
}
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus EvalSimple(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* lookup, const TfLiteTensor* value,
TfLiteTensor* output) {
const int row_size = SizeOfDimension(value, 0);
if (row_size == 0) {
return kTfLiteOk;
}
const int64_t row_bytes = value->bytes / row_size;
char* output_raw = GetTensorData<char>(output);
const char* value_raw = GetTensorData<char>(value);
const int32_t* lookup_data = GetTensorData<int32_t>(lookup);
for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
int64_t idx = lookup_data[i];
if (idx >= row_size || idx < 0) {
TF_LITE_KERNEL_LOG(context,
"Embedding Lookup: index out of bounds. "
"Got %d, and bounds are [0, %d]",
idx, row_size - 1);
return kTfLiteError;
} else {
std::memcpy(output_raw + i * row_bytes, value_raw + idx * row_bytes,
row_bytes);
}
}
return kTfLiteOk;
}
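// Hybrid path: dequantize each looked-up row to float using the
// tensor-wide scale, or the per-row scale when the table is per-channel
// quantized along dimension 0.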
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* lookup, const TfLiteTensor* value,
TfLiteTensor* output) {
const int row_size = SizeOfDimension(value, 0);
int col_size = 1;
for (int i = 1; i < NumDimensions(value); i++) {
col_size *= SizeOfDimension(value, i);
}
float* output_ptr = GetTensorData<float>(output);
const int8_t* value_ptr = GetTensorData<int8_t>(value);
const int32_t* lookup_data = GetTensorData<int32_t>(lookup);
for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
int idx = lookup_data[i];
if (idx >= row_size || idx < 0) {
TF_LITE_KERNEL_LOG(context,
"Embedding Lookup: index out of bounds. "
"Got %d, and bounds are [0, %d]",
idx, row_size - 1);
return kTfLiteError;
} else {
double scaling_factor = value->params.scale;
if (value->quantization.type == kTfLiteAffineQuantization) {
const auto qparams = static_cast<const TfLiteAffineQuantization*>(
value->quantization.params);
if (qparams->scale->size > 1) {
scaling_factor = qparams->scale->data[idx];
}
}
if (value->type == kTfLiteInt4) {
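        // Two int4 values are packed per byte: even elements occupy the low
        // nibble (sign-extended via <<4 then >>4), odd elements the high
        // nibble (arithmetic >>4).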
for (int j = 0; j < col_size; j++) {
int i8_idx = j + idx * col_size;
int i4_idx = i8_idx / 2;
bool even = i8_idx % 2 == 0;
int8_t i4_val = value_ptr[i4_idx];
int8_t i8_val =
even ? static_cast<int8_t>(i4_val << 4) >> 4 : i4_val >> 4;
output_ptr[j + i * col_size] = i8_val * scaling_factor;
}
} else {
for (int j = 0; j < col_size; j++) {
output_ptr[j + i * col_size] =
value_ptr[j + idx * col_size] * scaling_factor;
}
}
}
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &value));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (value->type) {
case kTfLiteFloat32:
return EvalSimple(context, node, lookup, value, output);
case kTfLiteInt4:
return EvalHybrid(context, node, lookup, value, output);
case kTfLiteUInt8:
case kTfLiteInt8:
if (output->type == kTfLiteFloat32) {
return EvalHybrid(context, node, lookup, value, output);
} else {
return EvalSimple(context, node, lookup, value, output);
}
default:
TF_LITE_KERNEL_LOG(context, "Type not currently supported.");
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_EMBEDDING_LOOKUP() {
static TfLiteRegistration r = {nullptr, nullptr, embedding_lookup::Prepare,
embedding_lookup::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <functional>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
float kTestTolerance = 7.41e-03;
using ::testing::ElementsAreArray;
class BaseEmbeddingLookupOpModel : public SingleOpModel {
public:
BaseEmbeddingLookupOpModel(
std::initializer_list<int> index_shape,
std::initializer_list<int> weight_shape,
TensorType weight_type = TensorType_FLOAT32,
TensorType output_type = TensorType_FLOAT32,
const std::vector<float>& per_channel_quantization_scales = {}) {
input_ = AddInput(TensorType_INT32);
if (per_channel_quantization_scales.empty()) {
weight_ = AddInput(weight_type);
} else {
std::vector<int64_t> per_channel_quantization_offsets(
per_channel_quantization_scales.size(), 0);
weight_ = AddInput({weight_type, weight_shape, 0, 0, 0, 0, true,
per_channel_quantization_scales,
per_channel_quantization_offsets, 0});
}
output_ = AddOutput(output_type);
SetBuiltinOp(BuiltinOperator_EMBEDDING_LOOKUP, BuiltinOptions_NONE, 0);
BuildInterpreter({index_shape, weight_shape});
}
void SetInput(std::initializer_list<int> data) {
PopulateTensor(input_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int input_;
int weight_;
int output_;
};
class EmbeddingLookupOpModel : public BaseEmbeddingLookupOpModel {
public:
using BaseEmbeddingLookupOpModel::BaseEmbeddingLookupOpModel;
template <typename T>
void Set3DWeightMatrix(const std::function<T(int, int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(weight_);
int rows = tensor->dims->data[0];
int columns = tensor->dims->data[1];
int features = tensor->dims->data[2];
T* data = GetTensorData<T>(tensor);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < columns; j++) {
for (int k = 0; k < features; k++) {
data[(i * columns + j) * features + k] = function(i, j, k);
}
}
}
}
template <typename T>
void Set2DWeightMatrix(const std::function<T(int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(weight_);
int64_t rows = tensor->dims->data[0];
int64_t columns = tensor->dims->data[1];
T* data = GetTensorData<T>(tensor);
for (int64_t i = 0; i < rows; i++) {
for (int64_t j = 0; j < columns; j++) {
data[i * columns + j] = function(i, j);
}
}
}
};
class HybridEmbeddingLookupOpModel : public BaseEmbeddingLookupOpModel {
public:
HybridEmbeddingLookupOpModel(std::initializer_list<int> index_shape,
std::initializer_list<int> weight_shape,
TensorType type)
: BaseEmbeddingLookupOpModel(index_shape, weight_shape, type) {}
void SetWeight(std::initializer_list<float> data) {
SymmetricQuantizeAndPopulate(weight_, data);
}
void SetSignedWeight(std::initializer_list<float> data) {
SignedSymmetricQuantizeAndPopulate(weight_, data);
}
};
class PerAxisHybridEmbeddingLookupOpModel : public BaseEmbeddingLookupOpModel {
public:
PerAxisHybridEmbeddingLookupOpModel(
std::initializer_list<int> index_shape,
std::initializer_list<int> weight_shape,
const std::vector<float>& per_channel_quantization_scales,
TensorType type)
: BaseEmbeddingLookupOpModel(index_shape, weight_shape, type,
TensorType_FLOAT32,
per_channel_quantization_scales) {}
void SetSignedWeight(std::initializer_list<float> data) {
PerChannelSymmetricQuantizeAndPopulate(weight_, data);
}
};
TEST(EmbeddingLookupOpTest, SimpleTest) {
EmbeddingLookupOpModel m({3}, {3, 2, 4});
m.SetInput({1, 0, 2});
m.Set3DWeightMatrix<float>(
[](int i, int j, int k) -> float { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
})));
}
#if !defined(MEMORY_SANITIZER) && !defined(GOOGLE_UNSUPPORTED_OS_LOONIX) && \
defined(__LP64__)
TEST(EmbeddingLookupOpTest, LargeTableTest) {
EmbeddingLookupOpModel m({1}, {256000, 9216});
m.SetInput({235248});
m.Set2DWeightMatrix<float>(
[](int i, int j) -> float { return j + i / 100.; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<float> exp(9216);
for (int s = 0; s < exp.size(); s++) {
exp[s] = static_cast<float>(s) + 2352.48f;
}
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear(exp)));
}
#endif
TEST(HybridEmbeddingLookupHybridOpTest, Simple2DTestUint8) {
HybridEmbeddingLookupOpModel m({3}, {3, 8}, TensorType_UINT8);
m.SetInput({1, 0, 2});
m.SetWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple3DTestUint8) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 4}, TensorType_UINT8);
m.SetInput({1, 0, 2});
m.SetWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple4DTestUint8) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2}, TensorType_UINT8);
m.SetInput({1, 0, 2});
m.SetWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple2DTestInt8) {
HybridEmbeddingLookupOpModel m({3}, {3, 8}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple3DTestInt8) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 4}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple4DTestInt8) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(EmbeddingLookupHybridOpTest, Simple3DTestQuantized) {
EmbeddingLookupOpModel m({3}, {3, 2, 4}, TensorType_UINT8, TensorType_INT8);
m.SetInput({1, 0, 2});
m.Set3DWeightMatrix<uint8_t>(
[](int i, int j, int k) -> uint8_t { return 100 * i + 10 * j + k; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({
100, 101, 102, 103, 110, 111, 112, 113,
0, 1, 2, 3, 10, 11, 12, 13,
200, 201, 202, 203, 210, 211, 212, 213,
}));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple2DTestInt8) {
PerAxisHybridEmbeddingLookupOpModel m(
{3}, {3, 8}, {0.00102, 0.0089, 0.016772}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple3DTestInt8) {
PerAxisHybridEmbeddingLookupOpModel m(
{3}, {3, 2, 4}, {0.00102, 0.0089, 0.016772}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple4DTestInt8) {
PerAxisHybridEmbeddingLookupOpModel m(
{3}, {3, 2, 2, 2}, {0.00102, 0.0089, 0.016772}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple2DTestInt4) {
PerAxisHybridEmbeddingLookupOpModel m({3}, {3, 8}, {0.001, 0.02, 0.3},
TensorType_INT4);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple3DTestInt4) {
PerAxisHybridEmbeddingLookupOpModel m({3}, {3, 2, 4}, {0.001, 0.02, 0.3},
TensorType_INT4);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple4DTestInt4) {
PerAxisHybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2}, {0.001, 0.02, 0.3},
TensorType_INT4);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
},
kTestTolerance)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/embedding_lookup.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/embedding_lookup_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21efe5f5-c57a-45c6-ac99-949f2a346f49 | cpp | tensorflow/tensorflow | flags | tensorflow/compiler/aot/flags.cc | tensorflow/core/config/flags_test.cc | #include "tensorflow/compiler/aot/flags.h"
namespace tensorflow {
namespace tfcompile {
void AppendMainFlags(std::vector<Flag>* flag_list, MainFlags* flags) {
const std::vector<Flag> tmp = {
{"graph", &flags->graph,
"Input GraphDef file. If the file ends in '.pbtxt' it is expected to "
"be in the human-readable proto text format, otherwise it is expected "
"to be in the proto binary format."},
{"debug_info", &flags->debug_info,
"Graph debug info file. If the file ends in '.pbtxt' it is expected to "
"be in the human-readable proto text format, otherwise it is expected "
"to be in the proto binary format."},
{"debug_info_path_begin_marker", &flags->debug_info_path_begin_marker,
"If not none, only keep the file path in the debug information after the"
" marker. The default value is empty"},
{"config", &flags->config,
"Input file containing Config proto. If the file ends in '.pbtxt' it "
"is expected to be in the human-readable proto text format, otherwise "
"it is expected to be in the proto binary format."},
{"dump_fetch_nodes", &flags->dump_fetch_nodes,
"If set, only flags related to fetches are processed, and the resulting "
"fetch nodes will be dumped to stdout in a comma-separated list. "
"Typically used to format arguments for other tools, e.g. "
"freeze_graph."},
{"target_triple", &flags->target_triple,
"Target platform, similar to the clang -target flag. The general "
"format is <arch><sub>-<vendor>-<sys>-<abi>. "
"http:
{"target_cpu", &flags->target_cpu,
"Target cpu, similar to the clang -mcpu flag. "
"http:
{"target_features", &flags->target_features,
"Target features, e.g. +avx2, +neon, etc."},
{"entry_point", &flags->entry_point,
"Name of the generated function. If multiple generated object files "
"will be linked into the same binary, each will need a unique entry "
"point."},
{"cpp_class", &flags->cpp_class,
"Name of the generated C++ class, wrapping the generated function. The "
"syntax of this flag is [[<optional_namespace>::],...]<class_name>. "
"This mirrors the C++ syntax for referring to a class, where multiple "
"namespaces may precede the class name, separated by double-colons. "
"The class will be generated in the given namespace(s), or if no "
"namespaces are given, within the global namespace."},
{"out_function_object", &flags->out_function_object,
"Output object file containing the generated function for the "
"TensorFlow model."},
{"out_header", &flags->out_header, "Output header file name."},
{"out_metadata_object", &flags->out_metadata_object,
"Output object file name containing optional metadata for the generated "
"function."},
{"out_session_module", &flags->out_session_module,
"Output session module proto."},
{"mlir_components", &flags->mlir_components,
"The MLIR components to enable. Currently only Bridge is supported."},
{"experimental_quantize", &flags->experimental_quantize,
"If set, quantization passes will run and dump the result before HLO "
"code generation."},
{"sanitize_dataflow", &flags->sanitize_dataflow,
"Enable DataFlow Sanitizer pass."},
{"sanitize_abilists_dataflow", &flags->sanitize_abilists_dataflow,
"Comma separated list of ABIList file paths."},
{"gen_name_to_index", &flags->gen_name_to_index,
"Generate name-to-index data for Lookup{Arg,Result}Index methods."},
{"gen_program_shape", &flags->gen_program_shape,
"Generate program shape data for the ProgramShape method."},
};
flag_list->insert(flag_list->end(), tmp.begin(), tmp.end());
}
}
} | #include "tensorflow/core/config/flags.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(TFFlags, ReadFlagValue) {
EXPECT_TRUE(flags::Global().test_only_experiment_1.value());
EXPECT_FALSE(flags::Global().test_only_experiment_2.value());
}
TEST(TFFlags, ResetFlagValue) {
EXPECT_TRUE(flags::Global().test_only_experiment_1.value());
flags::Global().test_only_experiment_1.reset(false);
EXPECT_FALSE(flags::Global().test_only_experiment_1.value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/flags.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/config/flags_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a5f5f26-d97a-447b-bd03-5b3dc414fadb | cpp | tensorflow/tensorflow | insert_logging | tensorflow/tools/graph_transforms/insert_logging.cc | tensorflow/tools/graph_transforms/insert_logging_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
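// Graph transform that splices a Print op after every node matching the
// requested op types and/or name prefixes, rewiring downstream consumers to
// read through the Print op so tensor values get logged at runtime.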
Status InsertLogging(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
std::unordered_set<string> ops;
bool has_ops;
if (context.params.count("op")) {
has_ops = true;
for (const string& op : context.params.at("op")) {
ops.insert(op);
}
} else {
has_ops = false;
}
std::unordered_set<string> prefixes;
bool has_prefixes;
if (context.params.count("prefix")) {
has_prefixes = true;
for (const string& prefix : context.params.at("prefix")) {
prefixes.insert(prefix);
}
} else {
has_prefixes = false;
}
string message;
TF_RETURN_IF_ERROR(context.GetOneStringParameter("message", "", &message));
bool show_name;
TF_RETURN_IF_ERROR(
context.GetOneBoolParameter("show_name", false, &show_name));
bool show_op;
TF_RETURN_IF_ERROR(context.GetOneBoolParameter("show_op", false, &show_op));
int32_t first_n;
TF_RETURN_IF_ERROR(context.GetOneInt32Parameter("first_n", -1, &first_n));
int32_t summarize;
TF_RETURN_IF_ERROR(
context.GetOneInt32Parameter("summarize", 1024, &summarize));
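// Record which output ports of each node are actually consumed downstream,
// so the Print node can take exactly those tensors as extra inputs.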
std::unordered_map<string, std::set<int>> node_outputs;
for (const NodeDef& node : input_graph_def.node()) {
for (const string& input : node.input()) {
const string canonical_input = CanonicalInputName(input);
string prefix;
string name;
string suffix;
NodeNamePartsFromInput(canonical_input, &prefix, &name, &suffix);
const string output_index_string = suffix.substr(1, suffix.size() - 1);
int32_t output_index;
if (!strings::safe_strto32(output_index_string, &output_index)) {
return errors::InvalidArgument("Couldn't understand output number in ",
input);
}
node_outputs[name].insert(output_index);
}
}
std::map<string, string> inputs_to_rename;
std::unordered_set<string> ignore_when_renaming;
GraphDef logged_graph_def;
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = logged_graph_def.mutable_node()->Add();
*new_node = node;
if (node_outputs[node.name()].empty()) {
continue;
}
const bool op_matches = (ops.count(node.op()) > 0);
bool prefix_matches = false;
for (const string& prefix : prefixes) {
if (absl::StartsWith(node.name(), prefix)) {
prefix_matches = true;
}
}
if ((!has_ops || op_matches) && (!has_prefixes || prefix_matches)) {
const string name_suffix = "__print__";
DataTypeVector input_types;
DataTypeVector output_types;
TF_RETURN_IF_ERROR(GetInOutTypes(node, &input_types, &output_types));
NodeDef* print_node = logged_graph_def.mutable_node()->Add();
print_node->set_op("Print");
print_node->set_name(strings::StrCat(node.name(), name_suffix));
string node_message;
if (show_op) {
node_message += ";" + node.op() + ";";
}
if (show_name) {
node_message += ";" + print_node->name() + ";";
}
node_message += message;
SetNodeAttr("message", node_message, print_node);
SetNodeAttr("first_n", first_n, print_node);
SetNodeAttr("summarize", summarize, print_node);
print_node->add_input(node.name() + ":0");
SetNodeAttr("T", output_types[0], print_node);
for (int output_index : node_outputs[node.name()]) {
print_node->add_input(strings::StrCat(node.name(), ":", output_index));
}
SetNodeAttr("U", output_types, print_node);
ignore_when_renaming.insert(print_node->name());
inputs_to_rename[node.name() + ":0"] =
strings::StrCat(node.name(), name_suffix, ":0");
}
}
output_graph_def->Clear();
return RenameNodeInputs(logged_graph_def, inputs_to_rename,
ignore_when_renaming, output_graph_def);
}
REGISTER_GRAPH_TRANSFORM("insert_logging", InsertLogging);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status InsertLogging(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class InsertLoggingTest : public ::testing::Test {
protected:
void CheckGraphCanRun(const GraphDef& graph_def,
const std::vector<string>& output_names) {
std::unique_ptr<Session> session(NewSession(SessionOptions()));
TF_ASSERT_OK(session->Create(graph_def));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, output_names, {}, &outputs));
}
void TestInsertLogging() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor const_tensor(DT_FLOAT, TensorShape({10}));
test::FillIota<float>(&const_tensor, 1.0f);
Output const_node1 =
Const(root.WithOpName("const_node1"), Input::Initializer(const_tensor));
Output const_node2 =
Const(root.WithOpName("const_node2"), Input::Initializer(const_tensor));
Output const_node3 =
Const(root.WithOpName("const_node3"), Input::Initializer(const_tensor));
Output add_node2 =
Add(root.WithOpName("add_node2"), const_node1, const_node2);
Output add_node3 =
Add(root.WithOpName("add_node3"), const_node1, const_node3);
Output mul_node1 = Mul(root.WithOpName("mul_node1"), add_node2, add_node3);
Output add_node4 =
Add(root.WithOpName("add_node4"), mul_node1, const_node3);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
CheckGraphCanRun(graph_def, {"add_node4"});
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"add_node4"};
TF_ASSERT_OK(InsertLogging(graph_def, context, &result));
CheckGraphCanRun(result, {"add_node4"});
std::unordered_set<string> print_inputs;
for (const NodeDef& node : result.node()) {
if (node.op() == "Print") {
print_inputs.insert(node.input(0));
}
}
EXPECT_EQ(6, print_inputs.size());
EXPECT_EQ(1, print_inputs.count("mul_node1:0"));
EXPECT_EQ(1, print_inputs.count("add_node2:0"));
EXPECT_EQ(1, print_inputs.count("add_node3:0"));
EXPECT_EQ(0, print_inputs.count("add_node4:0"));
EXPECT_EQ(1, print_inputs.count("const_node1:0"));
EXPECT_EQ(1, print_inputs.count("const_node2:0"));
EXPECT_EQ(1, print_inputs.count("const_node3:0"));
}
void TestInsertLoggingByOpType() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor const_tensor(DT_FLOAT, TensorShape({10}));
test::FillIota<float>(&const_tensor, 1.0f);
Output const_node1 =
Const(root.WithOpName("const_node1"), Input::Initializer(const_tensor));
Output const_node2 =
Const(root.WithOpName("const_node2"), Input::Initializer(const_tensor));
Output const_node3 =
Const(root.WithOpName("const_node3"), Input::Initializer(const_tensor));
Output add_node2 =
Add(root.WithOpName("add_node2"), const_node1, const_node2);
Output add_node3 =
Add(root.WithOpName("add_node3"), const_node1, const_node3);
Output mul_node1 = Mul(root.WithOpName("mul_node1"), add_node2, add_node3);
Output add_node4 =
Add(root.WithOpName("add_node4"), mul_node1, const_node3);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
CheckGraphCanRun(graph_def, {"add_node4"});
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"add_node4"};
context.params.insert(
std::pair<string, std::vector<string>>({"op", {"Mul", "Add"}}));
TF_ASSERT_OK(InsertLogging(graph_def, context, &result));
CheckGraphCanRun(result, {"add_node4"});
std::unordered_set<string> print_inputs;
for (const NodeDef& node : result.node()) {
if (node.op() == "Print") {
print_inputs.insert(node.input(0));
}
}
EXPECT_EQ(3, print_inputs.size());
EXPECT_EQ(1, print_inputs.count("mul_node1:0"));
EXPECT_EQ(1, print_inputs.count("add_node2:0"));
EXPECT_EQ(1, print_inputs.count("add_node3:0"));
EXPECT_EQ(0, print_inputs.count("add_node4:0"));
EXPECT_EQ(0, print_inputs.count("const_node1:0"));
EXPECT_EQ(0, print_inputs.count("const_node2:0"));
EXPECT_EQ(0, print_inputs.count("const_node3:0"));
}
void TestInsertLoggingByPrefix() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor const_tensor(DT_FLOAT, TensorShape({10}));
test::FillIota<float>(&const_tensor, 1.0f);
Output const_node1 =
Const(root.WithOpName("const_node1"), Input::Initializer(const_tensor));
Output const_node2 =
Const(root.WithOpName("const_node2"), Input::Initializer(const_tensor));
Output const_node3 =
Const(root.WithOpName("const_node3"), Input::Initializer(const_tensor));
Output add_node2 =
Add(root.WithOpName("add_node2"), const_node1, const_node2);
Output add_node3 =
Add(root.WithOpName("add_node3"), const_node1, const_node3);
Output mul_node1 = Mul(root.WithOpName("mul_node1"), add_node2, add_node3);
Output add_node4 =
Add(root.WithOpName("add_node4"), mul_node1, const_node3);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
CheckGraphCanRun(graph_def, {"add_node4"});
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"add_node4"};
context.params.insert(
std::pair<string, std::vector<string>>({"prefix", {"add_node"}}));
TF_ASSERT_OK(InsertLogging(graph_def, context, &result));
CheckGraphCanRun(result, {"add_node4"});
std::unordered_set<string> print_inputs;
for (const NodeDef& node : result.node()) {
if (node.op() == "Print") {
print_inputs.insert(node.input(0));
}
}
EXPECT_EQ(2, print_inputs.size());
EXPECT_EQ(0, print_inputs.count("mul_node1:0"));
EXPECT_EQ(1, print_inputs.count("add_node2:0"));
EXPECT_EQ(1, print_inputs.count("add_node3:0"));
EXPECT_EQ(0, print_inputs.count("add_node4:0"));
EXPECT_EQ(0, print_inputs.count("const_node1:0"));
EXPECT_EQ(0, print_inputs.count("const_node2:0"));
EXPECT_EQ(0, print_inputs.count("const_node3:0"));
}
};
TEST_F(InsertLoggingTest, TestInsertLogging) { TestInsertLogging(); }
TEST_F(InsertLoggingTest, TestInsertLoggingByOpType) {
TestInsertLoggingByOpType();
}
TEST_F(InsertLoggingTest, TestInsertLoggingByPrefix) {
TestInsertLoggingByPrefix();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/insert_logging.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/insert_logging_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0fe8e421-3b6a-41a7-a4c4-85093705cb50 | cpp | tensorflow/tensorflow | tf_pjrt_client | third_party/xla/xla/pjrt/tf_pjrt_client.cc | third_party/xla/xla/pjrt/tf_pjrt_client_test.cc | #include "xla/pjrt/tf_pjrt_client.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/pjrt_client.h"
namespace xla {
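// TfPjRtBuffer wraps a concrete PjRtBuffer and registers itself with the
// owning TfPjRtClient on construction (and unregisters on destruction), so
// the client can forcibly release every outstanding device buffer in
// DestroyWrappedBuffersAndClient().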
TfPjRtBuffer::TfPjRtBuffer(TfPjRtClient* client,
std::unique_ptr<PjRtBuffer> wrapped)
: client_(client), wrapped_(std::move(wrapped)) {
client_->TrackBuffer(this);
}
TfPjRtBuffer::~TfPjRtBuffer() { client_->UntrackBuffer(this); }
PjRtClient* TfPjRtBuffer::client() const { return client_; }
PjRtClient* TfPjRtExecutable::client() const { return client_; }
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfPjRtBuffer::CopyToDevice(
PjRtDevice* dst_device) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtBuffer> result,
wrapped_->CopyToDevice(dst_device));
return std::unique_ptr<PjRtBuffer>(
std::make_unique<TfPjRtBuffer>(client_, std::move(result)));
}
TfPjRtExecutable::TfPjRtExecutable(
TfPjRtClient* client, std::unique_ptr<PjRtLoadedExecutable> wrapped)
: client_(client), wrapped_(std::move(wrapped)) {}
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
TfPjRtExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
std::optional<std::vector<PjRtFuture<>>>& returned_futures) {
std::vector<std::vector<PjRtBuffer*>> unwrapped_argument_handles;
unwrapped_argument_handles.reserve(argument_handles.size());
for (auto& handles : argument_handles) {
unwrapped_argument_handles.emplace_back();
auto& unwrapped_handles = unwrapped_argument_handles.back();
unwrapped_handles.reserve(handles.size());
for (PjRtBuffer* buffer : handles) {
unwrapped_handles.push_back(
tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
}
}
TF_ASSIGN_OR_RETURN(auto out, wrapped_->Execute(unwrapped_argument_handles,
options, returned_futures));
for (auto& buffer_list : out) {
for (std::unique_ptr<PjRtBuffer>& buffer : buffer_list) {
buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
}
}
return out;
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfPjRtExecutable::ExecuteSharded(absl::Span<PjRtBuffer* const> argument_handles,
PjRtDevice* device,
const ExecuteOptions& options,
std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
std::vector<PjRtBuffer*> unwrapped_argument_handles;
unwrapped_argument_handles.reserve(argument_handles.size());
for (PjRtBuffer* buffer : argument_handles) {
unwrapped_argument_handles.push_back(
tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
}
TF_ASSIGN_OR_RETURN(auto out, wrapped_->ExecuteSharded(
unwrapped_argument_handles, device, options,
returned_future, fill_future));
for (std::unique_ptr<PjRtBuffer>& buffer : out) {
buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
}
return out;
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfPjRtExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
std::vector<PjRtBuffer*> unwrapped_argument_handles;
unwrapped_argument_handles.reserve(argument_handles.size());
for (PjRtBuffer* buffer : argument_handles) {
unwrapped_argument_handles.push_back(
tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
}
TF_ASSIGN_OR_RETURN(auto out, wrapped_->ExecutePortable(
unwrapped_argument_handles, device, options,
returned_future, fill_future));
for (std::unique_ptr<PjRtBuffer>& buffer : out) {
buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
}
return out;
}
TfPjRtClient::TfPjRtClient(std::unique_ptr<PjRtClient> wrapped)
: wrapped_(std::move(wrapped)) {
LOG(INFO) << "TfPjRtClient created.";
int num_mutexes = wrapped_->addressable_device_count();
alive_buffers_ = std::vector<DeviceBuffers>(num_mutexes);
for (int i = 0; i < num_mutexes; ++i) {
mutex_id_from_device_id_.insert(
{wrapped_->addressable_devices()[i]->id(), i});
}
}
TfPjRtClient::~TfPjRtClient() { LOG(INFO) << "TfPjRtClient destroyed."; }
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfPjRtClient::WrapBuffer(
absl::StatusOr<std::unique_ptr<PjRtBuffer>> to_wrap) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtBuffer> buffer, std::move(to_wrap));
return std::unique_ptr<PjRtBuffer>(
std::make_unique<TfPjRtBuffer>(this, std::move(buffer)));
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
TfPjRtClient::WrapExecutable(
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> to_wrap) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
std::move(to_wrap));
return std::unique_ptr<PjRtLoadedExecutable>(
std::make_unique<TfPjRtExecutable>(this, std::move(executable)));
}
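// Live buffers are tracked in per-device sets, each guarded by its own mutex;
// the mutex index is derived from the device id of the wrapped buffer.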
static int GetMutexId(
const TfPjRtBuffer* buffer,
const absl::flat_hash_map<int, int>& mutex_id_from_device_id) {
auto iters = mutex_id_from_device_id.find(buffer->wrapped()->device()->id());
CHECK(iters != mutex_id_from_device_id.end())
<< "Mutex id not found for device id: "
<< buffer->wrapped()->device()->id();
return iters->second;
}
void TfPjRtClient::TrackBuffer(TfPjRtBuffer* buffer) {
int mutex_id = GetMutexId(buffer, mutex_id_from_device_id_);
{
absl::MutexLock lock(&alive_buffers_[mutex_id].mu);
alive_buffers_[mutex_id].alive_buffers.insert(buffer);
}
}
void TfPjRtClient::UntrackBuffer(const TfPjRtBuffer* buffer) {
if (buffer->wrapped() == nullptr) {
return;
}
int mutex_id = GetMutexId(buffer, mutex_id_from_device_id_);
{
absl::MutexLock lock(&alive_buffers_[mutex_id].mu);
alive_buffers_[mutex_id].alive_buffers.erase(buffer);
}
}
void TfPjRtClient::DestroyWrappedBuffersAndClient() {
int num_mutexes = alive_buffers_.size();
for (int i = 0; i < num_mutexes; ++i) {
absl::MutexLock lock(&alive_buffers_[i].mu);
for (auto* buffer : alive_buffers_[i].alive_buffers) {
buffer->DestroyWrappedBuffer();
}
}
wrapped_.reset(nullptr);
LOG(INFO) << "TfPjRtClient::DestroyWrappedBuffersAndClient completed.";
}
std::unique_ptr<TfPjRtClient> TfPjRtClient::CreateTfPjRtClient(
std::unique_ptr<PjRtClient> wrapped) {
return std::make_unique<TfPjRtClient>(std::move(wrapped));
}
} | #include "xla/pjrt/tf_pjrt_client.h"
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/literal_util.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/service/hlo_parser.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(TfClientTest, ExecuteAndHloSnapshot) {
constexpr char kProgram[] = R"(
HloModule add
ENTRY add {
x = f32[3,2] parameter(0)
y = f32[3,2] parameter(1)
ROOT add = f32[3,2] add(x, y)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto client,
                          GetTfrtCpuClient(/* asynchronous= */ true));
client = TfPjRtClient::CreateTfPjRtClient(std::move(client));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
std::string dir = tsl::testing::TmpDir();
xla::CompileOptions options;
auto* debug_opts = options.executable_build_options.mutable_debug_options();
debug_opts->set_xla_dump_to(dir);
debug_opts->set_xla_dump_hlo_snapshots(true);
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
client->Compile(xla_computation, options));
std::vector<float> data1{1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
std::vector<float> data2{10.0, 20.0, 30.0, 40.0, 50.0, 60.0};
Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer1,
client->BufferFromHostBuffer(
data1.data(), shape.element_type(), shape.dimensions(),
          /* byte_strides= */ std::nullopt,
          PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
          /* on_done_with_host_buffer= */ nullptr,
client->addressable_devices()[0]));
TF_ASSERT_OK_AND_ASSIGN(
auto buffer2,
client->BufferFromHostBuffer(
data2.data(), shape.element_type(), shape.dimensions(),
          /* byte_strides= */ std::nullopt,
          PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
          /* on_done_with_host_buffer= */ nullptr,
client->addressable_devices()[0]));
auto result = pjrt_executable->Execute(
{{buffer1.get(), buffer2.get()}},
      /* options= */ {});
ASSERT_TRUE(result.ok());
tsl::FileSystem* fs;
ASSERT_TRUE(tsl::Env::Default()->GetFileSystemForFile(dir, &fs).ok());
std::vector<std::string> paths;
ASSERT_TRUE(fs->GetMatchingPaths(dir + "/*.snapshot.*.pb", &paths).ok());
ASSERT_EQ(paths.size(), 1);
HloSnapshot snapshot;
ASSERT_TRUE(
tsl::ReadBinaryProto(tsl::Env::Default(), paths[0], &snapshot).ok());
ASSERT_EQ(*Literal::CreateFromProto(snapshot.arguments(0)),
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}));
ASSERT_EQ(
*Literal::CreateFromProto(snapshot.arguments(1)),
LiteralUtil::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}, {50.0, 60.0}}));
ASSERT_EQ(
*Literal::CreateFromProto(snapshot.result()),
LiteralUtil::CreateR2<float>({{11.0, 22.0}, {33.0, 44.0}, {55.0, 66.0}}));
auto* tf_pjrt_client =
tensorflow::down_cast<xla::TfPjRtClient*>(client.get());
tf_pjrt_client->DestroyWrappedBuffersAndClient();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/tf_pjrt_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/tf_pjrt_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cd770ea-afe3-4a25-b3c7-1b35a92ac8d0 | cpp | tensorflow/tensorflow | buffer_use | third_party/xla/xla/runtime/buffer_use.cc | third_party/xla/xla/runtime/buffer_use_test.cc | #include "xla/runtime/buffer_use.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
namespace xla {
BufferUse::ReadWriteSet::ReadWriteSet() = default;
void BufferUse::ReadWriteSet::Add(BufferUse use) {
switch (use.access()) {
case BufferUse::kRead:
AddRead(use.slice());
break;
case BufferUse::kWrite:
AddWrite(use.slice());
break;
}
}
void BufferUse::ReadWriteSet::AddRead(BufferAllocation::Slice slice) {
read_.insert(slice);
}
void BufferUse::ReadWriteSet::AddWrite(BufferAllocation::Slice slice) {
write_.insert(slice);
}
void BufferUse::ReadWriteSet::AddAll(absl::Span<const BufferUse> uses) {
for (const auto& use : uses) Add(use);
}
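// Conflict rules: a write conflicts with any overlapping read or write, while
// a read conflicts only with overlapping writes. Overlap is detected both by
// exact slice equality and by byte-range intersection within an allocation.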
bool BufferUse::ReadWriteSet::HasConflicts(const BufferUse& use) const {
auto overlaps = [](const absl::flat_hash_set<BufferAllocation::Slice>& set,
const BufferUse& use) {
return set.contains(use.slice()) ||
absl::c_any_of(set, [&](const BufferAllocation::Slice& slice) {
return slice.OverlapsWith(use.slice());
});
};
return use.access() == MemoryAccess::kWrite
? overlaps(write_, use) || overlaps(read_, use)
: overlaps(write_, use);
}
bool BufferUse::ReadWriteSet::HasConflicts(const ReadWriteSet& other) {
return absl::c_any_of(other.read_,
[&](const BufferAllocation::Slice& slice) {
return HasConflicts(BufferUse::Read(slice));
}) ||
absl::c_any_of(other.write_,
[&](const BufferAllocation::Slice& slice) {
return HasConflicts(BufferUse::Write(slice));
});
}
} | #include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(BufferUseTest, Equality) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
BufferUse use0(slice0, BufferUse::MemoryAccess::kRead);
BufferUse use1(slice0, BufferUse::MemoryAccess::kWrite);
BufferUse use2(slice0, BufferUse::MemoryAccess::kRead);
EXPECT_NE(use0, use1);
EXPECT_EQ(use0, use2);
}
TEST(BufferUseTest, ReadWriteSet) {
BufferUse::ReadWriteSet rwset;
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
BufferAllocation::Slice slice1(&alloc, 5, 10);
BufferAllocation::Slice slice2(&alloc, 10, 10);
rwset.Add(BufferUse::Read(slice0));
EXPECT_FALSE(rwset.HasConflicts({BufferUse::Read(slice1)}));
EXPECT_TRUE(rwset.HasConflicts({BufferUse::Write(slice1)}));
EXPECT_FALSE(rwset.HasConflicts({BufferUse::Write(slice2)}));
rwset.Add(BufferUse::Read(slice1));
EXPECT_TRUE(rwset.HasConflicts({BufferUse::Write(slice2)}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/runtime/buffer_use.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/runtime/buffer_use_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9be20d07-7939-4106-9f23-e493d6c48dba | cpp | tensorflow/tensorflow | retrying_utils | third_party/xla/third_party/tsl/tsl/platform/retrying_utils.cc | third_party/xla/third_party/tsl/tsl/platform/retrying_utils_test.cc | #include "tsl/platform/retrying_utils.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include "absl/time/time.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/random.h"
namespace tsl {
namespace {
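// Only UNAVAILABLE, DEADLINE_EXCEEDED, and UNKNOWN are treated as transient;
// any other status is returned to the caller without retrying.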
bool IsRetriable(absl::StatusCode code) {
switch (code) {
case absl::StatusCode::kUnavailable:
case absl::StatusCode::kDeadlineExceeded:
case absl::StatusCode::kUnknown:
return true;
default:
return false;
}
}
double GenerateUniformRandomNumber() {
return random::New64() * (1.0 / std::numeric_limits<uint64_t>::max());
}
double GenerateUniformRandomNumberBetween(double a, double b) {
if (a == b) return a;
DCHECK_LT(a, b);
return a + GenerateUniformRandomNumber() * (b - a);
}
}
absl::Status RetryingUtils::CallWithRetries(
const std::function<absl::Status()>& f, const RetryConfig& config) {
return CallWithRetries(
f,
[](int64_t micros) {
return Env::Default()->SleepForMicroseconds(micros);
},
config);
}
absl::Status RetryingUtils::CallWithRetries(
const std::function<absl::Status()>& f,
const std::function<void(int64_t)>& sleep_usec, const RetryConfig& config) {
int retries = 0;
while (true) {
auto status = f();
if (!IsRetriable(status.code())) {
return status;
}
if (retries >= config.max_retries) {
return absl::Status(
absl::StatusCode::kAborted,
strings::StrCat(
"All ", config.max_retries,
" retry attempts failed. The last failure: ", status.message()));
}
int64_t delay_micros = 0;
if (config.init_delay_time_us > 0) {
const int64_t random_micros = random::New64() % 1000000;
delay_micros = std::min(config.init_delay_time_us << retries,
config.max_delay_time_us) +
random_micros;
}
VLOG(1) << "The operation failed and will be automatically retried in "
<< (delay_micros / 1000000.0) << " seconds (attempt "
<< (retries + 1) << " out of " << config.max_retries
<< "), caused by: " << status.ToString();
sleep_usec(delay_micros);
retries++;
}
}
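// Deletion is retried like any other call, with one twist: a NOT_FOUND seen
// on a retry most likely means an earlier attempt succeeded after its
// response was lost, so it is reported as success.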
absl::Status RetryingUtils::DeleteWithRetries(
const std::function<absl::Status()>& delete_func,
const RetryConfig& config) {
bool is_retried = false;
return RetryingUtils::CallWithRetries(
[delete_func, &is_retried]() {
const absl::Status status = delete_func();
if (is_retried && status.code() == error::NOT_FOUND) {
return absl::OkStatus();
}
is_retried = true;
return status;
},
config);
}
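// Jittered exponential backoff: returns approximately
//   min_delay * (0.4 + U(0.6, 1.0) * 1.3^attempt),
// floored at min_delay and capped so the total does not exceed max_delay.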
absl::Duration ComputeRetryBackoff(int current_retry_attempt,
absl::Duration min_delay,
absl::Duration max_delay) {
DCHECK_GE(current_retry_attempt, 0);
constexpr double kBackoffBase = 1.3;
constexpr double kBackoffRandMult = 0.4;
const absl::Duration first_term = min_delay * kBackoffRandMult;
absl::Duration uncapped_second_term =
min_delay * std::pow(kBackoffBase, current_retry_attempt);
absl::Duration second_term =
std::min(uncapped_second_term, max_delay - first_term);
second_term *=
GenerateUniformRandomNumberBetween(1.0 - kBackoffRandMult, 1.0);
return std::max(first_term + second_term, min_delay);
}
} | #include "tsl/platform/retrying_utils.h"
#include <cmath>
#include <fstream>
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(RetryingUtilsTest, CallWithRetries_RetryDelays) {
std::vector<double> requested_delays;
std::function<void(int64_t)> sleep = [&requested_delays](int64_t delay) {
requested_delays.emplace_back(delay / 1000000.0);
};
std::function<absl::Status()> f = []() {
return errors::Unavailable("Failed.");
};
const auto& status = RetryingUtils::CallWithRetries(
      f, sleep, RetryConfig(500000 /* init_delay_time_us */));
EXPECT_TRUE(errors::IsAborted(status));
EXPECT_TRUE(absl::StrContains(
status.message(),
"All 10 retry attempts failed. The last failure: Failed."))
<< status;
EXPECT_EQ(10, requested_delays.size());
EXPECT_NEAR(0.5, requested_delays[0], 1.0);
EXPECT_NEAR(1.0, requested_delays[1], 1.0);
EXPECT_NEAR(2.0, requested_delays[2], 1.0);
EXPECT_NEAR(4.0, requested_delays[3], 1.0);
EXPECT_NEAR(8.0, requested_delays[4], 1.0);
EXPECT_NEAR(16.0, requested_delays[5], 1.0);
EXPECT_NEAR(32.0, requested_delays[6], 1.0);
EXPECT_NEAR(32.0, requested_delays[7], 1.0);
EXPECT_NEAR(32.0, requested_delays[8], 1.0);
EXPECT_NEAR(32.0, requested_delays[9], 1.0);
}
TEST(RetryingUtilsTest, CallWithRetries_NotFoundIsNotRetried) {
std::vector<absl::Status> results(
{errors::Unavailable("Failed."), errors::NotFound("Not found.")});
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
EXPECT_TRUE(errors::IsNotFound(RetryingUtils::CallWithRetries(
      f, RetryConfig(0 /* init_delay_time_us */))));
}
TEST(RetryingUtilsTest, CallWithRetries_ImmediateSuccess) {
std::vector<absl::Status> results({absl::OkStatus()});
std::function<void(int64_t)> sleep = [](int64_t delay) {
ADD_FAILURE() << "Unexpected call to sleep.";
};
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::CallWithRetries(
      f, sleep, RetryConfig(1L /* init_delay_time_us */)));
}
TEST(RetryingUtilsTest, CallWithRetries_EventualSuccess) {
std::vector<absl::Status> results({errors::Unavailable("Failed."),
errors::Unavailable("Failed again."),
absl::OkStatus()});
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::CallWithRetries(
      f, RetryConfig(0 /* init_delay_time_us */)));
}
TEST(RetryingUtilsTest, DeleteWithRetries_ImmediateSuccess) {
std::vector<absl::Status> delete_results({absl::OkStatus()});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
      delete_func, RetryConfig(0 /* init_delay_time_us */)));
}
TEST(RetryingUtilsTest, DeleteWithRetries_EventualSuccess) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), absl::OkStatus()});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
      delete_func, RetryConfig(0 /* init_delay_time_us */)));
}
TEST(RetryingUtilsTest, DeleteWithRetries_PermissionDeniedNotRetried) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), errors::PermissionDenied("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
EXPECT_TRUE(errors::IsPermissionDenied(RetryingUtils::DeleteWithRetries(
      delete_func, RetryConfig(0 /* init_delay_time_us */))));
}
TEST(RetryingUtilsTest, DeleteWithRetries_SuccessThroughFileNotFound) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), errors::NotFound("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
      delete_func, RetryConfig(0 /* init_delay_time_us */)));
}
TEST(RetryingUtilsTest, DeleteWithRetries_FirstNotFoundReturnedAsIs) {
std::vector<absl::Status> delete_results({errors::NotFound("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
EXPECT_EQ(error::NOT_FOUND,
RetryingUtils::DeleteWithRetries(
                delete_func, RetryConfig(0 /* init_delay_time_us */))
.code());
}
TEST(RetryingUtilsTest, ComputeRetryBackoff) {
for (int i = 0; i < 30; ++i) {
EXPECT_LE(0.4 * absl::Milliseconds(1) +
0.6 * absl::Milliseconds(1) * std::pow(1.3, i),
ComputeRetryBackoff(i));
EXPECT_LE(
ComputeRetryBackoff(i),
0.4 * absl::Milliseconds(1) + absl::Milliseconds(1) * std::pow(1.3, i));
}
}
TEST(RetryingUtilsTest, ComputeRetryBackoff_MinMaxDelays) {
for (int i = 0; i < 30; ++i) {
EXPECT_EQ(ComputeRetryBackoff(i,
                                  /* min_delay= */ absl::Seconds(10)),
absl::Seconds(10));
EXPECT_EQ(ComputeRetryBackoff(i,
                                  /* min_delay= */ absl::Microseconds(1),
                                  /* max_delay= */ absl::Microseconds(1)),
absl::Microseconds(1));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/retrying_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6024a006-606f-4cb0-881b-383587845c64 | cpp | tensorflow/tensorflow | uniform_quantize_op | tensorflow/core/kernels/uniform_quant_ops/uniform_quantize_op.cc | tensorflow/core/kernels/uniform_quant_ops/uniform_quantize_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h"
#include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
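// Affine quantization maps a float x to round(x / scale) + zero_point,
// clamped to [quantization_min_val, quantization_max_val]. Per-tensor mode
// uses a single (scale, zero_point) pair; per-channel mode uses one pair per
// slice along quantization_axis.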
using tensorflow::errors::InvalidArgument;
template <typename Tin, typename Tout>
void EvalPerTensorQuantize(const Tensor& input, float scale, int32_t zero_point,
int32_t quantization_min_val,
int32_t quantization_max_val, Tensor& output) {
const float inv_scale = 1.0f / scale;
AffineQuantize(input.flat<Tin>(), inv_scale, zero_point, quantization_min_val,
quantization_max_val, output.flat<Tout>());
}
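// Per-channel variant: iterates along quantization_axis and quantizes each
// chip with its own scale and zero point.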
template <typename Tin, typename Tout>
void EvalPerChannelQuantize(const Tensor& input, const Tensor& scales,
const Tensor& zero_points, int quantization_axis,
int32_t quantization_min_val,
int32_t quantization_max_val, Tensor& output) {
DCHECK(input.IsSameSize(output));
const float* scales_data = scales.flat<float>().data();
const int32_t* zero_points_data = zero_points.flat<int32_t>().data();
auto input_tensor =
input.template flat_inner_outer_dims<Tin, 3>(quantization_axis - 1);
auto output_tensor =
output.template flat_inner_outer_dims<Tout, 3>(quantization_axis - 1);
for (int i = 0; i < output.dim_size(quantization_axis); ++i) {
const float inv_scale = 1.0f / scales_data[i];
AffineQuantize(input_tensor.template chip<1>(i), inv_scale,
zero_points_data[i], quantization_min_val,
quantization_max_val, output_tensor.template chip<1>(i));
}
}
template <typename Tin, typename Tout>
void EvalQuantize(const Tensor& input, const Tensor& scales,
const Tensor& zero_points, int quantization_axis,
int32_t quantization_min_val, int32_t quantization_max_val,
Tensor& output) {
if (quantization_axis >= 0) {
EvalPerChannelQuantize<Tin, Tout>(input, scales, zero_points,
quantization_axis, quantization_min_val,
quantization_max_val, output);
} else {
EvalPerTensorQuantize<Tin, Tout>(
input, scales.scalar<float>()(), zero_points.scalar<int32>()(),
quantization_min_val, quantization_max_val, output);
}
}
}
class UniformQuantizeOp : public OpKernel {
public:
explicit UniformQuantizeOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("Tin", &tin_));
OP_REQUIRES(context, tin_ == DataType::DT_FLOAT,
InvalidArgument("Unsupported input type."));
OP_REQUIRES_OK(context, context->GetAttr("Tout", &tout_));
OP_REQUIRES(context,
tout_ == DataType::DT_QINT8 || tout_ == DataType::DT_QINT32,
InvalidArgument("Unsupported output type."));
OP_REQUIRES_OK(context, context->GetAttr("quantization_min_val",
&quantization_min_val_));
OP_REQUIRES_OK(context, context->GetAttr("quantization_max_val",
&quantization_max_val_));
OP_REQUIRES_OK(context,
context->GetAttr("quantization_axis", &quantization_axis_));
OP_REQUIRES(context, (quantization_axis_ >= -1),
InvalidArgument("quantization_axis must be >= -1, given: ",
quantization_axis_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& scales = context->input(1);
const Tensor& zero_points = context->input(2);
OP_REQUIRES_OK(context, (QuantizationAxisAndShapeValid(
input.shape(), scales.shape(),
zero_points.shape(), quantization_axis_)));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
if (tout_ == DataType::DT_QINT8) {
EvalQuantize<float, qint8>(input, scales, zero_points, quantization_axis_,
quantization_min_val_, quantization_max_val_,
*output);
} else {
EvalQuantize<float, qint32>(input, scales, zero_points,
quantization_axis_, quantization_min_val_,
quantization_max_val_, *output);
}
}
private:
DataType tin_, tout_;
int quantization_axis_;
int32_t quantization_min_val_;
int32_t quantization_max_val_;
};
REGISTER_KERNEL_BUILDER(Name("UniformQuantize")
.Device(DEVICE_CPU)
.TypeConstraint<float>("Tin")
.TypeConstraint("Tout", {DT_QINT8, DT_QINT32}),
UniformQuantizeOp);
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
class UniformQuantizeOpsTest : public OpsTestBase {
protected:
};
TEST_F(UniformQuantizeOpsTest, QuantizeInvalidQuantizationAxis) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantize")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_FLOAT)
.Attr("Tout", DT_QINT8)
.Attr("quantization_axis", -2)
.Attr("quantization_min_val", -127)
.Attr("quantization_max_val", 127)
.Finalize(node_def()));
EXPECT_TRUE(absl::IsInvalidArgument(InitOp()));
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantize")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_FLOAT)
.Attr("Tout", DT_QINT8)
.Attr("quantization_axis", 2)
.Attr("quantization_min_val", -127)
.Attr("quantization_max_val", 127)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<int32>(TensorShape({}), {0});
EXPECT_TRUE(absl::IsInvalidArgument(RunOpKernel()));
}
TEST_F(UniformQuantizeOpsTest, PerTensorQuantize) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantize")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_FLOAT)
.Attr("Tout", DT_QINT8)
.Attr("quantization_axis", -1)
.Attr("quantization_min_val", -127)
.Attr("quantization_max_val", 127)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 3}),
{-27.0, -20.0, 0.0, 1.0, 5.0, 10.0});
AddInputFromArray<float>(TensorShape({}), {0.25});
AddInputFromArray<int32>(TensorShape({}), {-20});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({2, 3}));
test::FillValues<qint8>(&expected, {-127, -100, -20, -16, 0, 20});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizeOpsTest, PerChannelQuantize) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantize")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_FLOAT)
.Attr("Tout", DT_QINT8)
.Attr("quantization_axis", 0)
.Attr("quantization_min_val", -127)
.Attr("quantization_max_val", 127)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 3}),
{-27.0, -20.0, 0.0, 1.0, 5.0, 10.0});
AddInputFromArray<float>(TensorShape({2}), {0.25, 0.5});
AddInputFromArray<int32>(TensorShape({2}), {-20, -10});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({2, 3}));
test::FillValues<qint8>(&expected, {-127, -100, -20, -8, 0, 10});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
665eee65-107c-4ecb-aba4-6b8f68295dca | cpp | abseil/abseil-cpp | cord_buffer | absl/strings/cord_buffer.cc | absl/strings/cord_buffer_test.cc | #include "absl/strings/cord_buffer.h"
#include <cstddef>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
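// Before C++17, a static constexpr data member that is odr-used still needs
// an out-of-line definition; these redundant definitions provide it.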
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t CordBuffer::kDefaultLimit;
constexpr size_t CordBuffer::kCustomLimit;
#endif
ABSL_NAMESPACE_END
} | #include "absl/strings/cord_buffer.h"
#include <algorithm>
#include <cstring>
#include <limits>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cord_rep_test_util.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
using testing::Eq;
using testing::Ge;
using testing::Le;
using testing::Ne;
namespace absl {
ABSL_NAMESPACE_BEGIN
class CordBufferTestPeer {
public:
static cord_internal::CordRep* ConsumeValue(CordBuffer& buffer,
absl::string_view& short_value) {
return buffer.ConsumeValue(short_value);
}
};
namespace {
using ::absl::cordrep_testing::CordToString;
constexpr size_t kInlinedSize = sizeof(CordBuffer) - 1;
constexpr size_t kDefaultLimit = CordBuffer::kDefaultLimit;
constexpr size_t kCustomLimit = CordBuffer::kCustomLimit;
constexpr size_t kMaxFlatSize = cord_internal::kMaxFlatSize;
constexpr size_t kMaxFlatLength = cord_internal::kMaxFlatLength;
constexpr size_t kFlatOverhead = cord_internal::kFlatOverhead;
constexpr size_t k8KiB = 8 << 10;
constexpr size_t k16KiB = 16 << 10;
constexpr size_t k64KiB = 64 << 10;
constexpr size_t k1MB = 1 << 20;
class CordBufferTest : public testing::TestWithParam<size_t> {};
INSTANTIATE_TEST_SUITE_P(MediumSize, CordBufferTest,
testing::Values(1, kInlinedSize - 1, kInlinedSize,
kInlinedSize + 1, kDefaultLimit - 1,
kDefaultLimit));
TEST_P(CordBufferTest, MaximumPayload) {
EXPECT_THAT(CordBuffer::MaximumPayload(), Eq(kMaxFlatLength));
EXPECT_THAT(CordBuffer::MaximumPayload(512), Eq(512 - kFlatOverhead));
EXPECT_THAT(CordBuffer::MaximumPayload(k64KiB), Eq(k64KiB - kFlatOverhead));
EXPECT_THAT(CordBuffer::MaximumPayload(k1MB), Eq(k64KiB - kFlatOverhead));
}
TEST(CordBufferTest, ConstructDefault) {
CordBuffer buffer;
EXPECT_THAT(buffer.capacity(), Eq(sizeof(CordBuffer) - 1));
EXPECT_THAT(buffer.length(), Eq(0));
EXPECT_THAT(buffer.data(), Ne(nullptr));
EXPECT_THAT(buffer.available().data(), Eq(buffer.data()));
EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity()));
memset(buffer.data(), 0xCD, buffer.capacity());
}
TEST(CordBufferTest, CreateSsoWithDefaultLimit) {
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(3);
EXPECT_THAT(buffer.capacity(), Ge(3));
EXPECT_THAT(buffer.capacity(), Le(sizeof(CordBuffer)));
EXPECT_THAT(buffer.length(), Eq(0));
memset(buffer.data(), 0xCD, buffer.capacity());
memcpy(buffer.data(), "Abc", 3);
buffer.SetLength(3);
EXPECT_THAT(buffer.length(), Eq(3));
absl::string_view short_value;
EXPECT_THAT(CordBufferTestPeer::ConsumeValue(buffer, short_value),
Eq(nullptr));
EXPECT_THAT(absl::string_view(buffer.data(), 3), Eq("Abc"));
EXPECT_THAT(short_value, Eq("Abc"));
}
TEST_P(CordBufferTest, Available) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
EXPECT_THAT(buffer.available().data(), Eq(buffer.data()));
EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity()));
buffer.SetLength(2);
EXPECT_THAT(buffer.available().data(), Eq(buffer.data() + 2));
EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity() - 2));
}
TEST_P(CordBufferTest, IncreaseLengthBy) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
buffer.IncreaseLengthBy(2);
EXPECT_THAT(buffer.length(), Eq(2));
buffer.IncreaseLengthBy(5);
EXPECT_THAT(buffer.length(), Eq(7));
}
TEST_P(CordBufferTest, AvailableUpTo) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
size_t expected_up_to = std::min<size_t>(3, buffer.capacity());
EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data()));
EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to));
buffer.SetLength(2);
expected_up_to = std::min<size_t>(3, buffer.capacity() - 2);
EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data() + 2));
EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to));
}
size_t MaxCapacityFor(size_t block_size, size_t requested) {
requested = (std::min)(requested, cord_internal::kMaxLargeFlatSize);
return block_size - kFlatOverhead;
}
TEST_P(CordBufferTest, CreateWithDefaultLimit) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
EXPECT_THAT(buffer.capacity(), Ge(requested));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested)));
EXPECT_THAT(buffer.length(), Eq(0));
memset(buffer.data(), 0xCD, buffer.capacity());
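  // data holds requested - 1 characters, so copying `requested` bytes also
  // copies the terminating NUL that the string_view check below relies on.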
std::string data(requested - 1, 'x');
memcpy(buffer.data(), data.c_str(), requested);
buffer.SetLength(requested);
EXPECT_THAT(buffer.length(), Eq(requested));
EXPECT_THAT(absl::string_view(buffer.data()), Eq(data));
}
TEST(CordBufferTest, CreateWithDefaultLimitAskingFor2GB) {
constexpr size_t k2GiB = 1U << 31;
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(k2GiB);
EXPECT_THAT(buffer.capacity(), Le(2 * CordBuffer::kDefaultLimit));
EXPECT_THAT(buffer.length(), Eq(0));
EXPECT_THAT(buffer.data(), Ne(nullptr));
memset(buffer.data(), 0xCD, buffer.capacity());
}
TEST_P(CordBufferTest, MoveConstruct) {
const size_t requested = GetParam();
CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested);
const size_t capacity = from.capacity();
memcpy(from.data(), "Abc", 4);
from.SetLength(4);
CordBuffer to(std::move(from));
EXPECT_THAT(to.capacity(), Eq(capacity));
EXPECT_THAT(to.length(), Eq(4));
EXPECT_THAT(absl::string_view(to.data()), Eq("Abc"));
EXPECT_THAT(from.length(), Eq(0));
}
TEST_P(CordBufferTest, MoveAssign) {
const size_t requested = GetParam();
CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested);
const size_t capacity = from.capacity();
memcpy(from.data(), "Abc", 4);
from.SetLength(4);
CordBuffer to;
to = std::move(from);
EXPECT_THAT(to.capacity(), Eq(capacity));
EXPECT_THAT(to.length(), Eq(4));
EXPECT_THAT(absl::string_view(to.data()), Eq("Abc"));
EXPECT_THAT(from.length(), Eq(0));
}
TEST_P(CordBufferTest, ConsumeValue) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
memcpy(buffer.data(), "Abc", 4);
buffer.SetLength(3);
absl::string_view short_value;
if (cord_internal::CordRep* rep =
CordBufferTestPeer::ConsumeValue(buffer, short_value)) {
EXPECT_THAT(CordToString(rep), Eq("Abc"));
cord_internal::CordRep::Unref(rep);
} else {
EXPECT_THAT(short_value, Eq("Abc"));
}
EXPECT_THAT(buffer.length(), Eq(0));
}
TEST_P(CordBufferTest, CreateWithCustomLimitWithinDefaultLimit) {
const size_t requested = GetParam();
CordBuffer buffer =
CordBuffer::CreateWithCustomLimit(kMaxFlatSize, requested);
EXPECT_THAT(buffer.capacity(), Ge(requested));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested)));
EXPECT_THAT(buffer.length(), Eq(0));
memset(buffer.data(), 0xCD, buffer.capacity());
std::string data(requested - 1, 'x');
memcpy(buffer.data(), data.c_str(), requested);
buffer.SetLength(requested);
EXPECT_THAT(buffer.length(), Eq(requested));
EXPECT_THAT(absl::string_view(buffer.data()), Eq(data));
}
TEST(CordLargeBufferTest, CreateAtOrBelowDefaultLimit) {
CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, kDefaultLimit);
EXPECT_THAT(buffer.capacity(), Ge(kDefaultLimit));
EXPECT_THAT(buffer.capacity(),
Le(MaxCapacityFor(kMaxFlatSize, kDefaultLimit)));
buffer = CordBuffer::CreateWithCustomLimit(k64KiB, 3178);
EXPECT_THAT(buffer.capacity(), Ge(3178));
}
TEST(CordLargeBufferTest, CreateWithCustomLimit) {
  ASSERT_TRUE((kMaxFlatSize & (kMaxFlatSize - 1)) == 0)
      << "Must be power of 2";
for (size_t size = kMaxFlatSize; size <= kCustomLimit; size *= 2) {
CordBuffer buffer = CordBuffer::CreateWithCustomLimit(size, size);
size_t expected = size - kFlatOverhead;
ASSERT_THAT(buffer.capacity(), Ge(expected));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(size, expected)));
}
}
TEST(CordLargeBufferTest, CreateWithTooLargeLimit) {
CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, k1MB);
ASSERT_THAT(buffer.capacity(), Ge(k64KiB - kFlatOverhead));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k64KiB, k1MB)));
}
TEST(CordLargeBufferTest, CreateWithHugeValueForOverFlowHardening) {
for (size_t dist_from_max = 0; dist_from_max <= 32; ++dist_from_max) {
size_t capacity = std::numeric_limits<size_t>::max() - dist_from_max;
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(capacity);
ASSERT_THAT(buffer.capacity(), Ge(kDefaultLimit));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, capacity)));
for (size_t limit = kMaxFlatSize; limit <= kCustomLimit; limit *= 2) {
CordBuffer buffer = CordBuffer::CreateWithCustomLimit(limit, capacity);
ASSERT_THAT(buffer.capacity(), Ge(limit - kFlatOverhead));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(limit, capacity)));
}
}
}
TEST(CordLargeBufferTest, CreateWithSmallLimit) {
CordBuffer buffer = CordBuffer::CreateWithCustomLimit(512, 1024);
ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 1024)));
buffer = CordBuffer::CreateWithCustomLimit(512, 512);
ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 512)));
buffer = CordBuffer::CreateWithCustomLimit(512, 511);
ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 511)));
buffer = CordBuffer::CreateWithCustomLimit(512, 498);
ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 498)));
}
TEST(CordLargeBufferTest, CreateWasteFull) {
const size_t requested = (15 << 10);
CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested);
ASSERT_THAT(buffer.capacity(), Ge(k8KiB - kFlatOverhead));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k8KiB, requested)));
}
TEST(CordLargeBufferTest, CreateSmallSlop) {
const size_t requested = k16KiB - 2 * kFlatOverhead;
CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested);
ASSERT_THAT(buffer.capacity(), Ge(k16KiB - kFlatOverhead));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k16KiB, requested)));
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/cord_buffer.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/cord_buffer_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
66e616d3-0a41-4d1a-8c15-b0138948b0b7 | cpp | tensorflow/tensorflow | xnnpack_plugin | tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc | tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin_test.cc | #include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
extern "C" {
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
auto options(TfLiteXNNPackDelegateOptionsDefault());
const auto* xnnpack_settings = tflite_settings->xnnpack_settings();
if (xnnpack_settings) {
options.num_threads = xnnpack_settings->num_threads();
if (xnnpack_settings->flags()) {
options.flags = xnnpack_settings->flags();
}
if (xnnpack_settings->weight_cache_file_path()) {
options.weight_cache_file_path =
xnnpack_settings->weight_cache_file_path()->c_str();
}
}
return TfLiteXNNPackDelegateCreate(&options);
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
TfLiteXNNPackDelegateDelete(delegate);
}
static int DelegateErrno(TfLiteDelegate* from_delegate) { return 0; }
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteXnnpackDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "pthreadpool.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
class XnnpackTest : public testing::Test {
public:
static constexpr int kNumThreadsForTest = 7;
void SetUp() override {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~XnnpackTest() override = default;
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
constexpr int XnnpackTest::kNumThreadsForTest;
TEST_F(XnnpackTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteXnnpackDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, SetsCorrectThreadCount) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
pthreadpool_t threadpool =
static_cast<pthreadpool_t>(TfLiteXNNPackDelegateGetThreadPool(delegate));
int thread_count = pthreadpool_get_threads_count(threadpool);
EXPECT_EQ(thread_count, kNumThreadsForTest);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsByDefault) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesSpecifiedFlagsWhenNonzero) {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsWhenZero) {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef4420c7-b3f9-4685-bc2a-8478f1719c21 | cpp | tensorflow/tensorflow | memory_types | tensorflow/core/common_runtime/memory_types.cc | tensorflow/core/common_runtime/memory_types_test.cc | #include "tensorflow/core/common_runtime/memory_types.h"
#include <utility>
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
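// An Endpoint identifies one output of a node: (node id, output index).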
struct Endpoint {
int node_id;
int output_index;
};
struct EndpointHash {
uint32 operator()(const Endpoint& x) const {
return Hash32(reinterpret_cast<const char*>(&x.node_id), sizeof(int),
x.output_index);
}
};
struct EndpointEq {
  bool operator()(const Endpoint& x, const Endpoint& y) const {
return (x.node_id == y.node_id) && (x.output_index == y.output_index);
}
};
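// Computes the memory type (host vs. device) of every node input and output,
// then invokes `fn` on each non-control edge with the source and destination
// memory types. Only GPU and pluggable devices are processed; all other
// device types trivially pass.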
static Status ProcessMemoryTypes(
const DeviceType& device_type, const Graph* g,
const std::function<Status(const Edge*, MemoryType, MemoryType)>& fn) {
if (device_type != DEVICE_GPU &&
!DeviceFactory::IsPluggableDevice(device_type.type_string())) {
return absl::OkStatus();
}
typedef std::unordered_map<Endpoint, MemoryType, EndpointHash, EndpointEq>
MemTypeMap;
MemTypeMap inp;
MemTypeMap out;
MemoryTypeVector inp_mvec;
MemoryTypeVector out_mvec;
for (const Node* n : g->nodes()) {
TF_RETURN_IF_ERROR(MemoryTypesForNode(g->op_registry(), device_type,
n->def(), &inp_mvec, &out_mvec));
for (size_t i = 0; i < inp_mvec.size(); ++i) {
VLOG(2) << "inp mvec " << n->id() << " " << i << " " << inp_mvec[i];
inp[{n->id(), static_cast<int>(i)}] = inp_mvec[i];
}
for (size_t i = 0; i < out_mvec.size(); ++i) {
VLOG(2) << "out mvec " << n->id() << " " << i << " " << out_mvec[i];
out[{n->id(), static_cast<int>(i)}] = out_mvec[i];
}
}
for (const Edge* e : g->edges()) {
if (e->IsControlEdge()) {
continue;
}
MemoryType sm = gtl::FindWithDefault(out, {e->src()->id(), e->src_output()},
DEVICE_MEMORY);
MemoryType dm = gtl::FindWithDefault(inp, {e->dst()->id(), e->dst_input()},
DEVICE_MEMORY);
VLOG(1) << e->src()->id() << ":" << e->src_output() << " -> "
<< e->dst()->id() << ":" << e->dst_input() << ": " << sm << " -> "
<< dm;
TF_RETURN_IF_ERROR(fn(e, sm, dm));
}
return absl::OkStatus();
}
Status ValidateMemoryTypes(const DeviceType& device_type, const Graph* g) {
return ProcessMemoryTypes(
device_type, g, [](const Edge* e, MemoryType sm, MemoryType dm) {
if (sm == dm) {
return absl::OkStatus();
}
return errors::Internal("Memory type mismatch (", sm, " ", dm,
") between :", e->src()->id(), ":",
e->src_output(), " and ", e->dst()->id(), ":",
e->dst_input(), " : from ",
FormatNodeForError(*e->src()), " to ",
FormatNodeForError(*e->dst()));
});
}
static string GetTensorName(const Edge* edge) {
static std::atomic<int64_t> counter(0);
return strings::StrCat("memtype_", counter.fetch_add(1), "_",
edge->src()->name());
}
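// Helpers that build the _Send/_Recv (or _HostSend/_HostRecv) pair used to
// move a tensor between host and device memory on the same device.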
static Node* Send(Graph* g, const string& tensor_name,
const string& device_name, bool host, const Edge* edge) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), host ? "_HostSend" : "_Send")
.Input(edge->src(), edge->src_output())
.Attr("tensor_name", tensor_name)
.Attr("send_device", device_name)
.Attr("send_device_incarnation", 0)
.Attr("recv_device", device_name)
.Attr("_hostmem_sendrecv", true)
.Attr("_src", edge->src()->name())
.Attr("_dst", edge->dst()->name())
.Finalize(g, &ret));
return ret;
}
static Node* Recv(Graph* g, const string& tensor_name,
const string& device_name, bool host, const Edge* edge) {
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("n"), host ? "_HostRecv" : "_Recv")
.Attr("tensor_type", edge->src()->output_type(edge->src_output()))
.Attr("tensor_name", tensor_name)
.Attr("send_device", device_name)
.Attr("send_device_incarnation", 0)
.Attr("recv_device", device_name)
.Attr("_hostmem_sendrecv", true)
.Attr("_src", edge->src()->name())
.Attr("_dst", edge->dst()->name())
.Finalize(g, &ret));
return ret;
}
Status EnsureMemoryTypes(const DeviceType& device_type,
const string& device_name, Graph* g) {
struct Item {
const Edge* edge;
MemoryType sm;
MemoryType dm;
};
std::vector<Item> edges;
TF_RETURN_IF_ERROR(ProcessMemoryTypes(
device_type, g, [&edges](const Edge* e, MemoryType sm, MemoryType dm) {
if (sm == dm) {
return absl::OkStatus();
}
if (((sm == HOST_MEMORY) && (dm == DEVICE_MEMORY)) ||
((sm == DEVICE_MEMORY) && (dm == HOST_MEMORY))) {
edges.push_back({e, sm, dm});
return absl::OkStatus();
}
return errors::Internal("Unexpected memory type pair on an edge: ", sm,
" vs. ", dm);
}));
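  // Rewrite each mismatched edge with a Send/Recv pair that moves the tensor
  // between host and device memory. Recv nodes are cached per source endpoint
  // so several consumers can share one transfer, except for reference-typed
  // outputs, which are never shared.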
if (!edges.empty()) {
std::unordered_map<Endpoint, Node*, EndpointHash, EndpointEq> recv_nodes;
for (const auto& item : edges) {
const Edge* e = item.edge;
const bool has_ref = IsRefType(e->src()->output_type(e->src_output()));
Node* recv = nullptr;
Endpoint key{e->src()->id(), e->src_output()};
auto iter = recv_nodes.find(key);
if (iter == recv_nodes.end()) {
const string tensor_name = GetTensorName(e);
Node* send =
Send(g, tensor_name, device_name, (item.sm == HOST_MEMORY), e);
recv = Recv(g, tensor_name, device_name, (item.dm == HOST_MEMORY), e);
if (!has_ref) {
recv_nodes[key] = recv;
}
g->AddControlEdge(send, recv);
} else {
recv = iter->second;
}
g->AddEdge(recv, 0, e->dst(), e->dst_input());
g->RemoveEdge(e);
}
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "Dumped graph after EnsureMemoryTypes to "
<< DumpGraphToFile("EnsureMemoryTypes", *g);
}
return ValidateMemoryTypes(device_type, g);
}
Status MemoryTypeForOutput(const DeviceType& device_type, const Graph* g,
const Node* n, int index, MemoryType* memory_type) {
MemoryTypeVector inp_mvec;
MemoryTypeVector out_mvec;
TF_RETURN_IF_ERROR(MemoryTypesForNode(g->op_registry(), device_type, n->def(),
&inp_mvec, &out_mvec));
  if (index < 0 || out_mvec.size() <= static_cast<size_t>(index)) {
return errors::Internal("Trying to get the memory type for ", index,
"'th output of node ", FormatNodeForError(*n),
" that has only ", out_mvec.size(), " outputs");
}
*memory_type = out_mvec[index];
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/memory_types.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(MemoryTypeChecker, Int32OK) {
Graph* g = new Graph(OpRegistry::Global());
Tensor v(DT_INT32, {});
v.scalar<int32>().setZero();
auto in0 = test::graph::Constant(g, v);
auto in1 = test::graph::Constant(g, v);
test::graph::Add(g, in0, in1);
TF_EXPECT_OK(ValidateMemoryTypes(DEVICE_CPU, g));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TF_EXPECT_OK(ValidateMemoryTypes(DEVICE_GPU, g));
#endif
delete g;
}
TEST(MemoryTypeChecker, Int32NotOk) {
Graph* g = new Graph(OpRegistry::Global());
Tensor v(DT_INT32, {});
v.scalar<int32>().setZero();
auto x = test::graph::Constant(g, v);
test::graph::Cast(g, x, DT_FLOAT);
TF_EXPECT_OK(ValidateMemoryTypes(DEVICE_CPU, g));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
EXPECT_TRUE(errors::IsInternal(ValidateMemoryTypes(DEVICE_GPU, g)));
TF_EXPECT_OK(EnsureMemoryTypes(DEVICE_GPU, "/device:GPU:0", g));
TF_EXPECT_OK(ValidateMemoryTypes(DEVICE_GPU, g));
#endif
delete g;
}
TEST(MemoryTypeChecker, MemoryTypeForOutput) {
Graph* g = new Graph(OpRegistry::Global());
Tensor vb(DT_BOOL);
Tensor vi(DT_INT32);
Tensor vf(DT_FLOAT);
auto pred = test::graph::Constant(g, vb);
auto sf = test::graph::Switch(g, test::graph::Constant(g, vf), pred);
MemoryType memory_type;
TF_EXPECT_OK(MemoryTypeForOutput(DEVICE_CPU, g, sf, 0, &memory_type));
EXPECT_EQ(memory_type, DEVICE_MEMORY);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
auto si = test::graph::Switch(g, test::graph::Constant(g, vi), pred);
TF_EXPECT_OK(MemoryTypeForOutput(DEVICE_GPU, g, si, 0, &memory_type));
EXPECT_EQ(memory_type, HOST_MEMORY);
#endif
delete g;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/memory_types.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/memory_types_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ec28ff5-400f-4eb8-b45c-475999016815 | cpp | tensorflow/tensorflow | hlo_module_group | third_party/xla/xla/hlo/ir/hlo_module_group.cc | third_party/xla/xla/service/hlo_module_group_test.cc | #include "xla/hlo/ir/hlo_module_group.h"
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
namespace xla {
HloModuleGroup::HloModuleGroup(std::unique_ptr<HloModule> module)
: name_(module->name()) {
push_back(std::move(module));
}
HloModuleGroup::HloModuleGroup(absl::string_view name,
absl::Span<std::unique_ptr<HloModule>> modules)
: name_(name) {
for (auto& module : modules) {
push_back(std::move(module));
}
}
HloModuleGroup::HloModuleGroup(
absl::string_view name, std::vector<std::unique_ptr<HloModule>>&& modules)
: name_(name) {
for (auto& module : modules) {
push_back(std::move(module));
}
}
std::vector<std::unique_ptr<HloModule>> HloModuleGroup::ConsumeModules() {
std::vector<std::unique_ptr<HloModule>> ret_modules = std::move(modules_);
modules_.clear();
module_ptrs_.clear();
return ret_modules;
}
std::string HloModuleGroup::ToString() const {
std::ostringstream s;
s << "HloModuleGroup " << name() << "\n\n";
for (const HloModule* module : modules()) {
s << module->ToString() << "\n";
}
return s.str();
}
HloModuleGroupProto HloModuleGroup::ToProto() const {
HloModuleGroupProto proto;
proto.set_name(name());
for (const HloModule* module : modules()) {
*proto.add_hlo_modules() = module->ToProto();
}
return proto;
}
absl::StatusOr<HloModuleGroup> HloModuleGroup::CreateFromProto(
const HloModuleGroupProto& proto,
absl::Span<const HloModuleConfig> module_configs) {
TF_RET_CHECK(!proto.name().empty()) << "Module group name cannot be empty";
TF_RET_CHECK(proto.hlo_modules_size() > 0)
<< "Module group must have at least one HLO module";
TF_RET_CHECK(proto.hlo_modules_size() == module_configs.size());
std::vector<std::unique_ptr<HloModule>> modules;
for (int i = 0; i < proto.hlo_modules_size(); ++i) {
const HloModuleProto& module_proto = proto.hlo_modules(i);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
HloModule::CreateFromProto(module_proto, module_configs[i]));
modules.push_back(std::move(module));
}
return HloModuleGroup(proto.name(), absl::MakeSpan(modules));
}
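// Takes ownership of `module` and tags its metadata with this group's name.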
void HloModuleGroup::push_back(std::unique_ptr<HloModule> module) {
module->metadata()->set_module_group_name(name());
modules_.push_back(std::move(module));
module_ptrs_.push_back(modules_.back().get());
}
void HloModuleGroup::ReplaceModule(int index,
std::unique_ptr<HloModule> module) {
modules_.at(index)->MoveMetadataToModule(module.get());
modules_.at(index) = std::move(module);
module_ptrs_.at(index) = modules_.at(index).get();
}
std::ostream& operator<<(std::ostream& out, const HloModuleGroup& group) {
out << group.ToString();
return out;
}
} | #include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_group_metadata.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using ::testing::Property;
using ::testing::StrEq;
class HloModuleGroupTest : public HloTestBase {
protected:
HloModuleGroupTest() = default;
};
TEST_F(HloModuleGroupTest, SingleModule) {
const std::string text = R"(
HloModule simple_module
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(text));
HloModuleGroup group(std::move(module));
EXPECT_EQ(group.modules().size(), 1);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
TF_ASSERT_OK_AND_ASSIGN(HloModuleGroup group_copy,
HloModuleGroup::CreateFromProto(
group.ToProto(), {group.module(0).config()}));
EXPECT_EQ(group_copy.modules().size(), 1);
EXPECT_THAT(
group_copy.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
std::vector<std::unique_ptr<HloModule>> modules = group.ConsumeModules();
EXPECT_EQ(modules.size(), 1);
EXPECT_EQ(group.modules().size(), 0);
}
TEST_F(HloModuleGroupTest, MultipleModules) {
const std::string text_0 = R"(
HloModule module0
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
const std::string text_1 = R"(
HloModule module1
ENTRY %entry (a: f32[]) -> f32[] {
ROOT %a = f32[] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_0,
ParseAndReturnVerifiedModule(text_0));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_1,
ParseAndReturnVerifiedModule(text_1));
std::vector<std::unique_ptr<HloModule>> modules;
modules.push_back(std::move(module_0));
modules.push_back(std::move(module_1));
HloModuleGroup group(TestName(), absl::MakeSpan(modules));
EXPECT_EQ(group.modules().size(), 2);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
EXPECT_THAT(group.module(1).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter()));
TF_ASSERT_OK_AND_ASSIGN(HloModuleGroup group_copy,
HloModuleGroup::CreateFromProto(
group.ToProto(), {group.module(0).config(),
group.module(1).config()}));
EXPECT_EQ(group_copy.modules().size(), 2);
}
TEST_F(HloModuleGroupTest, BuildModuleGroupByPushBack) {
const std::string text_0 = R"(
HloModule module0
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
const std::string text_1 = R"(
HloModule module1
ENTRY %entry (a: f32[]) -> f32[] {
ROOT %a = f32[] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_0,
ParseAndReturnVerifiedModule(text_0));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_1,
ParseAndReturnVerifiedModule(text_1));
HloModuleGroup group(TestName());
group.push_back(std::move(module_0));
group.push_back(std::move(module_1));
EXPECT_EQ(group.modules().size(), 2);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
EXPECT_THAT(group.module(1).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter()));
}
TEST_F(HloModuleGroupTest, ModuleGroupCompanionOrder) {
constexpr char text[] = R"(
HloModule module_%d
while_cond {
param = s32[] parameter(0)
ROOT p = pred[] constant(true)
}
while_body {
param = s32[] parameter(0)
token.s = token[] after-all()
token.r = token[] after-all()
send = (s32[], u32[], token[]) send(param, token.s), channel_id=%d
send-done = token[] send-done(send), channel_id=%d
recv = (s32[], u32[], token[]) recv(token.r), channel_id=%d
recv-done = (s32[], token[]) recv-done(recv), channel_id=%d
ROOT data = s32[] get-tuple-element(recv-done), index=0
}
ENTRY entry {
while_init = s32[] constant(1)
ROOT while = s32[] while(while_init), condition=while_cond, body=while_body
}
)";
const int64_t kTrialCount = 5;
const int64_t kDeviceCount = 10;
std::vector<int64_t> companion_order;
for (int64_t t = 0; t < kTrialCount; ++t) {
HloModuleGroup group(TestName());
for (int64_t i = 0; i < kDeviceCount; ++i) {
const int64_t send_channel = i;
const int64_t recv_channel = i == 0 ? kDeviceCount - 1 : i - 1;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(absl::StrFormat(
text, i, send_channel, send_channel,
recv_channel, recv_channel)));
group.push_back(std::move(module));
}
ASSERT_EQ(group.modules().size(), kDeviceCount);
TF_ASSERT_OK_AND_ASSIGN(auto metadata,
HloModuleGroupMetadata::Build(group.modules()));
ASSERT_EQ(metadata->companion_sets().size(), 1);
std::vector<int64_t> module_ids;
const auto& companion_sets = *metadata->companion_sets()[0];
module_ids.reserve(companion_sets.size());
for (HloInstruction* companion : companion_sets) {
module_ids.push_back(metadata->GetModuleId(companion->GetModule()));
}
if (t == 0) {
companion_order = module_ids;
} else {
EXPECT_TRUE(absl::c_equal(companion_order, module_ids));
}
}
}
TEST_F(HloModuleGroupTest, ReplaceModuleMetadata) {
auto old_module = CreateNewVerifiedModule();
int old_module_id = old_module->unique_id();
old_module->metadata()->RecordPassStart();
TF_EXPECT_OK(old_module->metadata()->set_current_pass_name("fake pass"));
HloModuleGroup group(std::move(old_module));
EXPECT_EQ(group.module(0).metadata()->proto().module_group_name(),
group.name());
auto new_module = CreateNewVerifiedModule();
group.ReplaceModule(0, std::move(new_module));
EXPECT_NE(group.module(0).unique_id(), old_module_id);
const HloModuleMetadataProto& module_metadata =
group.module(0).metadata()->proto();
EXPECT_EQ(module_metadata.canonical_module_id(), old_module_id);
const HloPassMetadata& pass_metadata =
*module_metadata.pass_metadata().rbegin();
EXPECT_THAT(pass_metadata,
Property(&HloPassMetadata::pass_name, StrEq("fake pass")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_module_group.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_group_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
deabf8d9-6a66-4e47-89da-b1da47e70165 | cpp | google/cel-cpp | legacy_type_reflector | common/values/legacy_type_reflector.h | common/legacy_type_reflector_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_VALUES_LEGACY_TYPE_REFLECTOR_H_
#define THIRD_PARTY_CEL_CPP_COMMON_VALUES_LEGACY_TYPE_REFLECTOR_H_
#include "common/type_reflector.h"
#endif  // THIRD_PARTY_CEL_CPP_COMMON_VALUES_LEGACY_TYPE_REFLECTOR_H_ | #include <utility>
#include "absl/status/status.h"
#include "common/legacy_value.h"
#include "common/memory.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "common/values/legacy_value_manager.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::common_internal::LegacyValueManager;
using ::cel::interop_internal::TestOnly_IsLegacyListBuilder;
using ::cel::interop_internal::TestOnly_IsLegacyMapBuilder;
class TypeReflectorLegacyTest
: public common_internal::ThreadCompatibleValueTest<> {};
TEST_P(TypeReflectorLegacyTest, NewListValueBuilderLegacyOptimized) {
LegacyValueManager manager(memory_manager(), TypeReflector::LegacyBuiltin());
ASSERT_OK_AND_ASSIGN(auto builder,
manager.NewListValueBuilder(manager.GetDynListType()));
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_TRUE(TestOnly_IsLegacyListBuilder(*builder));
break;
case MemoryManagement::kReferenceCounting:
EXPECT_FALSE(TestOnly_IsLegacyListBuilder(*builder));
break;
}
}
TEST_P(TypeReflectorLegacyTest, NewMapValueBuilderLegacyOptimized) {
LegacyValueManager manager(memory_manager(), TypeReflector::LegacyBuiltin());
ASSERT_OK_AND_ASSIGN(auto builder,
manager.NewMapValueBuilder(manager.GetDynDynMapType()));
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_TRUE(TestOnly_IsLegacyMapBuilder(*builder));
break;
case MemoryManagement::kReferenceCounting:
EXPECT_FALSE(TestOnly_IsLegacyMapBuilder(*builder));
break;
}
}
TEST_P(TypeReflectorLegacyTest, ListImplementationNext) {
LegacyValueManager manager(memory_manager(), TypeReflector::LegacyBuiltin());
ASSERT_OK_AND_ASSIGN(auto builder,
manager.NewListValueBuilder(manager.GetDynListType()));
EXPECT_OK(builder->Add(IntValue(1)));
EXPECT_OK(builder->Add(IntValue(2)));
EXPECT_OK(builder->Add(IntValue(3)));
EXPECT_EQ(builder->Size(), 3);
EXPECT_FALSE(builder->IsEmpty());
auto value = std::move(*builder).Build();
EXPECT_THAT(value.Size(), IsOkAndHolds(3));
ASSERT_OK_AND_ASSIGN(auto iterator, value.NewIterator(manager));
while (iterator->HasNext()) {
EXPECT_OK(iterator->Next(manager));
}
EXPECT_FALSE(iterator->HasNext());
EXPECT_THAT(iterator->Next(manager),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
INSTANTIATE_TEST_SUITE_P(Default, TypeReflectorLegacyTest,
testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/legacy_type_reflector.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/legacy_type_reflector_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
5f7e1e07-e422-4212-9406-7b918c7a1893 | cpp | tensorflow/tensorflow | simulator | third_party/xla/xla/service/memory_space_assignment/simulator.cc | third_party/xla/xla/service/memory_space_assignment/simulator_test.cc | #include "xla/service/memory_space_assignment/simulator.h"
#include <algorithm>
#include <cstdint>
#include <list>
#include <memory>
#include <optional>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/layout.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace memory_space_assignment {
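// Records, for each instruction, the output indices whose defining allocation
// lives in alternate memory and the (operand number, operand index) pairs
// covered by allocation uses.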
void RuntimeSimulator::InitializeAlternateMemoryMap(
const AllocationSequence& allocations) {
outputs_in_alternate_memory_map_.clear();
operands_in_alternate_memory_map_.clear();
for (auto& allocation : allocations) {
if (!allocation->is_copy_allocation()) {
if (allocation->memory_space() == MemorySpace::kAlternate) {
const HloInstruction* defining_instruction =
allocation->defining_position().instruction;
outputs_in_alternate_memory_map_[defining_instruction].push_back(
allocation->defining_position().index);
}
}
for (auto& hlo_use : allocation->uses()) {
const HloInstruction* use_instruction = hlo_use.instruction;
operands_in_alternate_memory_map_[use_instruction].push_back(
std::make_pair(hlo_use.operand_number, hlo_use.operand_index));
}
}
}
float RuntimeSimulator::SimulateElapsedTimeWithoutAsyncCopyLikes(
const HloLiveRange& hlo_live_range, const AllocationSequence& allocations) {
InitializeAlternateMemoryMap(allocations);
const auto& instruction_sequence =
hlo_live_range.flattened_instruction_sequence().instructions();
float total_elapsed = 0.0;
for (const HloInstruction* instruction : instruction_sequence) {
if (instruction->opcode() == HloOpcode::kWhile) {
continue;
}
absl::Span<const ShapeIndex> outputs_in_alternate_memory;
auto output_it = outputs_in_alternate_memory_map_.find(instruction);
if (output_it != outputs_in_alternate_memory_map_.end()) {
outputs_in_alternate_memory = absl::MakeSpan(output_it->second);
}
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_memory;
auto operand_it = operands_in_alternate_memory_map_.find(instruction);
if (operand_it != operands_in_alternate_memory_map_.end()) {
operands_in_alternate_memory = absl::MakeSpan(operand_it->second);
}
float instruction_elapsed_per_invoke =
cost_analysis_->GetInstructionElapsedInAlternateMemory(
*instruction, operands_in_alternate_memory,
outputs_in_alternate_memory);
float total_trip_count = cost_analysis_->CalculateNestTripCount(
instruction, &cost_analysis_cache_);
total_elapsed += total_trip_count * instruction_elapsed_per_invoke;
}
return total_elapsed;
}
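// Returns true if `instruction` begins an asynchronous copy-like operation:
// a copy-start, or an async-start wrapping a slice.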
bool IsAsyncCopyLikeStart(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kCopyStart ||
(instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice);
}
bool IsAsyncCopyLikeDone(const HloInstruction* instruction) {
return (instruction->opcode() == HloOpcode::kCopyDone ||
(instruction->opcode() == HloOpcode::kAsyncDone &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice));
}
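// Classifies an async copy-like start as default->alternate,
// alternate->default, or unsupported by comparing the memory space of its
// operand with that of the corresponding done instruction's output.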
MemoryTransferDirection GetAsyncCopyLikeDirection(
const HloInstruction* async_copy_like_start,
int64_t alternate_memory_space) {
CHECK(IsAsyncCopyLikeStart(async_copy_like_start));
int64_t operand_memory_space =
async_copy_like_start->operand(0)->shape().layout().memory_space();
std::optional<int64_t> output_memory_space;
for (const HloInstruction* user : async_copy_like_start->users()) {
if (user->opcode() == HloOpcode::kCopyDone ||
user->opcode() == HloOpcode::kAsyncDone) {
output_memory_space.emplace(user->shape().layout().memory_space());
break;
}
}
if (!output_memory_space.has_value()) {
return MemoryTransferDirection::kUnsupported;
}
if (operand_memory_space == xla::Layout::kDefaultMemorySpace &&
output_memory_space == alternate_memory_space) {
return MemoryTransferDirection::kDefaultToAlternate;
}
if (operand_memory_space == alternate_memory_space &&
output_memory_space == xla::Layout::kDefaultMemorySpace) {
return MemoryTransferDirection::kAlternateToDefault;
}
return MemoryTransferDirection::kUnsupported;
}
const std::list<OutstandingAsyncCopyLike>&
RuntimeSimulator::GetOutstandingReadDefaultQueue() const {
return outstanding_read_default_queue_;
}
const std::list<OutstandingAsyncCopyLike>&
RuntimeSimulator::GetOutstandingWriteDefaultQueue() const {
return outstanding_write_default_queue_;
}
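// Subtracts `processed_bytes` from the transfer at the front of the queue.
// If that transfer completes, pops it and returns its start instruction;
// otherwise returns nullptr.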
const HloInstruction* RuntimeSimulator::RemoveBytesFromQueueIfNotEmpty(
std::list<OutstandingAsyncCopyLike>& async_copy_like_queue,
float processed_bytes) {
if (async_copy_like_queue.empty()) return nullptr;
CHECK_GE(async_copy_like_queue.front().remaining_bytes_to_transfer,
processed_bytes);
async_copy_like_queue.front().remaining_bytes_to_transfer -= processed_bytes;
if (async_copy_like_queue.front().remaining_bytes_to_transfer == 0.0) {
const HloInstruction* retired_instruction =
async_copy_like_queue.front().copy_like_start_inst;
async_copy_like_queue.pop_front();
return retired_instruction;
}
return nullptr;
}
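// Simulates waiting for the async copy that `copy_like_done_instruction`
// depends on, draining earlier transfers in the same queue first. Bandwidth
// is halved whenever the opposite-direction queue is non-empty.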
float RuntimeSimulator::SimulateAsyncCopyLikeDone(
const HloInstruction* copy_like_done_instruction) {
const HloInstruction* copy_like_start_instruction =
copy_like_done_instruction->operand(0);
MemoryTransferDirection direction = GetAsyncCopyLikeDirection(
copy_like_start_instruction, alternate_memory_space_);
if (direction == MemoryTransferDirection::kUnsupported) {
LOG(WARNING) << "Unsupported memory transfer direction for copy-done: "
<< copy_like_done_instruction->ToString();
return 0.0;
}
std::list<OutstandingAsyncCopyLike>& same_direction_queue =
direction == MemoryTransferDirection::kDefaultToAlternate
? outstanding_read_default_queue_
: outstanding_write_default_queue_;
std::list<OutstandingAsyncCopyLike>& opposite_direction_queue =
direction == MemoryTransferDirection::kDefaultToAlternate
? outstanding_write_default_queue_
: outstanding_read_default_queue_;
if (absl::c_find_if(same_direction_queue,
[&](const OutstandingAsyncCopyLike& async_copy_like) {
return async_copy_like.copy_like_start_inst ==
copy_like_start_instruction;
}) == same_direction_queue.end()) {
return 0.0;
}
float elapsed_time = 0.0;
const HloInstruction* retired_instruction_in_same_direction_queue = nullptr;
do {
float bytes_to_process =
same_direction_queue.front().remaining_bytes_to_transfer;
float available_bandwidth = cost_analysis_->base_costs().BytesPerSecond();
if (!opposite_direction_queue.empty()) {
available_bandwidth *= 0.5;
bytes_to_process = std::min(
bytes_to_process,
opposite_direction_queue.front().remaining_bytes_to_transfer);
}
elapsed_time += bytes_to_process / available_bandwidth;
RemoveBytesFromQueueIfNotEmpty(opposite_direction_queue, bytes_to_process);
retired_instruction_in_same_direction_queue =
RemoveBytesFromQueueIfNotEmpty(same_direction_queue, bytes_to_process);
} while (retired_instruction_in_same_direction_queue !=
copy_like_start_instruction);
return elapsed_time;
}
float RuntimeSimulator::SimulateComputeInstruction(
const HloInstruction* instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_memory,
absl::Span<const ShapeIndex> outputs_in_alternate_memory) {
float default_memory_idle_time =
cost_analysis_->GetDefaultMemoryBandwidthIdleTime(
*instruction, operands_in_alternate_memory,
outputs_in_alternate_memory);
ProcessAsyncCopyLikesInIdleTime(default_memory_idle_time);
float inst_elapsed = cost_analysis_->GetInstructionElapsedInAlternateMemory(
*instruction, operands_in_alternate_memory, outputs_in_alternate_memory);
return inst_elapsed;
}
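// Uses up to `time` seconds of otherwise idle default-memory bandwidth to
// make progress on outstanding async copies; reads and writes share the
// bandwidth evenly while both queues are non-empty.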
void RuntimeSimulator::ProcessAsyncCopyLikesInIdleTime(float time) {
if (time <= 0.0) {
return;
}
float remaining_simulation_time = time;
while ((!outstanding_read_default_queue_.empty() ||
!outstanding_write_default_queue_.empty()) &&
remaining_simulation_time > 0.0) {
float available_bandwidth = cost_analysis_->base_costs().BytesPerSecond();
if (!outstanding_read_default_queue_.empty() &&
!outstanding_write_default_queue_.empty()) {
available_bandwidth *= 0.5;
}
float bytes_to_process = available_bandwidth * remaining_simulation_time;
if (!outstanding_read_default_queue_.empty()) {
bytes_to_process = std::min(
bytes_to_process,
outstanding_read_default_queue_.front().remaining_bytes_to_transfer);
}
if (!outstanding_write_default_queue_.empty()) {
bytes_to_process = std::min(
bytes_to_process,
outstanding_write_default_queue_.front().remaining_bytes_to_transfer);
}
float real_elapsed_time = bytes_to_process / available_bandwidth;
remaining_simulation_time -= real_elapsed_time;
RemoveBytesFromQueueIfNotEmpty(outstanding_read_default_queue_,
bytes_to_process);
RemoveBytesFromQueueIfNotEmpty(outstanding_write_default_queue_,
bytes_to_process);
}
}
float RuntimeSimulator::SimulateElapsedTime(
const HloModule* hlo_module, const AllocationSequence& allocations) {
InitializeAlternateMemoryMap(allocations);
std::unique_ptr<xla::HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(hlo_module).value();
std::unique_ptr<HloLiveRange> hlo_live_range =
HloLiveRange::Run(hlo_module->schedule(), *alias_analysis,
hlo_module->entry_computation())
.value();
CHECK_GT(cost_analysis_->base_costs().BytesPerSecond(), 0.0);
float total_elapsed = 0.0;
const auto& instruction_sequence =
hlo_live_range->flattened_instruction_sequence().instructions();
for (const HloInstruction* instruction : instruction_sequence) {
float inst_elapsed = 0.0;
if (instruction->opcode() == HloOpcode::kWhile) {
continue;
}
if (instruction->parent()->IsAsyncComputation()) {
continue;
}
if (IsAsyncCopyLikeStart(instruction)) {
MemoryTransferDirection direction =
GetAsyncCopyLikeDirection(instruction, alternate_memory_space_);
const Shape& transfer_shape =
(instruction->opcode() == HloOpcode::kCopyStart)
? instruction->operand(0)->shape()
: ShapeUtil::GetSubshape(instruction->shape(),
                                       /*index=*/{1});
float transfer_bytes = static_cast<float>(
cost_analysis_->base_costs().GetShapeSize(transfer_shape));
if (direction == MemoryTransferDirection::kDefaultToAlternate) {
outstanding_read_default_queue_.push_back(
OutstandingAsyncCopyLike{instruction, transfer_bytes});
} else if (direction == MemoryTransferDirection::kAlternateToDefault) {
outstanding_write_default_queue_.push_back(
OutstandingAsyncCopyLike{instruction, transfer_bytes});
} else {
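        // Unsupported direction: the transfer does not move data between
        // default and alternate memory, so the simulator does not track it.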
}
} else if (IsAsyncCopyLikeDone(instruction)) {
inst_elapsed = SimulateAsyncCopyLikeDone(instruction);
} else {
absl::Span<const ShapeIndex> outputs_in_alternate_memory;
auto output_it = outputs_in_alternate_memory_map_.find(instruction);
if (output_it != outputs_in_alternate_memory_map_.end()) {
outputs_in_alternate_memory = absl::MakeSpan(output_it->second);
}
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_memory;
auto operand_it = operands_in_alternate_memory_map_.find(instruction);
      if (operand_it != operands_in_alternate_memory_map_.end()) {
        operands_in_alternate_memory = absl::MakeSpan(operand_it->second);
      }
inst_elapsed =
SimulateComputeInstruction(instruction, operands_in_alternate_memory,
outputs_in_alternate_memory);
}
if (inst_elapsed > 0.0) {
float total_trip_count = cost_analysis_->CalculateNestTripCount(
instruction, &cost_analysis_cache_);
total_elapsed += inst_elapsed * total_trip_count;
}
}
return total_elapsed;
}
}
} | #include "xla/service/memory_space_assignment/simulator.h"
#include <cstdint>
#include <list>
#include <memory>
#include <string_view>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::CostAnalysis;
using memory_space_assignment::CostAnalysisOptions;
using memory_space_assignment::RuntimeSimulator;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
constexpr int64_t kPointerSize = 8;
constexpr int64_t kAlternateMemorySpace = 1;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
class MemorySpaceAssignmentSimulatorTest : public HloTestBase {
protected:
absl::Status Initialize(absl::string_view hlo_string) {
TF_ASSIGN_OR_RETURN(module_, ParseAndReturnVerifiedModule(hlo_string));
for (HloInstruction* inst : module_->entry_computation()->instructions()) {
instruction_map_[inst->name()] = inst;
if (inst->shape().has_layout() &&
inst->shape().layout().memory_space() == kAlternateMemorySpace) {
std::unique_ptr<xla::memory_space_assignment::Allocation> allocation =
std::make_unique<memory_space_assignment::PinnedAllocation>(
HloPosition{inst, {}},
memory_space_assignment::MemorySpace::kAlternate,
HeapSimulator::Chunk::FromOffsetSize(-1, -1),
                    /*start_time=*/0,
                    /*end_time=*/1, /*is_scoped_allocation=*/false);
for (HloInstruction* user : inst->users()) {
allocation->AddUse(HloUse{user, 0});
}
allocations_.push_back(std::move(allocation));
}
}
HloCostAnalysis::Options tpu_device_options;
tpu_device_options.shape_size = ShapeSize;
tpu_device_options.set_flops_per_second(1);
tpu_device_options.set_bytes_per_second(1);
hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(tpu_device_options);
TF_RETURN_IF_ERROR(
module_->entry_computation()->Accept(hlo_cost_analysis_.get()));
hlo_cost_analysis_costs_ =
std::make_unique<memory_space_assignment::HloCostAnalysisCosts>(
*hlo_cost_analysis_);
CostAnalysisOptions _options;
_options.alternate_mem_bandwidth_bytes_per_second = 2;
TF_ASSIGN_OR_RETURN(
cost_analysis_,
CostAnalysis::Create(*hlo_cost_analysis_costs_, _options, *module_));
TF_ASSIGN_OR_RETURN(alias_analysis_, HloAliasAnalysis::Run(module_.get()));
TF_ASSIGN_OR_RETURN(hlo_live_range_,
HloLiveRange::Run(module_->schedule(), *alias_analysis_,
module_->entry_computation()));
runtime_simulator_ = std::make_unique<RuntimeSimulator>(
cost_analysis_.get(), kAlternateMemorySpace);
return absl::OkStatus();
}
absl::flat_hash_map<std::string_view, const HloInstruction*> instruction_map_;
std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
std::unique_ptr<memory_space_assignment::HloCostAnalysisCosts>
hlo_cost_analysis_costs_;
std::unique_ptr<CostAnalysis> cost_analysis_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
std::unique_ptr<HloLiveRange> hlo_live_range_;
memory_space_assignment::AllocationSequence allocations_;
std::unique_ptr<RuntimeSimulator> runtime_simulator_;
std::unique_ptr<HloModule> module_;
};
TEST_F(MemorySpaceAssignmentSimulatorTest, SingleLayerLoop) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
%body {
%constant.1 = s32[] constant(1)
%param = (s32[]) parameter(0)
%count = s32[] get-tuple-element(%param), index=0
%increment = s32[] add(s32[] %count, s32[] %constant.1)
ROOT %loop_result = (s32[]) tuple(%increment)
}
%condition {
%param = (s32[]) parameter(0)
%constant.42 = s32[] constant(42)
%condition_input = s32[] get-tuple-element(%param), index=0
ROOT %greater = pred[] compare(s32[] %constant.42, s32[] %condition_input), direction=GT
}
ENTRY Entry {
%dummy_input = s32[] parameter(0)
%constant.0 = s32[] constant(0)
ROOT %while = (s32[]) while(tuple(%constant.0)), condition=%condition, body=%body
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
EXPECT_EQ(runtime_simulator_->SimulateElapsedTimeWithoutAsyncCopyLikes(
*hlo_live_range_, allocations_),
1226);
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
1226);
}
TEST_F(MemorySpaceAssignmentSimulatorTest, NestedLayerLoop) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
%inner.body {
%constant.1 = s32[] constant(1)
%param = (s32[]) parameter(0)
%count = s32[] get-tuple-element(%param), index=0
%increment = s32[] add(s32[] %count, s32[] %constant.1)
ROOT %loop_result = (s32[]) tuple(%increment)
}
%inner.condition {
%param = (s32[]) parameter(0)
%constant.42 = s32[] constant(42)
%condition_input = s32[] get-tuple-element(%param), index=0
ROOT %greater = pred[] compare(s32[] %constant.42, s32[] %condition_input), direction=GT
}
%outer.body {
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
%param = (s32[]) parameter(0)
%inner_while = (s32[]) while(tuple(%constant.0)), condition=%inner.condition, body=%inner.body
%count = s32[] get-tuple-element(%param), index=0
%increment = s32[] add(s32[] %count, s32[] %constant.1)
ROOT %loop_result = (s32[]) tuple(%increment)
}
%outer.condition {
%param = (s32[]) parameter(0)
%constant.27 = s32[] constant(27)
%condition_input = s32[] get-tuple-element(%param), index=0
ROOT %greater = pred[] compare(s32[] %constant.27, s32[] %condition_input), direction=GT
}
ENTRY Entry {
%constant.0 = s32[] constant(0)
ROOT %while_outer = (s32[]) while(tuple(%constant.0)), condition=%outer.condition, body=%outer.body
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
EXPECT_EQ(runtime_simulator_->SimulateElapsedTimeWithoutAsyncCopyLikes(
*hlo_live_range_, allocations_),
33893);
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
33893);
}
TEST_F(MemorySpaceAssignmentSimulatorTest, SingleAsyncCopyOverhead) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[1,1,1024,2048] parameter(0)
copy-start.1 = (f32[1,1,1024,2048]{0,1,2,3:S(1)}, f32[1,1,1024,2048], u32[]) copy-start(param_0)
ROOT copy-done.1 = f32[1,1,1024,2048]{0,1,2,3:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
memory_space_assignment::AllocationSequence allocations;
EXPECT_EQ(runtime_simulator_->SimulateElapsedTimeWithoutAsyncCopyLikes(
*hlo_live_range_, allocations_),
0);
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
8388608);
}
TEST_F(MemorySpaceAssignmentSimulatorTest, AsyncCopyWithComputationOverhead) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[8] parameter(0)
param_1 = f32[2] parameter(1)
copy-start.1 = (f32[8]{0:S(1)}, f32[8], u32[]) copy-start(param_0)
neg_compute = f32[2] negate(param_1)
ROOT copy-done.1 = f32[8]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_), 48);
}
TEST_F(MemorySpaceAssignmentSimulatorTest, SingleAsyncSliceCopyOverhead) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[3072,2048] parameter(0)
slice-start = ((f32[3072,2048]), f32[768,2048]{1,0:S(1)}, s32[]) slice-start(f32[3072,2048] param_0), slice={[1536:2304], [0:2048]}
ROOT slice-done = f32[768,2048]{1,0:T(8,128)S(1)} slice-done(((f32[3072,2048]), f32[768,2048]{1,0:S(1)}, s32[]) slice-start)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
memory_space_assignment::AllocationSequence allocations;
float expected_elapsed_time = 6291456;
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
expected_elapsed_time);
}
TEST_F(MemorySpaceAssignmentSimulatorTest,
AsyncCopyAndAsyncSliceAndComputeOverhead) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[2048] parameter(0)
param_1 = f32[64] parameter(1)
param_2 = f32[128] parameter(2)
slice-start = ((f32[2048]), f32[64]{0:S(1)}, s32[]) slice-start(f32[2048] param_0), slice={[0:64]}
copy-start = (f32[64]{0:S(1)}, f32[64], u32[]) copy-start(f32[64] param_1)
slice-done = f32[64]{0:S(1)} slice-done(((f32[2048]), f32[64]{0:S(1)}, s32[]) slice-start)
copy-done = f32[64]{0:S(1)} copy-done(copy-start)
copy-start-overlap = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(f32[128] param_2)
add = f32[64]{0:S(1)} add(slice-done, copy-done)
ROOT copy-done-overlap = f32[128]{0:S(1)} copy-done(copy-start-overlap)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
1024);
}
class SimulateAsyncCopyLikeDoneTest
: public MemorySpaceAssignmentSimulatorTest {
protected:
absl::Status Initialize(absl::string_view hlo_string) {
TF_RETURN_IF_ERROR(
MemorySpaceAssignmentSimulatorTest::Initialize(hlo_string));
if (instruction_map_.contains("copy-start.1")) {
outstanding_read_default_queue_.push_back(
memory_space_assignment::OutstandingAsyncCopyLike{
instruction_map_["copy-start.1"], 512});
}
if (instruction_map_.contains("copy-start.2")) {
outstanding_write_default_queue_.push_back(
memory_space_assignment::OutstandingAsyncCopyLike{
instruction_map_["copy-start.2"], 128});
}
runtime_simulator_ = std::make_unique<RuntimeSimulator>(
cost_analysis_.get(), kAlternateMemorySpace,
outstanding_read_default_queue_, outstanding_write_default_queue_);
return absl::OkStatus();
}
std::list<memory_space_assignment::OutstandingAsyncCopyLike>
outstanding_read_default_queue_;
std::list<memory_space_assignment::OutstandingAsyncCopyLike>
outstanding_write_default_queue_;
};
TEST_F(SimulateAsyncCopyLikeDoneTest, AsyncCopyAlreadyCompleted) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_done_inst = instruction_map_["copy-done.1"];
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_inst);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
float elapsed_time_for_completed_copy =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_inst);
EXPECT_EQ(elapsed_time_for_completed_copy, 0);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest, AsyncCopyFullBandwidth) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_done_inst = instruction_map_["copy-done.1"];
float copy_done_elapsed_time =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_inst);
EXPECT_EQ(copy_done_elapsed_time, 512);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest, AsyncCopySharedBandwidth) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32]{0:S(1)} parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
copy-start.2 = (f32[32], f32[32]{0:S(1)}, u32[]) copy-start(param_1)
copy-done.2 = f32[32] copy-done(copy-start.2)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
const HloInstruction* copy_done_2_inst = instruction_map_["copy-done.2"];
float copy_done_2_elapsed_time =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_2_inst);
EXPECT_EQ(copy_done_2_elapsed_time, 256);
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 384}}));
}
TEST_F(SimulateAsyncCopyLikeDoneTest, AsyncCopyTransferPartialProcess) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32]{0:S(1)} parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
copy-start.2 = (f32[32], f32[32]{0:S(1)}, u32[]) copy-start(param_1)
copy-done.2 = f32[32] copy-done(copy-start.2)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
const HloInstruction* copy_done_1_inst = instruction_map_["copy-done.1"];
const HloInstruction* copy_done_2_inst = instruction_map_["copy-done.2"];
float copy_done_2_elapsed_time =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_2_inst);
EXPECT_EQ(copy_done_2_elapsed_time, 256);
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 384}}));
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
float copy_done_1_elapsed_time =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_1_inst);
EXPECT_EQ(copy_done_1_elapsed_time, 384);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest,
SimulateComputeInstructionWithSingleAsyncCopy) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32] parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
neg = f32[32] negate(param_1)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
const HloInstruction* neg_inst = instruction_map_["neg"];
float compute_elapsed_time = runtime_simulator_->SimulateComputeInstruction(
      neg_inst, /*operands_in_alternate_memory=*/{},
      /*outputs_in_alternate_memory=*/{});
EXPECT_EQ(compute_elapsed_time, 256);
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 512}}));
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest,
SimulateComputeInstructionWithSharedBandwidth) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32]{0:S(1)} parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
copy-start.2 = (f32[32], f32[32]{0:S(1)}, u32[]) copy-start(param_1)
neg = f32[32] negate(param_1)
copy-done.2 = f32[32] copy-done(copy-start.2)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
const HloInstruction* copy_start_2_inst = instruction_map_["copy-start.2"];
float compute_elapsed_time = runtime_simulator_->SimulateComputeInstruction(
instruction_map_["neg"], {{0, {}}},
{});
EXPECT_EQ(compute_elapsed_time, 192);
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 480}}));
EXPECT_THAT(
runtime_simulator_->GetOutstandingWriteDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_2_inst, 96}}));
}
TEST_F(SimulateAsyncCopyLikeDoneTest,
SimulateComputeInstructionWithFullBandwidth) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32]{0:S(1)} parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
neg = f32[32] negate(param_1)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
float compute_elapsed_time = runtime_simulator_->SimulateComputeInstruction(
instruction_map_["neg"], {{0, {}}},
{});
EXPECT_EQ(compute_elapsed_time, 192);
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 448}}));
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest,
SimulateComputeInstructionWithEmptyQueues) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
ROOT neg = f32[128] negate(param_0)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
float compute_elapsed_time = runtime_simulator_->SimulateComputeInstruction(
instruction_map_["neg"], {},
{});
EXPECT_EQ(compute_elapsed_time, 1024);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/simulator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/simulator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3f4b2a34-7de6-42d3-a0cc-6171f8717ed0 | cpp | tensorflow/tensorflow | reduce_scatter_creator | third_party/xla/xla/service/gpu/transforms/reduce_scatter_creator.cc | third_party/xla/xla/service/gpu/transforms/reduce_scatter_creator_test.cc | #include "xla/service/gpu/transforms/reduce_scatter_creator.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_opt_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> ReduceScatterCreator::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
const HloModuleConfig &config = module->config();
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (HloComputation *computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction *instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() != HloOpcode::kAllReduce) {
continue;
}
auto *ar = Cast<HloAllReduceInstruction>(instruction);
auto ar_spec = MatchReduceScatter(ar, config.num_partitions(),
config.replica_count(),
                                        /*allow_multiple_split_dims=*/false,
                                        /*allow_intervening_reshape=*/true);
if (!ar_spec) {
VLOG(2) << "Cannot match reduce-scatter " << ar->ToString();
continue;
}
HloInstruction *ds = ar_spec->dynamic_slice;
const int64_t split_dim = ar_spec->split_dim;
Shape scatter_shape = ar->shape();
const int64_t split_dim_size = scatter_shape.dimensions(split_dim);
HloInstruction *rs_input = ar->mutable_operand(0);
const int64_t scatter_dim_size = split_dim_size / ar_spec->group_size;
TF_RET_CHECK(scatter_dim_size * ar_spec->group_size <= split_dim_size);
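      // When the split dimension is not evenly divisible by the group size,
      // slice the all-reduce input down to the largest evenly divisible
      // extent before forming the reduce-scatter.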
if (split_dim_size % ar_spec->group_size != 0) {
scatter_shape.set_dimensions(split_dim,
scatter_dim_size * ar_spec->group_size);
rs_input = computation->AddInstruction(HloInstruction::CreateSlice(
scatter_shape, rs_input,
std::vector<int64_t>(scatter_shape.rank(), 0),
scatter_shape.dimensions(),
std::vector<int64_t>(scatter_shape.rank(), 1)));
}
scatter_shape.set_dimensions(split_dim, scatter_dim_size);
std::optional<int64_t> channel_id;
if (ar->channel_id()) {
channel_id = next_channel_id++;
}
HloInstruction *ars =
computation->AddInstruction(HloInstruction::CreateReduceScatter(
scatter_shape, {rs_input}, ar->to_apply(), ar->device_list(),
ar->constrain_layout(), channel_id, ar->use_global_device_ids(),
ar_spec->split_dim));
HloInstruction *result = ars;
HloInstruction *reshape = nullptr;
if (ds->operand(0) != ar) {
reshape = ds->mutable_operand(0);
result = computation->AddInstruction(
HloInstruction::CreateReshape(ds->shape(), result));
}
TF_RETURN_IF_ERROR(ds->ReplaceAllUsesWith(result));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ds));
if (reshape) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(reshape));
}
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(ar));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/reduce_scatter_creator.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class GpuReduceScatterCreatorTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t num_replicas,
int64_t num_partitions, bool expect_change) {
HloModuleConfig config = GetModuleConfigForTest(
        /*replica_count=*/num_replicas,
        /*num_partitions=*/num_partitions);
config.set_use_spmd_partitioning(num_partitions > 1);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
auto changed = ReduceScatterCreator().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t AllReduceCount(std::unique_ptr<HloModule> &module) {
return CollectiveCount(module, HloOpcode::kAllReduce);
}
size_t ReduceScatterCount(std::unique_ptr<HloModule> &module) {
    return CollectiveCount(module, HloOpcode::kReduceScatter);
}
private:
size_t CollectiveCount(std::unique_ptr<HloModule> &module, HloOpcode opcode) {
return absl::c_count_if(
module->entry_computation()->instructions(),
[&opcode](HloInstruction *instr) { return instr->opcode() == opcode; });
}
};
TEST_F(GpuReduceScatterCreatorTest, AllReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/8,
                                               /*num_partitions=*/1,
                                               /*expect_change=*/true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 0) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithOffsetReshape) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%slice_size = s32[1] constant({4})
%offset = s32[1] multiply(%id, %slice_size)
%reshape = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %reshape, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/8,
                                               /*num_partitions=*/1,
                                               /*expect_change=*/true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 0) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithReshape) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
%reshape.1 = f32[32,16,64] reshape(%all-reduce)
ROOT %dynamic-slice = f32[4,16,64] dynamic-slice(%reshape.1, %offset, %zero, %zero),
dynamic_slice_sizes={4,16,64}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/8,
                                               /*num_partitions=*/1,
                                               /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::ReduceScatter(m::Parameter(0)))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithReshapeSplitDimModified) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[336,1024] parameter(0)
%all-reduce = f32[336,1024] all-reduce(%param), replica_groups={}, to_apply=%sum
%rid = u32[] replica-id()
%id = s32[] convert(%rid)
%slice_size = s32[] constant(128)
%offset = s32[] multiply(%id, %slice_size)
%zero = s32[] constant(0)
%reshape.1 = f32[4,84,1024] reshape(%all-reduce)
ROOT %dynamic-slice = f32[4,84,128] dynamic-slice(%reshape.1, %zero, %zero, %offset),
dynamic_slice_sizes={4,84,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/8,
                                               /*num_partitions=*/1,
                                               /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::ReduceScatter(m::Parameter(0)))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasDim2) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%rid_s32 = s32[] convert(%rid)
%slice_size = s32[] constant(16)
%offset = s32[] multiply(%rid_s32, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[32,8,16] dynamic-slice(%all-reduce, %zero, %zero, %offset),
dynamic_slice_sizes={32,8,16}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 2) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
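// Negative case: the offset table {0,1,2,3,4,5,6,8} does not describe equal
// contiguous shards of the all-reduce result, so no reduce-scatter should be
// formed (note the final `false` argument to RunPass below).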
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWrongOffsets) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,8})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, false));
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasIotaTable) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} iota(), iota_dimension=0
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 2, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupedReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{4,5,6,7}}, to_apply=%sum
%gtable = s32[8]{0} constant({3,0,2,1,0,1,2,3})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%gtable, %rid), dynamic_slice_sizes={1}
%reshape.0 = s32[] reshape(%id)
%table = s32[4]{0} constant({0,8,16,24})
%offset = s32[1] dynamic-slice(%table, %reshape.0), dynamic_slice_sizes={1}
%reshape.1 = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %reshape.1, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 2, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllPartitions) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{0},{1}}, to_apply=%sum, channel_id=1
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%pid = u32[] partition-id()
%id = s32[1] dynamic-slice(%table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 8, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
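// Only the first all-reduce feeds the dynamic-slice pattern; the second,
// synchronizing all-reduce must survive, leaving one all-reduce and one
// reduce-scatter in the module.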
TEST_F(GpuReduceScatterCreatorTest, AllReduceFollowedByAllReduce) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce.scattered = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{0,1,2,3,4,5,6,7},{8,9,10,11,12,13,14,15}}, to_apply=%sum, use_global_device_ids=true, channel_id=1
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%pid = u32[] partition-id()
%id = s32[1] dynamic-slice(%table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
%dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce.scattered, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
ROOT %all-reduce.sync = f32[4,8,128]{2,1,0} all-reduce(%dynamic-slice),
replica_groups={{0,8},{1,9},{2,10},{3,11},{4,12},{5,13},{6,14},{7,15}}, to_apply=%sum, use_global_device_ids=true, channel_id=2
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 8, true));
EXPECT_EQ(AllReduceCount(module), 1);
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobals) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{4,5,6,7}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%rid = u32[] replica-id()
%pcount = u32[] constant(4)
%ridxp = u32[] multiply(%rid, %pcount)
%gid = u32[] add(%ridxp, %pid)
%gtable = s32[8]{0} constant({3,0,2,1,0,1,2,3})
%id = s32[1] dynamic-slice(%gtable, %gid), dynamic_slice_sizes={1}
%reshape.0 = s32[] reshape(%id)
%table = s32[4]{0} constant({0,8,16,24})
%offset = s32[1] dynamic-slice(%table, %reshape.0), dynamic_slice_sizes={1}
%reshape.1 = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %reshape.1, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 4, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobalsOrthogonalReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{5,7,6,4}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[4]{0} constant({3,0,2,1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(8)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %mul, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 4, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobalsNonOrthogonalReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{7,5,6,4}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[4]{0} constant({3,0,2,1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(8)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %mul, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 4, false));
}
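// Seven elements cannot be split evenly across two shards of size three, so
// the operand must first be sliced down to the evenly divisible portion;
// hence the ReduceScatter(Slice(...)) matcher below.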
TEST_F(GpuReduceScatterCreatorTest, NonUniformSplit) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[1,7]{1,0} parameter(0)
%all-reduce = f32[1,7]{1,0} all-reduce(%param),
replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[8]{0} constant({0, 1, 0, 1, 0, 1, 0, 1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(3)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[1,3] dynamic-slice(%all-reduce, %zero, %mul),
dynamic_slice_sizes={1,3}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 1, 8, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Slice(m::Parameter(0)))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduce_scatter_creator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduce_scatter_creator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7dc63e63-9db6-4acf-bf59-96ebbb710399 | cpp | google/tensorstore | transform_broadcastable_array | tensorstore/index_space/transform_broadcastable_array.cc | tensorstore/index_space/transform_broadcastable_array_test.cc | #include "tensorstore/index_space/transform_broadcastable_array.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
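// Maps `output_array`, which must be broadcastable to the output space of
// `transform` (or to `output_domain` when one is supplied), to the equivalent
// array in the transform's input space: broadcast to a suitable output
// domain, pull the result back through `transform`, materialize it, and
// finally un-broadcast.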
Result<SharedArray<const void>> TransformOutputBroadcastableArray(
IndexTransformView<> transform, SharedArrayView<const void> output_array,
IndexDomainView<> output_domain) {
assert(transform.valid());
Box<dynamic_rank(kMaxRank)> broadcast_domain(transform.output_rank());
if (output_domain.valid()) {
broadcast_domain = output_domain.box();
} else {
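    // No explicit output domain was supplied: start from the transform's
    // computed output range, then adjust each dimension below according to
    // its output index map.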
TENSORSTORE_RETURN_IF_ERROR(
tensorstore::GetOutputRange(transform, broadcast_domain));
const DimensionIndex output_rank = transform.output_rank();
for (DimensionIndex output_dim = 0; output_dim < output_rank;
++output_dim) {
const auto map = transform.output_index_maps()[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::array: {
broadcast_domain[output_dim] = IndexInterval();
break;
}
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
if (map.stride() != 1 && map.stride() != -1) {
broadcast_domain[output_dim] = IndexInterval::Infinite();
} else {
const DimensionIndex output_array_dim =
output_dim + output_array.rank() - output_rank;
if (output_array_dim >= 0 &&
transform.domain()[input_dim].optionally_implicit_interval() ==
OptionallyImplicitIndexInterval{IndexInterval::Infinite(),
true, true}) {
broadcast_domain[output_dim] =
output_array.domain()[output_array_dim];
}
}
break;
}
}
}
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto broadcast_output_array,
tensorstore::BroadcastArray(std::move(output_array), broadcast_domain));
TENSORSTORE_ASSIGN_OR_RETURN(auto input_array,
std::move(broadcast_output_array) | transform |
tensorstore::Materialize());
return UnbroadcastArray(std::move(input_array));
}
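// Maps `input_array`, broadcastable to the input space of `transform`, to the
// output space without copying element data. Only transforms whose output
// maps are all unit-stride single-input-dimension maps, with no input
// dimension used twice, are supported; any other map is an error.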
Result<SharedArray<const void>> TransformInputBroadcastableArray(
IndexTransformView<> transform, SharedArrayView<const void> input_array) {
assert(transform.valid());
SharedArray<const void> output_array;
output_array.layout().set_rank(transform.output_rank());
DimensionSet seen_input_dims;
ByteStridedPointer<const void> data_pointer =
input_array.byte_strided_pointer();
const DimensionIndex input_rank = transform.input_rank();
for (DimensionIndex output_dim = 0; output_dim < output_array.rank();
++output_dim) {
const auto map = transform.output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Cannot transform input array through ",
map.method(), " output index map"));
}
const DimensionIndex input_dim = map.input_dimension();
if (seen_input_dims[input_dim]) {
return absl::InvalidArgumentError(
"Cannot transform input array with multiple "
"output dimensions mapping to the same input dimension");
}
if (std::abs(map.stride()) != 1) {
return absl::InvalidArgumentError(
"Cannot transform input array through "
"non-unit-stride output index map");
}
seen_input_dims[input_dim] = true;
const DimensionIndex input_array_dim =
input_array.rank() - input_rank + input_dim;
if (input_array_dim < 0) {
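      // The input array lacks this (implicitly broadcast) dimension, so emit
      // a size-1, stride-0 output dimension in its place.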
output_array.shape()[output_dim] = 1;
output_array.byte_strides()[output_dim] = 0;
} else {
const Index size = input_array.shape()[input_array_dim];
output_array.shape()[output_dim] = size;
const Index byte_stride = input_array.byte_strides()[input_array_dim];
const Index stride = map.stride();
output_array.byte_strides()[output_dim] =
internal::wrap_on_overflow::Multiply(byte_stride, stride);
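      // A stride of -1 reverses the dimension: point at the last element so
      // that the negated byte stride walks backwards through memory.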
if (stride == -1 && size != 0) {
data_pointer +=
internal::wrap_on_overflow::Multiply(byte_stride, size - 1);
}
}
}
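  // Every non-degenerate dimension of `input_array` (size != 1 and non-zero
  // byte stride) must have been consumed above; otherwise data would be
  // silently dropped.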
for (DimensionIndex input_array_dim = 0; input_array_dim < input_array.rank();
++input_array_dim) {
if (input_array.shape()[input_array_dim] == 1 ||
input_array.byte_strides()[input_array_dim] == 0) {
continue;
}
const DimensionIndex input_dim =
input_rank - input_array.rank() + input_array_dim;
if (input_dim < 0 || !seen_input_dims[input_dim]) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Cannot transform input array; "
"dimension ",
input_array_dim, " cannot be mapped"));
}
}
output_array.element_pointer() = SharedElementPointer<const void>(
std::shared_ptr<const void>(std::move(input_array.pointer()),
data_pointer.get()),
input_array.dtype());
return UnbroadcastArray(std::move(output_array));
}
} | #include "tensorstore/index_space/transform_broadcastable_array.h"
#include <stddef.h>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexDomainView;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::SharedArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::span;
using ::tensorstore::TransformInputBroadcastableArray;
using ::tensorstore::TransformOutputBroadcastableArray;
void TestRoundTrip(IndexTransformView<> transform,
SharedArrayView<const void> input_array,
SharedArrayView<const void> output_array,
IndexDomainView<> output_domain) {
SCOPED_TRACE(tensorstore::StrCat(
"transform=", transform, ", output_domain=", output_domain,
", input_array.shape=", input_array.shape(),
", output_array.shape=", output_array.shape()));
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, output_array, output_domain),
::testing::Optional(input_array));
EXPECT_THAT(TransformInputBroadcastableArray(transform, input_array),
::testing::Optional(output_array));
}
void TestRoundTrip(IndexTransformView<> transform,
SharedArrayView<const void> output_array,
IndexDomainView<> output_domain = IndexDomainView<>(),
bool test_inverse = false) {
SCOPED_TRACE(tensorstore::StrCat(
"transform=", transform, ", output_domain=", output_domain,
", output_array.shape=", output_array.shape()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_array,
TransformOutputBroadcastableArray(
transform, output_array, output_domain));
EXPECT_THAT(TransformInputBroadcastableArray(transform, input_array),
::testing::Optional(output_array));
if (test_inverse) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inverse_transform,
tensorstore::InverseTransform(transform));
EXPECT_THAT(
TransformInputBroadcastableArray(inverse_transform, output_array),
::testing::Optional(input_array));
}
}
SharedArray<int> MakeTestArray(span<const Index> shape) {
auto array = tensorstore::AllocateArray<int>(shape);
for (Index i = 0, num_elements = array.num_elements(); i < num_elements;
++i) {
array.data()[i] = i;
}
return array;
}
TEST(RoundTripTest, IdentityTransform) {
for (DimensionIndex rank = 0; rank <= 3; ++rank) {
SCOPED_TRACE(tensorstore::StrCat("rank=", rank));
std::vector<Index> shape(rank);
for (DimensionIndex dim = 0; dim < rank; ++dim) {
shape[dim] = dim + 2;
}
auto array = MakeTestArray(shape);
TestRoundTrip(tensorstore::IdentityTransform(shape), array, array,
tensorstore::IndexDomain<>());
TestRoundTrip(tensorstore::IdentityTransform(rank), array, array,
tensorstore::IndexDomain<>());
TestRoundTrip(tensorstore::IdentityTransform(shape), array, array,
tensorstore::IdentityTransform(shape).domain());
}
}
TEST(RoundTripTest, RandomInvertibleTransform) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_BROADCASTABLE_ARRAY_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto array = tensorstore::UnbroadcastArray(MakeTestArray(box.shape()));
auto domain = IndexDomain(box);
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain);
TestRoundTrip(transform, array);
TestRoundTrip(transform, array, domain);
}
}
TEST(RoundTripTest, RandomInvertibleTransformNoNewDims) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_BROADCASTABLE_ARRAY_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto array = tensorstore::UnbroadcastArray(MakeTestArray(box.shape()));
auto domain = IndexDomain(box);
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
p.max_new_dims = 0;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain, p);
TestRoundTrip(transform, array, IndexDomain(), true);
TestRoundTrip(transform, array, domain, true);
}
}
TEST(TransformOutputBroadcastableArrayTest, ConstantMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(1, 2)
.output_single_input_dimension(0, 5, -1, 0)
.output_constant(1, 42)
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({3, 2, 1})));
}
TEST(TransformOutputBroadcastableArrayTest, NonUnitStrideMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(2, 2)
.output_single_input_dimension(0, 5, -1, 0)
.output_single_input_dimension(1, 42, 2, 1)
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({{3}, {2}, {1}})));
}
TEST(TransformOutputBroadcastableArrayTest, ArrayMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 2)
.input_shape({3})
.output_single_input_dimension(0, 5, -1, 0)
.output_index_array(1, 20, 1, MakeArray<Index>({0, 5, 10}))
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({3, 2, 1})));
}
TEST(TransformInputBroadcastableArrayTest, ConstantMap) {
auto array = MakeScalarArray<int>(42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(0, 1).output_constant(0, 42).Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array through constant output index map"));
}
TEST(TransformInputBroadcastableArrayTest, NonUnitStrideMap) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(1, 1)
.output_single_input_dimension(0, 5, 2, 0)
.Finalize());
EXPECT_THAT(TransformInputBroadcastableArray(transform, array),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot transform input array through "
"non-unit-stride output index map"));
}
TEST(TransformInputBroadcastableArrayTest, ArrayMap) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 20, 1, MakeArray<Index>({0, 5, 10}))
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array through array output index map"));
}
TEST(TransformInputBroadcastableArrayTest, Diagonal) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(1, 2)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot transform input array with multiple "
"output dimensions mapping to the same input dimension"));
}
TEST(TransformInputBroadcastableArrayTest, UnmappedNoError) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(2, 1)
.output_single_input_dimension(0, 1)
.Finalize());
EXPECT_THAT(TransformInputBroadcastableArray(transform, array),
::testing::Optional(array));
}
TEST(TransformInputBroadcastableArrayTest, UnmappedError) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(2, 1)
.output_single_input_dimension(0, 0)
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array; dimension 0 cannot be mapped"));
}
TEST(TransformInputBroadcastableArrayTest, ExtraDimensionError) {
auto array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
EXPECT_THAT(
TransformInputBroadcastableArray(tensorstore::IdentityTransform(1),
array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array; dimension 0 cannot be mapped"));
}
TEST(TransformInputBroadcastableArrayTest, ExtraDimensionNoError) {
auto array = MakeArray<int>({{1, 2, 3}});
EXPECT_THAT(TransformInputBroadcastableArray(
tensorstore::IdentityTransform(1), array),
::testing::Optional(MakeArray<int>({1, 2, 3})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_broadcastable_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_broadcastable_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3975291b-1507-48db-9dd2-f43682542a45 | cpp | google/libaddressinput | rule | cpp/src/rule.cc | cpp/test/rule_test.cc | #include "rule.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <string>
#include <utility>
#include <re2/re2.h>
#include "address_field_util.h"
#include "format_element.h"
#include "grit.h"
#include "messages.h"
#include "region_data_constants.h"
#include "util/json.h"
#include "util/re2ptr.h"
#include "util/size.h"
#include "util/string_split.h"
namespace i18n {
namespace addressinput {
namespace {
const char kSeparator = '~';
struct NameIdInfo {
const char* name;
int id;
static bool less(const NameIdInfo& a, const NameIdInfo& b) {
return strcmp(a.name, b.name) < 0;
}
};
struct NameIdMap {
const NameIdInfo* infos;
size_t size;
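  // Binary-searches the name-sorted `infos` table; returns INVALID_MESSAGE_ID
  // when `name` is not present.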
int GetIdFromName(const std::string& name) const {
NameIdInfo key{name.c_str()};
const NameIdInfo* begin = infos;
const NameIdInfo* end = begin + size;
const NameIdInfo* probe =
std::lower_bound(begin, end, key, NameIdInfo::less);
return probe != end && name == probe->name ? probe->id : INVALID_MESSAGE_ID;
}
bool IsSorted() const {
for (size_t n = 1; n < size; ++n) {
if (!NameIdInfo::less(infos[n - 1], infos[n])) {
return false;
}
}
return true;
}
};
const NameIdInfo kAdminAreaInfoArray[] = {
{"area", IDS_LIBADDRESSINPUT_AREA},
{"county", IDS_LIBADDRESSINPUT_COUNTY},
{"department", IDS_LIBADDRESSINPUT_DEPARTMENT},
{"district", IDS_LIBADDRESSINPUT_DISTRICT},
{"do_si", IDS_LIBADDRESSINPUT_DO_SI},
{"emirate", IDS_LIBADDRESSINPUT_EMIRATE},
{"island", IDS_LIBADDRESSINPUT_ISLAND},
{"oblast", IDS_LIBADDRESSINPUT_OBLAST},
{"parish", IDS_LIBADDRESSINPUT_PARISH},
{"prefecture", IDS_LIBADDRESSINPUT_PREFECTURE},
{"province", IDS_LIBADDRESSINPUT_PROVINCE},
{"state", IDS_LIBADDRESSINPUT_STATE},
};
const NameIdMap kAdminAreaMessageIds{
kAdminAreaInfoArray,
size(kAdminAreaInfoArray)
};
const NameIdInfo kPostalCodeInfoArray[] = {
{"eircode", IDS_LIBADDRESSINPUT_EIR_CODE_LABEL},
{"pin", IDS_LIBADDRESSINPUT_PIN_CODE_LABEL},
{"postal", IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL},
{"zip", IDS_LIBADDRESSINPUT_ZIP_CODE_LABEL},
};
const NameIdMap kPostalCodeMessageIds{
kPostalCodeInfoArray,
size(kPostalCodeInfoArray)
};
const NameIdInfo kLocalityInfoArray[] = {
{"city", IDS_LIBADDRESSINPUT_LOCALITY_LABEL},
{"district", IDS_LIBADDRESSINPUT_DISTRICT},
{"post_town", IDS_LIBADDRESSINPUT_POST_TOWN},
{"suburb", IDS_LIBADDRESSINPUT_SUBURB},
};
const NameIdMap kLocalityMessageIds{
kLocalityInfoArray,
size(kLocalityInfoArray)
};
const NameIdInfo kSublocalityInfoArray[] = {
{"district", IDS_LIBADDRESSINPUT_DISTRICT},
{"neighborhood", IDS_LIBADDRESSINPUT_NEIGHBORHOOD},
{"suburb", IDS_LIBADDRESSINPUT_SUBURB},
{"townland", IDS_LIBADDRESSINPUT_TOWNLAND},
{"village_township", IDS_LIBADDRESSINPUT_VILLAGE_TOWNSHIP},
};
const NameIdMap kSublocalityMessageIds{
kSublocalityInfoArray,
size(kSublocalityInfoArray)
};
#ifndef _NDEBUG
struct StaticMapChecker {
StaticMapChecker() {
assert(kAdminAreaMessageIds.IsSorted());
assert(kPostalCodeMessageIds.IsSorted());
assert(kLocalityMessageIds.IsSorted());
assert(kSublocalityMessageIds.IsSorted());
}
};
#endif
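// A "zip" value containing none of the characters ( [ \ { ? is treated as a
// literal postal code rather than a regular expression and is exposed via
// GetSolePostalCode().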
bool ContainsRegExSpecialCharacters(const std::string& input) {
return input.find_first_of(R"(([\{?)") != std::string::npos;
}
}
Rule::Rule()
: id_(),
format_(),
latin_format_(),
required_(),
sub_keys_(),
languages_(),
postal_code_matcher_(nullptr),
sole_postal_code_(),
admin_area_name_message_id_(INVALID_MESSAGE_ID),
postal_code_name_message_id_(INVALID_MESSAGE_ID),
locality_name_message_id_(INVALID_MESSAGE_ID),
sublocality_name_message_id_(INVALID_MESSAGE_ID),
name_(),
latin_name_(),
postal_code_example_(),
post_service_url_() {}
Rule::~Rule() = default;
const Rule& Rule::GetDefault() {
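  // Lazily parse the built-in default region data once; the singleton is
  // intentionally never destroyed.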
static Rule* default_rule = nullptr;
if (default_rule == nullptr) {
default_rule = new Rule;
default_rule->ParseSerializedRule(
RegionDataConstants::GetDefaultRegionData());
}
return *default_rule;
}
void Rule::CopyFrom(const Rule& rule) {
assert(this != &rule);
id_ = rule.id_;
format_ = rule.format_;
latin_format_ = rule.latin_format_;
required_ = rule.required_;
sub_keys_ = rule.sub_keys_;
languages_ = rule.languages_;
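  // RE2 objects are not copyable; recompile the pattern with the same options
  // so this rule gets an independent matcher.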
postal_code_matcher_.reset(
rule.postal_code_matcher_ == nullptr
? nullptr
: new RE2ptr(new RE2(rule.postal_code_matcher_->ptr->pattern(),
rule.postal_code_matcher_->ptr->options())));
sole_postal_code_ = rule.sole_postal_code_;
admin_area_name_message_id_ = rule.admin_area_name_message_id_;
postal_code_name_message_id_ = rule.postal_code_name_message_id_;
locality_name_message_id_ = rule.locality_name_message_id_;
sublocality_name_message_id_ = rule.sublocality_name_message_id_;
name_ = rule.name_;
latin_name_ = rule.latin_name_;
postal_code_example_ = rule.postal_code_example_;
post_service_url_ = rule.post_service_url_;
}
bool Rule::ParseSerializedRule(const std::string& serialized_rule) {
Json json;
if (!json.ParseObject(serialized_rule)) {
return false;
}
ParseJsonRule(json);
return true;
}
void Rule::ParseJsonRule(const Json& json) {
#ifndef _NDEBUG
static StaticMapChecker map_checker;
#endif
std::string value;
if (json.GetStringValueForKey("id", &value)) {
id_.swap(value);
}
if (json.GetStringValueForKey("fmt", &value)) {
ParseFormatRule(value, &format_);
}
if (json.GetStringValueForKey("lfmt", &value)) {
ParseFormatRule(value, &latin_format_);
}
if (json.GetStringValueForKey("require", &value)) {
ParseAddressFieldsRequired(value, &required_);
}
if (json.GetStringValueForKey("sub_keys", &value)) {
SplitString(value, kSeparator, &sub_keys_);
}
if (json.GetStringValueForKey("languages", &value)) {
SplitString(value, kSeparator, &languages_);
}
sole_postal_code_.clear();
if (json.GetStringValueForKey("zip", &value)) {
RE2::Options options;
options.set_never_capture(true);
RE2* matcher = new RE2("^(" + value + ")", options);
if (matcher->ok()) {
postal_code_matcher_.reset(new RE2ptr(matcher));
} else {
postal_code_matcher_.reset(nullptr);
delete matcher;
}
if (!ContainsRegExSpecialCharacters(value)) {
sole_postal_code_.swap(value);
}
}
if (json.GetStringValueForKey("state_name_type", &value)) {
admin_area_name_message_id_ = kAdminAreaMessageIds.GetIdFromName(value);
}
if (json.GetStringValueForKey("zip_name_type", &value)) {
postal_code_name_message_id_ = kPostalCodeMessageIds.GetIdFromName(value);
}
if (json.GetStringValueForKey("locality_name_type", &value)) {
locality_name_message_id_ = kLocalityMessageIds.GetIdFromName(value);
}
if (json.GetStringValueForKey("sublocality_name_type", &value)) {
sublocality_name_message_id_ = kSublocalityMessageIds.GetIdFromName(value);
}
if (json.GetStringValueForKey("name", &value)) {
name_.swap(value);
}
if (json.GetStringValueForKey("lname", &value)) {
latin_name_.swap(value);
}
if (json.GetStringValueForKey("zipex", &value)) {
postal_code_example_.swap(value);
}
if (json.GetStringValueForKey("posturl", &value)) {
post_service_url_.swap(value);
}
}
}
} | #include "rule.h"
#include <libaddressinput/address_field.h>
#include <libaddressinput/localization.h>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "format_element.h"
#include "grit.h"
#include "messages.h"
#include "region_data_constants.h"
#include "util/json.h"
namespace {
using i18n::addressinput::AddressField;
using i18n::addressinput::ADMIN_AREA;
using i18n::addressinput::FormatElement;
using i18n::addressinput::INVALID_MESSAGE_ID;
using i18n::addressinput::Json;
using i18n::addressinput::LOCALITY;
using i18n::addressinput::Localization;
using i18n::addressinput::RegionDataConstants;
using i18n::addressinput::Rule;
using i18n::addressinput::STREET_ADDRESS;
TEST(RuleTest, CopyOverwritesRule) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule(
R"({)"
R"("fmt":"%S%Z",)"
R"("lfmt":"%Z%S",)"
R"("id":"data/XA",)"
R"("name":"Le Test",)"
R"("lname":"Testistan",)"
R"("require":"AC",)"
R"("sub_keys":"aa~bb~cc",)"
R"("languages":"en~fr",)"
R"("zip":"\\d{3}",)"
R"("state_name_type":"area",)"
R"("locality_name_type":"post_town",)"
R"("sublocality_name_type":"neighborhood",)"
R"("zip_name_type":"postal",)"
R"("zipex":"1234",)"
R"("posturl":"http:
R"(})"));
Rule copy;
EXPECT_NE(rule.GetFormat(), copy.GetFormat());
EXPECT_NE(rule.GetLatinFormat(), copy.GetLatinFormat());
EXPECT_NE(rule.GetId(), copy.GetId());
EXPECT_NE(rule.GetRequired(), copy.GetRequired());
EXPECT_NE(rule.GetSubKeys(), copy.GetSubKeys());
EXPECT_NE(rule.GetLanguages(), copy.GetLanguages());
EXPECT_NE(rule.GetAdminAreaNameMessageId(),
copy.GetAdminAreaNameMessageId());
EXPECT_NE(rule.GetPostalCodeNameMessageId(),
copy.GetPostalCodeNameMessageId());
EXPECT_NE(rule.GetLocalityNameMessageId(),
copy.GetLocalityNameMessageId());
EXPECT_NE(rule.GetSublocalityNameMessageId(),
copy.GetSublocalityNameMessageId());
EXPECT_NE(rule.GetName(), copy.GetName());
EXPECT_NE(rule.GetLatinName(), copy.GetLatinName());
EXPECT_NE(rule.GetPostalCodeExample(), copy.GetPostalCodeExample());
EXPECT_NE(rule.GetPostServiceUrl(), copy.GetPostServiceUrl());
EXPECT_TRUE(rule.GetPostalCodeMatcher() != nullptr);
EXPECT_TRUE(copy.GetPostalCodeMatcher() == nullptr);
copy.CopyFrom(rule);
EXPECT_EQ(rule.GetFormat(), copy.GetFormat());
EXPECT_EQ(rule.GetLatinFormat(), copy.GetLatinFormat());
EXPECT_EQ(rule.GetId(), copy.GetId());
EXPECT_EQ(rule.GetRequired(), copy.GetRequired());
EXPECT_EQ(rule.GetSubKeys(), copy.GetSubKeys());
EXPECT_EQ(rule.GetLanguages(), copy.GetLanguages());
EXPECT_EQ(rule.GetAdminAreaNameMessageId(),
copy.GetAdminAreaNameMessageId());
EXPECT_EQ(rule.GetPostalCodeNameMessageId(),
copy.GetPostalCodeNameMessageId());
EXPECT_EQ(rule.GetSublocalityNameMessageId(),
copy.GetSublocalityNameMessageId());
EXPECT_EQ(rule.GetLocalityNameMessageId(),
copy.GetLocalityNameMessageId());
EXPECT_EQ(rule.GetName(), copy.GetName());
EXPECT_EQ(rule.GetLatinName(), copy.GetLatinName());
EXPECT_EQ(rule.GetPostalCodeExample(), copy.GetPostalCodeExample());
EXPECT_EQ(rule.GetPostServiceUrl(), copy.GetPostServiceUrl());
EXPECT_TRUE(copy.GetPostalCodeMatcher() != nullptr);
}
TEST(RuleTest, ParseOverwritesRule) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{"
"\"fmt\":\"%S%Z\","
"\"state_name_type\":\"area\","
"\"zip\":\"1234\","
"\"zip_name_type\":\"postal\","
"\"zipex\":\"1234\","
"\"posturl\":\"http:
"}"));
EXPECT_FALSE(rule.GetFormat().empty());
EXPECT_EQ(IDS_LIBADDRESSINPUT_AREA,
rule.GetAdminAreaNameMessageId());
EXPECT_EQ(IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL,
rule.GetPostalCodeNameMessageId());
EXPECT_EQ("1234", rule.GetSolePostalCode());
EXPECT_EQ("1234", rule.GetPostalCodeExample());
EXPECT_EQ("http:
ASSERT_TRUE(rule.ParseSerializedRule("{"
"\"fmt\":\"\","
"\"state_name_type\":\"do_si\","
"\"zip_name_type\":\"zip\","
"\"zipex\":\"5678\","
"\"posturl\":\"http:
"}"));
EXPECT_TRUE(rule.GetFormat().empty());
EXPECT_EQ(IDS_LIBADDRESSINPUT_DO_SI,
rule.GetAdminAreaNameMessageId());
EXPECT_EQ(IDS_LIBADDRESSINPUT_ZIP_CODE_LABEL,
rule.GetPostalCodeNameMessageId());
EXPECT_TRUE(rule.GetSolePostalCode().empty());
EXPECT_EQ("5678", rule.GetPostalCodeExample());
EXPECT_EQ("http:
}
TEST(RuleTest, ParsesFormatCorrectly) {
const std::vector<FormatElement> expected{
FormatElement{ADMIN_AREA},
FormatElement{LOCALITY},
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"fmt\":\"%S%C\"}"));
EXPECT_EQ(expected, rule.GetFormat());
}
TEST(RuleTest, ParsesNameCorrectly) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"name\":\"Le Test\"}"));
EXPECT_EQ("Le Test", rule.GetName());
}
TEST(RuleTest, ParsesLatinNameCorrectly) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"lname\":\"Testistan\"}"));
EXPECT_EQ("Testistan", rule.GetLatinName());
}
TEST(RuleTest, ParsesLatinFormatCorrectly) {
const std::vector<FormatElement> expected{
FormatElement{LOCALITY},
FormatElement{ADMIN_AREA},
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"lfmt\":\"%C%S\"}"));
EXPECT_EQ(expected, rule.GetLatinFormat());
}
TEST(RuleTest, ParsesRequiredCorrectly) {
const std::vector<AddressField> expected{
STREET_ADDRESS,
LOCALITY,
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"require\":\"AC\"}"));
EXPECT_EQ(expected, rule.GetRequired());
}
TEST(RuleTest, ParsesSubKeysCorrectly) {
const std::vector<std::string> expected{
"aa",
"bb",
"cc",
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"sub_keys\":\"aa~bb~cc\"}"));
EXPECT_EQ(expected, rule.GetSubKeys());
}
TEST(RuleTest, ParsesLanguagesCorrectly) {
const std::vector<std::string> expected{
"de",
"fr",
"it",
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"languages\":\"de~fr~it\"}"));
EXPECT_EQ(expected, rule.GetLanguages());
}
TEST(RuleTest, ParsesPostalCodeExampleCorrectly) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"zipex\":\"1234,12345-6789\"}"));
EXPECT_EQ("1234,12345-6789", rule.GetPostalCodeExample());
}
TEST(RuleTest, ParsesPostServiceUrlCorrectly) {
Rule rule;
ASSERT_TRUE(
rule.ParseSerializedRule("{\"posturl\":\"http:
EXPECT_EQ("http:
}
TEST(RuleTest, PostalCodeMatcher) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule(R"({"zip":"\\d{3}"})"));
EXPECT_TRUE(rule.GetPostalCodeMatcher() != nullptr);
}
TEST(RuleTest, PostalCodeMatcherInvalidRegExp) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule(R"({"zip":"("})"));
EXPECT_TRUE(rule.GetPostalCodeMatcher() == nullptr);
}
TEST(RuleTest, ParsesJsonRuleCorrectly) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"zip":"\\d{3}"})"));
Rule rule;
rule.ParseJsonRule(json);
EXPECT_TRUE(rule.GetPostalCodeMatcher() != nullptr);
}
TEST(RuleTest, EmptyStringIsNotValid) {
Rule rule;
EXPECT_FALSE(rule.ParseSerializedRule(std::string()));
}
TEST(RuleTest, EmptyDictionaryIsValid) {
Rule rule;
EXPECT_TRUE(rule.ParseSerializedRule("{}"));
}
class PostalCodeNameParseTest
: public testing::TestWithParam<std::pair<std::string, int> > {
public:
PostalCodeNameParseTest(const PostalCodeNameParseTest&) = delete;
PostalCodeNameParseTest& operator=(const PostalCodeNameParseTest&) = delete;
protected:
PostalCodeNameParseTest() = default;
Rule rule_;
};
TEST_P(PostalCodeNameParseTest, ParsedCorrectly) {
ASSERT_TRUE(rule_.ParseSerializedRule(GetParam().first));
EXPECT_EQ(GetParam().second, rule_.GetPostalCodeNameMessageId());
}
INSTANTIATE_TEST_SUITE_P(
AllPostalCodeNames, PostalCodeNameParseTest,
testing::Values(std::make_pair("{\"zip_name_type\":\"pin\"}",
IDS_LIBADDRESSINPUT_PIN_CODE_LABEL),
std::make_pair("{\"zip_name_type\":\"postal\"}",
IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL),
std::make_pair("{\"zip_name_type\":\"zip\"}",
IDS_LIBADDRESSINPUT_ZIP_CODE_LABEL)));
class LocalityNameParseTest
: public testing::TestWithParam<std::pair<std::string, int> > {
public:
LocalityNameParseTest(const LocalityNameParseTest&) = delete;
LocalityNameParseTest& operator=(const LocalityNameParseTest&) = delete;
protected:
LocalityNameParseTest() = default;
Rule rule_;
};
TEST_P(LocalityNameParseTest, ParsedCorrectly) {
ASSERT_TRUE(rule_.ParseSerializedRule(GetParam().first));
EXPECT_EQ(GetParam().second, rule_.GetLocalityNameMessageId());
}
INSTANTIATE_TEST_SUITE_P(
AllLocalityNames, LocalityNameParseTest,
testing::Values(std::make_pair("{\"locality_name_type\":\"post_town\"}",
IDS_LIBADDRESSINPUT_POST_TOWN),
std::make_pair("{\"locality_name_type\":\"city\"}",
IDS_LIBADDRESSINPUT_LOCALITY_LABEL),
std::make_pair("{\"locality_name_type\":\"district\"}",
IDS_LIBADDRESSINPUT_DISTRICT)));
class SublocalityNameParseTest
: public testing::TestWithParam<std::pair<std::string, int> > {
public:
SublocalityNameParseTest(const SublocalityNameParseTest&) = delete;
SublocalityNameParseTest& operator=(const SublocalityNameParseTest&) = delete;
protected:
SublocalityNameParseTest() = default;
Rule rule_;
};
TEST_P(SublocalityNameParseTest, ParsedCorrectly) {
ASSERT_TRUE(rule_.ParseSerializedRule(GetParam().first));
EXPECT_EQ(GetParam().second, rule_.GetSublocalityNameMessageId());
}
INSTANTIATE_TEST_SUITE_P(
AllSublocalityNames, SublocalityNameParseTest,
testing::Values(
std::make_pair("{\"sublocality_name_type\":\"village_township\"}",
IDS_LIBADDRESSINPUT_VILLAGE_TOWNSHIP),
std::make_pair("{\"sublocality_name_type\":\"neighborhood\"}",
IDS_LIBADDRESSINPUT_NEIGHBORHOOD),
std::make_pair("{\"sublocality_name_type\":\"suburb\"}",
IDS_LIBADDRESSINPUT_SUBURB),
std::make_pair("{\"sublocality_name_type\":\"district\"}",
IDS_LIBADDRESSINPUT_DISTRICT)));
class AdminAreaNameParseTest
: public testing::TestWithParam<std::pair<std::string, int> > {
public:
AdminAreaNameParseTest(const AdminAreaNameParseTest&) = delete;
AdminAreaNameParseTest& operator=(const AdminAreaNameParseTest&) = delete;
protected:
AdminAreaNameParseTest() = default;
Rule rule_;
};
TEST_P(AdminAreaNameParseTest, ParsedCorrectly) {
ASSERT_TRUE(rule_.ParseSerializedRule(GetParam().first));
EXPECT_EQ(GetParam().second, rule_.GetAdminAreaNameMessageId());
}
INSTANTIATE_TEST_SUITE_P(
AllAdminAreaNames, AdminAreaNameParseTest,
testing::Values(std::make_pair("{\"state_name_type\":\"area\"}",
IDS_LIBADDRESSINPUT_AREA),
std::make_pair("{\"state_name_type\":\"county\"}",
IDS_LIBADDRESSINPUT_COUNTY),
std::make_pair("{\"state_name_type\":\"department\"}",
IDS_LIBADDRESSINPUT_DEPARTMENT),
std::make_pair("{\"state_name_type\":\"district\"}",
IDS_LIBADDRESSINPUT_DISTRICT),
std::make_pair("{\"state_name_type\":\"do_si\"}",
IDS_LIBADDRESSINPUT_DO_SI),
std::make_pair("{\"state_name_type\":\"emirate\"}",
IDS_LIBADDRESSINPUT_EMIRATE),
std::make_pair("{\"state_name_type\":\"island\"}",
IDS_LIBADDRESSINPUT_ISLAND),
std::make_pair("{\"state_name_type\":\"parish\"}",
IDS_LIBADDRESSINPUT_PARISH),
std::make_pair("{\"state_name_type\":\"prefecture\"}",
IDS_LIBADDRESSINPUT_PREFECTURE),
std::make_pair("{\"state_name_type\":\"province\"}",
IDS_LIBADDRESSINPUT_PROVINCE),
std::make_pair("{\"state_name_type\":\"state\"}",
IDS_LIBADDRESSINPUT_STATE)));
class RuleParseTest : public testing::TestWithParam<std::string> {
public:
RuleParseTest(const RuleParseTest&) = delete;
RuleParseTest& operator=(const RuleParseTest&) = delete;
protected:
RuleParseTest() = default;
std::string GetRegionData() const {
std::string data = RegionDataConstants::GetRegionData(GetParam());
return !data.empty() ? data : GetParam();
}
Rule rule_;
Localization localization_;
};
TEST_P(RuleParseTest, RegionDataParsedSuccessfully) {
EXPECT_TRUE(rule_.ParseSerializedRule(GetRegionData()));
}
TEST_P(RuleParseTest, AdminAreaNameTypeHasUiString) {
const std::string& region_data = GetRegionData();
rule_.ParseSerializedRule(region_data);
if (region_data.find("state_name_type") != std::string::npos) {
EXPECT_NE(INVALID_MESSAGE_ID, rule_.GetAdminAreaNameMessageId());
EXPECT_FALSE(
localization_.GetString(rule_.GetAdminAreaNameMessageId()).empty());
}
}
TEST_P(RuleParseTest, PostalCodeNameTypeHasUiString) {
const std::string& region_data = GetRegionData();
rule_.ParseSerializedRule(region_data);
if (region_data.find("zip_name_type") != std::string::npos) {
EXPECT_NE(INVALID_MESSAGE_ID, rule_.GetPostalCodeNameMessageId());
EXPECT_FALSE(
localization_.GetString(rule_.GetPostalCodeNameMessageId()).empty());
}
}
TEST_P(RuleParseTest, LocalityNameTypeHasUiString) {
const std::string& region_data = GetRegionData();
rule_.ParseSerializedRule(region_data);
if (region_data.find("\"locality_name_type") != std::string::npos) {
EXPECT_NE(INVALID_MESSAGE_ID, rule_.GetLocalityNameMessageId());
EXPECT_FALSE(
localization_.GetString(rule_.GetLocalityNameMessageId()).empty());
}
}
TEST_P(RuleParseTest, SublocalityNameTypeHasUiString) {
const std::string& region_data = GetRegionData();
rule_.ParseSerializedRule(region_data);
if (region_data.find("sublocality_name_type") != std::string::npos) {
EXPECT_NE(INVALID_MESSAGE_ID, rule_.GetSublocalityNameMessageId());
EXPECT_FALSE(
localization_.GetString(rule_.GetSublocalityNameMessageId()).empty());
}
}
TEST_P(RuleParseTest, SolePostalCode) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"zip\":\"1234\"}"));
EXPECT_TRUE(rule.GetPostalCodeMatcher() != nullptr);
EXPECT_EQ(rule.GetSolePostalCode(), "1234");
Rule copy;
EXPECT_TRUE(copy.GetPostalCodeMatcher() == nullptr);
EXPECT_TRUE(copy.GetSolePostalCode().empty());
copy.CopyFrom(rule);
EXPECT_TRUE(copy.GetPostalCodeMatcher() != nullptr);
EXPECT_EQ(rule.GetSolePostalCode(), copy.GetSolePostalCode());
}
INSTANTIATE_TEST_SUITE_P(
AllRulesTest, RuleParseTest,
testing::ValuesIn(RegionDataConstants::GetRegionCodes()));
INSTANTIATE_TEST_SUITE_P(
DefaultRuleTest, RuleParseTest,
testing::Values(RegionDataConstants::GetDefaultRegionData()));
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/rule.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/rule_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |