| ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 36..36 | stringclasses 1 value | stringclasses 13 values | stringlengths 2..44 | stringlengths 11..111 | stringlengths 16..116 | stringlengths 0..278k | stringlengths 127..663k | stringlengths 91..198 | stringlengths 96..203 | stringclasses 13 values |
c4c7ef74-e75d-4082-85ad-b20b4f359d7d | cpp | google/quiche | cert_compressor | quiche/quic/core/crypto/cert_compressor.cc | quiche/quic/core/crypto/cert_compressor_test.cc |
#include "quiche/quic/core/crypto/cert_compressor.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "zlib.h"
namespace quic {
namespace {
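// Common substrings extracted from X.509 certificates (OIDs, CA names, URL
// fragments, and similar). ZlibDictForEntries() appends this table to the
// zlib preset dictionary so that even the first certificate in a chain
// compresses well.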
static const unsigned char kCommonCertSubstrings[] = {
0x04, 0x02, 0x30, 0x00, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04,
0x16, 0x30, 0x14, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03,
0x01, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x02, 0x30,
0x5f, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x04, 0x01,
0x06, 0x06, 0x0b, 0x60, 0x86, 0x48, 0x01, 0x86, 0xfd, 0x6d, 0x01, 0x07,
0x17, 0x01, 0x30, 0x33, 0x20, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65,
0x64, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x20, 0x53, 0x20, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x31, 0x34,
0x20, 0x53, 0x53, 0x4c, 0x20, 0x43, 0x41, 0x30, 0x1e, 0x17, 0x0d, 0x31,
0x32, 0x20, 0x53, 0x65, 0x63, 0x75, 0x72, 0x65, 0x20, 0x53, 0x65, 0x72,
0x76, 0x65, 0x72, 0x20, 0x43, 0x41, 0x30, 0x2d, 0x61, 0x69, 0x61, 0x2e,
0x76, 0x65, 0x72, 0x69, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x45, 0x2d, 0x63, 0x72, 0x6c, 0x2e, 0x76, 0x65, 0x72, 0x69, 0x73,
0x69, 0x67, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x2e, 0x63, 0x65,
0x72, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
0x01, 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x4a, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x2f, 0x63, 0x70, 0x73, 0x20, 0x28, 0x63, 0x29, 0x30, 0x30, 0x09, 0x06,
0x03, 0x55, 0x1d, 0x13, 0x04, 0x02, 0x30, 0x00, 0x30, 0x1d, 0x30, 0x0d,
0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05,
0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x7b, 0x30, 0x1d, 0x06, 0x03, 0x55,
0x1d, 0x0e, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86,
0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01,
0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xd2,
0x6f, 0x64, 0x6f, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x2e,
0x63, 0x72, 0x6c, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16,
0x04, 0x14, 0xb4, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x69,
0x67, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x30, 0x0b, 0x06, 0x03,
0x55, 0x1d, 0x0f, 0x04, 0x04, 0x03, 0x02, 0x01, 0x30, 0x0d, 0x06, 0x09,
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30,
0x81, 0xca, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13,
0x02, 0x55, 0x53, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x08,
0x13, 0x07, 0x41, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x61, 0x31, 0x13, 0x30,
0x11, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x0a, 0x53, 0x63, 0x6f, 0x74,
0x74, 0x73, 0x64, 0x61, 0x6c, 0x65, 0x31, 0x1a, 0x30, 0x18, 0x06, 0x03,
0x55, 0x04, 0x0a, 0x13, 0x11, 0x47, 0x6f, 0x44, 0x61, 0x64, 0x64, 0x79,
0x2e, 0x63, 0x6f, 0x6d, 0x2c, 0x20, 0x49, 0x6e, 0x63, 0x2e, 0x31, 0x33,
0x30, 0x31, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x2a, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x65, 0x73, 0x2e, 0x67, 0x6f, 0x64, 0x61, 0x64, 0x64, 0x79,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74,
0x6f, 0x72, 0x79, 0x31, 0x30, 0x30, 0x2e, 0x06, 0x03, 0x55, 0x04, 0x03,
0x13, 0x27, 0x47, 0x6f, 0x20, 0x44, 0x61, 0x64, 0x64, 0x79, 0x20, 0x53,
0x65, 0x63, 0x75, 0x72, 0x65, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x41, 0x75, 0x74, 0x68,
0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55,
0x04, 0x05, 0x13, 0x08, 0x30, 0x37, 0x39, 0x36, 0x39, 0x32, 0x38, 0x37,
0x30, 0x1e, 0x17, 0x0d, 0x31, 0x31, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x1d,
0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, 0x02, 0x05, 0xa0, 0x30, 0x0c,
0x06, 0x03, 0x55, 0x1d, 0x13, 0x01, 0x01, 0xff, 0x04, 0x02, 0x30, 0x00,
0x30, 0x1d, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x01, 0x01, 0xff,
0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0x00, 0x30, 0x1d, 0x06, 0x03, 0x55,
0x1d, 0x25, 0x04, 0x16, 0x30, 0x14, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05,
0x05, 0x07, 0x03, 0x01, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07,
0x03, 0x02, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x01, 0x01, 0xff,
0x04, 0x04, 0x03, 0x02, 0x05, 0xa0, 0x30, 0x33, 0x06, 0x03, 0x55, 0x1d,
0x1f, 0x04, 0x2c, 0x30, 0x2a, 0x30, 0x28, 0xa0, 0x26, 0xa0, 0x24, 0x86,
0x22, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x6c, 0x2e,
0x67, 0x6f, 0x64, 0x61, 0x64, 0x64, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x67, 0x64, 0x73, 0x31, 0x2d, 0x32, 0x30, 0x2a, 0x30, 0x28, 0x06, 0x08,
0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x02, 0x01, 0x16, 0x1c, 0x68, 0x74,
0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x76, 0x65,
0x72, 0x69, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
0x70, 0x73, 0x30, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x5a, 0x17,
0x0d, 0x31, 0x33, 0x30, 0x35, 0x30, 0x39, 0x06, 0x08, 0x2b, 0x06, 0x01,
0x05, 0x05, 0x07, 0x30, 0x02, 0x86, 0x2d, 0x68, 0x74, 0x74, 0x70, 0x3a,
0x2f, 0x2f, 0x73, 0x30, 0x39, 0x30, 0x37, 0x06, 0x08, 0x2b, 0x06, 0x01,
0x05, 0x05, 0x07, 0x02, 0x30, 0x44, 0x06, 0x03, 0x55, 0x1d, 0x20, 0x04,
0x3d, 0x30, 0x3b, 0x30, 0x39, 0x06, 0x0b, 0x60, 0x86, 0x48, 0x01, 0x86,
0xf8, 0x45, 0x01, 0x07, 0x17, 0x06, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03,
0x55, 0x04, 0x06, 0x13, 0x02, 0x47, 0x42, 0x31, 0x1b, 0x53, 0x31, 0x17,
0x30, 0x15, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x0e, 0x56, 0x65, 0x72,
0x69, 0x53, 0x69, 0x67, 0x6e, 0x2c, 0x20, 0x49, 0x6e, 0x63, 0x2e, 0x31,
0x1f, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x16, 0x56, 0x65,
0x72, 0x69, 0x53, 0x69, 0x67, 0x6e, 0x20, 0x54, 0x72, 0x75, 0x73, 0x74,
0x20, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x31, 0x3b, 0x30, 0x39,
0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x32, 0x54, 0x65, 0x72, 0x6d, 0x73,
0x20, 0x6f, 0x66, 0x20, 0x75, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x68,
0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x76,
0x65, 0x72, 0x69, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x72, 0x70, 0x61, 0x20, 0x28, 0x63, 0x29, 0x30, 0x31, 0x10, 0x30, 0x0e,
0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x07, 0x53, 0x31, 0x13, 0x30, 0x11,
0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x0a, 0x47, 0x31, 0x13, 0x30, 0x11,
0x06, 0x0b, 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x3c, 0x02, 0x01,
0x03, 0x13, 0x02, 0x55, 0x31, 0x16, 0x30, 0x14, 0x06, 0x03, 0x55, 0x04,
0x03, 0x14, 0x31, 0x19, 0x30, 0x17, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13,
0x31, 0x1d, 0x30, 0x1b, 0x06, 0x03, 0x55, 0x04, 0x0f, 0x13, 0x14, 0x50,
0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x20, 0x4f, 0x72, 0x67, 0x61, 0x6e,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x12, 0x31, 0x21, 0x30,
0x1f, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x18, 0x44, 0x6f, 0x6d, 0x61,
0x69, 0x6e, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x20, 0x56,
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x31, 0x14, 0x31, 0x31,
0x30, 0x2f, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x28, 0x53, 0x65, 0x65,
0x20, 0x77, 0x77, 0x77, 0x2e, 0x72, 0x3a, 0x2f, 0x2f, 0x73, 0x65, 0x63,
0x75, 0x72, 0x65, 0x2e, 0x67, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53,
0x69, 0x67, 0x6e, 0x31, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x41,
0x2e, 0x63, 0x72, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6e,
0x20, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x33, 0x20, 0x45, 0x63, 0x72,
0x6c, 0x2e, 0x67, 0x65, 0x6f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6c, 0x73, 0x2f, 0x73, 0x64, 0x31, 0x1a,
0x30, 0x18, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x3a,
0x2f, 0x2f, 0x45, 0x56, 0x49, 0x6e, 0x74, 0x6c, 0x2d, 0x63, 0x63, 0x72,
0x74, 0x2e, 0x67, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x69, 0x63, 0x65, 0x72,
0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x31, 0x6f, 0x63, 0x73, 0x70, 0x2e,
0x76, 0x65, 0x72, 0x69, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x63, 0x6f, 0x6d,
0x30, 0x39, 0x72, 0x61, 0x70, 0x69, 0x64, 0x73, 0x73, 0x6c, 0x2e, 0x63,
0x6f, 0x73, 0x2e, 0x67, 0x6f, 0x64, 0x61, 0x64, 0x64, 0x79, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72,
0x79, 0x2f, 0x30, 0x81, 0x80, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05,
0x07, 0x01, 0x01, 0x04, 0x74, 0x30, 0x72, 0x30, 0x24, 0x06, 0x08, 0x2b,
0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x86, 0x18, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x6f, 0x63, 0x73, 0x70, 0x2e, 0x67, 0x6f, 0x64,
0x61, 0x64, 0x64, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x4a, 0x06,
0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x02, 0x86, 0x3e, 0x68,
0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x67, 0x6f, 0x64, 0x61, 0x64,
0x64, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x65, 0x70, 0x6f, 0x73,
0x69, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x67, 0x64, 0x5f, 0x69, 0x6e, 0x74,
0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x63, 0x72,
0x74, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16,
0x80, 0x14, 0xfd, 0xac, 0x61, 0x32, 0x93, 0x6c, 0x45, 0xd6, 0xe2, 0xee,
0x85, 0x5f, 0x9a, 0xba, 0xe7, 0x76, 0x99, 0x68, 0xcc, 0xe7, 0x30, 0x27,
0x86, 0x29, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x86, 0x30,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x73,
};
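// Describes one certificate in the chain: either sent compressed in the zlib
// stream, or referenced by the 64-bit FNV-1a hash of a certificate that the
// client has reported as cached.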
struct CertEntry {
public:
enum Type {
COMPRESSED = 1,
CACHED = 2,
};
Type type;
uint64_t hash;
uint64_t set_hash;
uint32_t index;
};
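// For each certificate, decides whether the client already caches it (its
// FNV-1a hash appears in |client_cached_cert_hashes|) or whether it must be
// compressed and transmitted.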
std::vector<CertEntry> MatchCerts(const std::vector<std::string>& certs,
absl::string_view client_cached_cert_hashes) {
std::vector<CertEntry> entries;
entries.reserve(certs.size());
const bool cached_valid =
client_cached_cert_hashes.size() % sizeof(uint64_t) == 0 &&
!client_cached_cert_hashes.empty();
for (auto i = certs.begin(); i != certs.end(); ++i) {
CertEntry entry;
if (cached_valid) {
bool cached = false;
uint64_t hash = QuicUtils::FNV1a_64_Hash(*i);
for (size_t j = 0; j < client_cached_cert_hashes.size();
j += sizeof(uint64_t)) {
uint64_t cached_hash;
memcpy(&cached_hash, client_cached_cert_hashes.data() + j,
sizeof(uint64_t));
if (hash != cached_hash) {
continue;
}
entry.type = CertEntry::CACHED;
entry.hash = hash;
entries.push_back(entry);
cached = true;
break;
}
if (cached) {
continue;
}
}
entry.type = CertEntry::COMPRESSED;
entries.push_back(entry);
}
return entries;
}
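// Returns the serialized size of |entries|: one type byte per entry, eight
// hash bytes for each cached entry, plus the terminating zero byte.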
size_t CertEntriesSize(const std::vector<CertEntry>& entries) {
size_t entries_size = 0;
for (auto i = entries.begin(); i != entries.end(); ++i) {
entries_size++;
switch (i->type) {
case CertEntry::COMPRESSED:
break;
case CertEntry::CACHED:
entries_size += sizeof(uint64_t);
break;
}
}
entries_size++;
return entries_size;
}
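// Writes the entry list into |out| in the format described above.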
void SerializeCertEntries(uint8_t* out, const std::vector<CertEntry>& entries) {
for (auto i = entries.begin(); i != entries.end(); ++i) {
*out++ = static_cast<uint8_t>(i->type);
switch (i->type) {
case CertEntry::COMPRESSED:
break;
case CertEntry::CACHED:
memcpy(out, &i->hash, sizeof(i->hash));
out += sizeof(uint64_t);
break;
}
}
*out++ = 0;
}
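// Builds the zlib preset dictionary: every non-compressed (i.e. cached)
// certificate in reverse order (zlib scores bytes near the end of the
// dictionary higher), followed by kCommonCertSubstrings. The reverse loops
// rely on unsigned wraparound of |i| to terminate after index 0.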
std::string ZlibDictForEntries(const std::vector<CertEntry>& entries,
const std::vector<std::string>& certs) {
std::string zlib_dict;
size_t zlib_dict_size = 0;
for (size_t i = certs.size() - 1; i < certs.size(); i--) {
if (entries[i].type != CertEntry::COMPRESSED) {
zlib_dict_size += certs[i].size();
}
}
zlib_dict_size += sizeof(kCommonCertSubstrings);
zlib_dict.reserve(zlib_dict_size);
for (size_t i = certs.size() - 1; i < certs.size(); i--) {
if (entries[i].type != CertEntry::COMPRESSED) {
zlib_dict += certs[i];
}
}
zlib_dict += std::string(reinterpret_cast<const char*>(kCommonCertSubstrings),
sizeof(kCommonCertSubstrings));
QUICHE_DCHECK_EQ(zlib_dict.size(), zlib_dict_size);
return zlib_dict;
}
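// Returns the 64-bit FNV-1a hash of each certificate in |certs|.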
std::vector<uint64_t> HashCerts(const std::vector<std::string>& certs) {
std::vector<uint64_t> ret;
ret.reserve(certs.size());
for (auto i = certs.begin(); i != certs.end(); ++i) {
ret.push_back(QuicUtils::FNV1a_64_Hash(*i));
}
return ret;
}
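// Parses the serialized entry list from |*in_out|, advancing it past the
// terminating zero byte. Cached entries are resolved against |cached_certs|
// by hash; compressed entries get placeholder strings that are filled in
// after inflation.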
bool ParseEntries(absl::string_view* in_out,
const std::vector<std::string>& cached_certs,
std::vector<CertEntry>* out_entries,
std::vector<std::string>* out_certs) {
absl::string_view in = *in_out;
std::vector<uint64_t> cached_hashes;
out_entries->clear();
out_certs->clear();
for (;;) {
if (in.empty()) {
return false;
}
CertEntry entry;
const uint8_t type_byte = in[0];
in.remove_prefix(1);
if (type_byte == 0) {
break;
}
entry.type = static_cast<CertEntry::Type>(type_byte);
switch (entry.type) {
case CertEntry::COMPRESSED:
out_certs->push_back(std::string());
break;
case CertEntry::CACHED: {
if (in.size() < sizeof(uint64_t)) {
return false;
}
memcpy(&entry.hash, in.data(), sizeof(uint64_t));
in.remove_prefix(sizeof(uint64_t));
if (cached_hashes.size() != cached_certs.size()) {
cached_hashes = HashCerts(cached_certs);
}
bool found = false;
for (size_t i = 0; i < cached_hashes.size(); i++) {
if (cached_hashes[i] == entry.hash) {
out_certs->push_back(cached_certs[i]);
found = true;
break;
}
}
if (!found) {
return false;
}
break;
}
default:
return false;
}
out_entries->push_back(entry);
}
*in_out = in;
return true;
}
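// RAII wrapper that calls deflateEnd()/inflateEnd() on the wrapped z_stream,
// so early returns cannot leak zlib state.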
class ScopedZLib {
public:
enum Type {
INFLATE,
DEFLATE,
};
explicit ScopedZLib(Type type) : z_(nullptr), type_(type) {}
void reset(z_stream* z) {
Clear();
z_ = z;
}
~ScopedZLib() { Clear(); }
private:
void Clear() {
if (!z_) {
return;
}
if (type_ == DEFLATE) {
deflateEnd(z_);
} else {
inflateEnd(z_);
}
z_ = nullptr;
}
z_stream* z_;
const Type type_;
};
}  // namespace
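// Output format: the serialized entry list, then -- if at least one entry is
// compressed -- a little-endian uint32 giving the total uncompressed size,
// followed by a zlib stream of length-prefixed certificates.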
std::string CertCompressor::CompressChain(
const std::vector<std::string>& certs,
absl::string_view client_cached_cert_hashes) {
const std::vector<CertEntry> entries =
MatchCerts(certs, client_cached_cert_hashes);
QUICHE_DCHECK_EQ(entries.size(), certs.size());
size_t uncompressed_size = 0;
for (size_t i = 0; i < entries.size(); i++) {
if (entries[i].type == CertEntry::COMPRESSED) {
uncompressed_size += 4 + certs[i].size();
}
}
size_t compressed_size = 0;
z_stream z;
ScopedZLib scoped_z(ScopedZLib::DEFLATE);
if (uncompressed_size > 0) {
memset(&z, 0, sizeof(z));
int rv = deflateInit(&z, Z_DEFAULT_COMPRESSION);
QUICHE_DCHECK_EQ(Z_OK, rv);
if (rv != Z_OK) {
return "";
}
scoped_z.reset(&z);
std::string zlib_dict = ZlibDictForEntries(entries, certs);
rv = deflateSetDictionary(
&z, reinterpret_cast<const uint8_t*>(&zlib_dict[0]), zlib_dict.size());
QUICHE_DCHECK_EQ(Z_OK, rv);
if (rv != Z_OK) {
return "";
}
compressed_size = deflateBound(&z, uncompressed_size);
}
const size_t entries_size = CertEntriesSize(entries);
std::string result;
result.resize(entries_size + (uncompressed_size > 0 ? 4 : 0) +
compressed_size);
uint8_t* j = reinterpret_cast<uint8_t*>(&result[0]);
SerializeCertEntries(j, entries);
j += entries_size;
if (uncompressed_size == 0) {
return result;
}
uint32_t uncompressed_size_32 = uncompressed_size;
memcpy(j, &uncompressed_size_32, sizeof(uint32_t));
j += sizeof(uint32_t);
int rv;
z.next_out = j;
z.avail_out = compressed_size;
for (size_t i = 0; i < certs.size(); i++) {
if (entries[i].type != CertEntry::COMPRESSED) {
continue;
}
uint32_t length32 = certs[i].size();
z.next_in = reinterpret_cast<uint8_t*>(&length32);
z.avail_in = sizeof(length32);
rv = deflate(&z, Z_NO_FLUSH);
QUICHE_DCHECK_EQ(Z_OK, rv);
QUICHE_DCHECK_EQ(0u, z.avail_in);
if (rv != Z_OK || z.avail_in) {
return "";
}
z.next_in =
const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(certs[i].data()));
z.avail_in = certs[i].size();
rv = deflate(&z, Z_NO_FLUSH);
QUICHE_DCHECK_EQ(Z_OK, rv);
QUICHE_DCHECK_EQ(0u, z.avail_in);
if (rv != Z_OK || z.avail_in) {
return "";
}
}
z.avail_in = 0;
rv = deflate(&z, Z_FINISH);
QUICHE_DCHECK_EQ(Z_STREAM_END, rv);
if (rv != Z_STREAM_END) {
return "";
}
result.resize(result.size() - z.avail_out);
return result;
}
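// Reverses CompressChain(): parses the entry table, inflates the compressed
// blob (capped at 128 KiB) with the same preset dictionary, then splits the
// plaintext into length-prefixed certificates. Illustrative usage sketch:
//   std::string wire = CertCompressor::CompressChain(certs, client_hashes);
//   std::vector<std::string> out;
//   bool ok = CertCompressor::DecompressChain(wire, cached_certs, &out);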
bool CertCompressor::DecompressChain(
absl::string_view in, const std::vector<std::string>& cached_certs,
std::vector<std::string>* out_certs) {
std::vector<CertEntry> entries;
if (!ParseEntries(&in, cached_certs, &entries, out_certs)) {
return false;
}
QUICHE_DCHECK_EQ(entries.size(), out_certs->size());
std::unique_ptr<uint8_t[]> uncompressed_data;
absl::string_view uncompressed;
if (!in.empty()) {
if (in.size() < sizeof(uint32_t)) {
return false;
}
uint32_t uncompressed_size;
memcpy(&uncompressed_size, in.data(), sizeof(uncompressed_size));
in.remove_prefix(sizeof(uint32_t));
if (uncompressed_size > 128 * 1024) {
return false;
}
uncompressed_data = std::make_unique<uint8_t[]>(uncompressed_size);
z_stream z;
ScopedZLib scoped_z(ScopedZLib::INFLATE);
memset(&z, 0, sizeof(z));
z.next_out = uncompressed_data.get();
z.avail_out = uncompressed_size;
z.next_in =
const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(in.data()));
z.avail_in = in.size();
if (Z_OK != inflateInit(&z)) {
return false;
}
scoped_z.reset(&z);
int rv = inflate(&z, Z_FINISH);
if (rv == Z_NEED_DICT) {
std::string zlib_dict = ZlibDictForEntries(entries, *out_certs);
const uint8_t* dict = reinterpret_cast<const uint8_t*>(zlib_dict.data());
if (Z_OK != inflateSetDictionary(&z, dict, zlib_dict.size())) {
return false;
}
rv = inflate(&z, Z_FINISH);
}
if (Z_STREAM_END != rv || z.avail_out > 0 || z.avail_in > 0) {
return false;
}
uncompressed = absl::string_view(
reinterpret_cast<char*>(uncompressed_data.get()), uncompressed_size);
}
for (size_t i = 0; i < entries.size(); i++) {
switch (entries[i].type) {
case CertEntry::COMPRESSED:
if (uncompressed.size() < sizeof(uint32_t)) {
return false;
}
uint32_t cert_len;
memcpy(&cert_len, uncompressed.data(), sizeof(cert_len));
uncompressed.remove_prefix(sizeof(uint32_t));
if (uncompressed.size() < cert_len) {
return false;
}
(*out_certs)[i] = std::string(uncompressed.substr(0, cert_len));
uncompressed.remove_prefix(cert_len);
break;
case CertEntry::CACHED:
break;
}
}
if (!uncompressed.empty()) {
return false;
}
return true;
}
}  // namespace quic
|
#include "quiche/quic/core/crypto/cert_compressor.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
namespace quic {
namespace test {
class CertCompressorTest : public QuicTest {};
TEST_F(CertCompressorTest, EmptyChain) {
std::vector<std::string> chain;
const std::string compressed =
CertCompressor::CompressChain(chain, absl::string_view());
EXPECT_EQ("00", absl::BytesToHexString(compressed));
std::vector<std::string> chain2, cached_certs;
ASSERT_TRUE(
CertCompressor::DecompressChain(compressed, cached_certs, &chain2));
EXPECT_EQ(chain.size(), chain2.size());
}
TEST_F(CertCompressorTest, Compressed) {
std::vector<std::string> chain;
chain.push_back("testcert");
const std::string compressed =
CertCompressor::CompressChain(chain, absl::string_view());
ASSERT_GE(compressed.size(), 2u);
EXPECT_EQ("0100", absl::BytesToHexString(compressed.substr(0, 2)));
std::vector<std::string> chain2, cached_certs;
ASSERT_TRUE(
CertCompressor::DecompressChain(compressed, cached_certs, &chain2));
EXPECT_EQ(chain.size(), chain2.size());
EXPECT_EQ(chain[0], chain2[0]);
}
TEST_F(CertCompressorTest, Common) {
std::vector<std::string> chain;
chain.push_back("testcert");
static const uint64_t set_hash = 42;
const std::string compressed = CertCompressor::CompressChain(
chain, absl::string_view(reinterpret_cast<const char*>(&set_hash),
sizeof(set_hash)));
ASSERT_GE(compressed.size(), 2u);
EXPECT_EQ("0100", absl::BytesToHexString(compressed.substr(0, 2)));
std::vector<std::string> chain2, cached_certs;
ASSERT_TRUE(
CertCompressor::DecompressChain(compressed, cached_certs, &chain2));
EXPECT_EQ(chain.size(), chain2.size());
EXPECT_EQ(chain[0], chain2[0]);
}
TEST_F(CertCompressorTest, Cached) {
std::vector<std::string> chain;
chain.push_back("testcert");
uint64_t hash = QuicUtils::FNV1a_64_Hash(chain[0]);
absl::string_view hash_bytes(reinterpret_cast<char*>(&hash), sizeof(hash));
const std::string compressed =
CertCompressor::CompressChain(chain, hash_bytes);
EXPECT_EQ("02" + absl::BytesToHexString(hash_bytes) +
"00" ,
absl::BytesToHexString(compressed));
std::vector<std::string> cached_certs, chain2;
cached_certs.push_back(chain[0]);
ASSERT_TRUE(
CertCompressor::DecompressChain(compressed, cached_certs, &chain2));
EXPECT_EQ(chain.size(), chain2.size());
EXPECT_EQ(chain[0], chain2[0]);
}
TEST_F(CertCompressorTest, BadInputs) {
std::vector<std::string> cached_certs, chain;
EXPECT_FALSE(CertCompressor::DecompressChain(
absl::BytesToHexString("04") /* bad entry type */, cached_certs, &chain));
EXPECT_FALSE(CertCompressor::DecompressChain(
absl::BytesToHexString("01") /* no terminator */, cached_certs, &chain));
EXPECT_FALSE(CertCompressor::DecompressChain(
absl::BytesToHexString("0200") /* hash truncated */, cached_certs,
&chain));
EXPECT_FALSE(CertCompressor::DecompressChain(
absl::BytesToHexString("0300") /* hash and index truncated */,
cached_certs, &chain));
// Entry type 3 is unsupported and must be rejected regardless of payload.
EXPECT_FALSE(
CertCompressor::DecompressChain(absl::BytesToHexString("03"
"0000000000000000"
"00000000"),
cached_certs, &chain));
EXPECT_FALSE(
CertCompressor::DecompressChain(absl::BytesToHexString("03"
"a200000000000000"
"00000000"),
cached_certs, &chain));
}
}  // namespace test
}  // namespace quic
| https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/cert_compressor.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/cert_compressor_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
50095472-91e4-4cbb-baa3-ba1f4cebcf97 | cpp | tensorflow/tensorflow | collective_param_resolver_local | tensorflow/core/common_runtime/collective_param_resolver_local.cc | tensorflow/core/common_runtime/collective_param_resolver_local_test.cc |
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include <stddef.h>
#include <algorithm>
#include <tuple>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
CollectiveParamResolverLocal::CollectiveParamResolverLocal(
const ConfigProto& config, const DeviceMgr* dev_mgr,
DeviceResolverInterface* dev_resolver,
NcclCommunicatorInterface* nccl_communicator, const string& task_name)
: nccl_(config.experimental().collective_nccl()),
dev_mgr_(dev_mgr),
dev_resolver_(dev_resolver),
nccl_communicator_(nccl_communicator),
task_name_(task_name),
gpu_ring_order_(
config.gpu_options().experimental().collective_ring_order()) {}
void CollectiveParamResolverLocal::CompleteGroupAsync(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
CompleteGroupLocal(device, group_params, cancel_mgr, done);
}
namespace {
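// Maps a collective instance type to the name of a registered implementation,
// preferring the NCCL variant when requested.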
const char* GetCollectiveName(const CollectiveParams* cp, bool nccl) {
switch (cp->instance.type) {
case BROADCAST_COLLECTIVE:
return nccl ? "NcclBroadcast" : "HierarchicalTreeBroadcast";
case REDUCTION_COLLECTIVE:
return nccl ? "NcclReduce" : "RingReduce";
case GATHER_COLLECTIVE:
return nccl ? "NcclGather" : "RingGather";
case PERMUTE_COLLECTIVE:
return "Permute";
case ALL_TO_ALL_COLLECTIVE:
return nccl ? "NcclAllToAll" : "AllToAll";
case REDUCE_SCATTER_COLLECTIVE:
return nccl ? "NcclReduceScatter" : "undef";
default:
return "undef";
}
}
string TaskNameFromDeviceName(const string& device_name) {
DeviceNameUtils::ParsedName parsed_device;
CHECK(DeviceNameUtils::ParseFullName(device_name, &parsed_device));
string task_name;
CHECK(DeviceNameUtils::GetTaskName(parsed_device, &task_name));
return task_name;
}
struct RankFormatter {
void operator()(std::string* out, CollGroupMember m) const {
out->append(std::to_string(m.rank));
}
};
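// Validates user-specified ranks: either no member specifies a rank, or every
// member specifies a distinct one.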
Status CheckUserSpecifiedRanks(const std::vector<CollGroupMember> members) {
absl::flat_hash_set<int> user_ranks = {};
bool at_least_one_member_with_no_rank = false;
bool at_least_one_member_with_user_rank = false;
for (const auto& m : members) {
if (m.rank == -1) {
at_least_one_member_with_no_rank = true;
} else {
at_least_one_member_with_user_rank = true;
user_ranks.insert(m.rank);
}
}
auto received_ranks = absl::StrJoin(members, ",", RankFormatter());
if (at_least_one_member_with_no_rank && at_least_one_member_with_user_rank) {
return errors::InvalidArgument(
"Only part of the group members have user given rank specified.",
"Received ranks: ", received_ranks);
}
if (at_least_one_member_with_user_rank &&
user_ranks.size() < members.size()) {
return errors::InvalidArgument(
"Duplicate ranks specified for group members. Received ranks: ",
received_ranks);
}
return absl::OkStatus();
}
}  // namespace
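// Records the calling device in the GroupRec for |group_key|, creating the
// record on first contact. Callbacks are deferred until group_size devices
// have joined, then all of them are invoked with the finished group params.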
void CollectiveParamResolverLocal::CompleteGroupLocal(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, StatusCallback done) {
VLOG(1) << "CompleteGroup device=" << device.name() << ": "
<< group_params->ToString();
std::vector<StatusCallback> to_be_called;
GroupRec* gr = nullptr;
Status status;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(group_params->group_key);
if (it == group_table_.end()) {
gr = new GroupRec;
mutex_lock grl(gr->mu);
gr->group.group_key = group_params->group_key;
gr->group.group_size = group_params->group_size;
gr->group.device_type = group_params->device_type;
if (nccl_communicator_ != nullptr) {
gr->group.runtime_details.communicator_key =
nccl_communicator_->GenerateCommunicatorKey();
}
group_table_[gr->group.group_key].reset(gr);
VLOG(2) << "New group_key=" << gr->group.group_key
<< " group_size=" << gr->group.group_size
<< " runtime_details=" << gr->group.runtime_details.ToString();
} else {
gr = it->second.get();
}
}
{
mutex_lock l(status_mu_);
status = status_;
}
if (!status.ok()) {
done(status);
return;
}
if (cancel_mgr != nullptr) {
CancellationToken token = cancel_mgr->get_cancellation_token();
bool is_cancelled = !cancel_mgr->RegisterCallback(
token, std::bind(&CollectiveParamResolverLocal::CancelGroup, this,
group_params->group_key));
if (is_cancelled) {
done(errors::Cancelled("CompleteGroup is cancelled before it starts"));
return;
}
done = [cancel_mgr, token,
original_done = std::move(done)](const Status& status) {
cancel_mgr->TryDeregisterCallback(token);
original_done(status);
};
}
{
mutex_lock gr_lock(gr->mu);
VLOG(2) << "gr device_type=" << gr->group.device_type
<< " cp device_type=" << group_params->device_type
<< " current device=" << device.name();
if (gr->status.ok()) {
if (group_params->device_type != gr->group.device_type) {
gr->status = errors::Internal(
"Device ", device.name(),
" is joining a group with incompatible device type",
gr->group.device_type.type_string(),
" (group_key=", gr->group.group_key, ")");
} else if (group_params->group_size != gr->group.group_size) {
gr->status = errors::Internal(
"Device ", device.name(), " is joining a group with size",
group_params->group_size, ", but that group has size ",
gr->group.group_size, " (group_key=", gr->group.group_key, ")");
}
}
bool new_device = false;
if (gr->status.ok()) {
auto it = gr->incarnations_by_device_name.find(device.name());
if (it == gr->incarnations_by_device_name.end()) {
if (gr->group.members.size() == gr->group.group_size) {
gr->status =
errors::Internal("Device ", device.name(),
" is joining a group that is already full",
" (group_key=", gr->group.group_key, ")");
} else {
gr->incarnations_by_device_name[device.name()] = device.incarnation();
CollGroupMember member;
member.device = device;
if (group_params->user_specified_rank == -1 ||
(group_params->user_specified_rank >= 0 &&
group_params->user_specified_rank < gr->group.group_size)) {
member.rank = group_params->user_specified_rank;
} else {
gr->status = errors::InvalidArgument(
"User Provided rank is invalid. It should be between [0, "
"group_size)");
}
gr->group.members.push_back(std::move(member));
new_device = true;
if (VLOG_IS_ON(1)) {
string dev_buf;
for (const auto& m : gr->group.members) {
strings::StrAppend(&dev_buf, ",", m.device.name());
}
VLOG(1) << "CompleteGroupLocal group_key=" << gr->group.group_key
<< " group_size=" << gr->group.group_size << " (current"
<< " devices)=(" << dev_buf << ") (number of"
<< " devices pending)="
<< (gr->group.group_size - gr->group.members.size());
}
}
} else {
if (it->second != device.incarnation()) {
gr->status = errors::FailedPrecondition(
"Device ", device.name(),
" current incarnation doesn't match with one in the group. This "
"usually means this worker has restarted but the collective "
"leader hasn't, or this worker connects to a wrong cluster.");
}
}
}
if (gr->status.ok()) {
VLOG(2) << "group_size " << gr->group.group_size << " set size "
<< gr->group.members.size() << " gr " << gr;
if (gr->group.members.size() < gr->group.group_size) {
gr->pending_done.push_back(std::move(done));
gr->pending_params.push_back(group_params);
return;
}
CHECK_EQ(gr->group.members.size(), gr->group.group_size);
auto st = CheckUserSpecifiedRanks(gr->group.members);
if (!st.ok()) {
gr->status = st;
}
if (new_device) {
FinishGroup(gr);
}
*group_params = gr->group;
for (auto* params : gr->pending_params) {
*params = gr->group;
}
}
to_be_called.swap(gr->pending_done);
gr->pending_params.clear();
status = gr->status;
}
done(status);
for (int i = 0; i < to_be_called.size(); ++i) {
to_be_called[i](status);
}
}
namespace {
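// Helpers for establishing a ring order: each device gets a local rank within
// its task (from an explicit gpu_ring_order or by following the strongest
// interconnect links), and then a global rank by concatenating tasks.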
struct DevRec {
string task;
string device;
int original_rank;
int local_rank;
int global_rank;
const DeviceLocality* locality;
};
typedef std::unordered_map<string, DevRec> TaskDeviceMap;
typedef std::unordered_map<string, TaskDeviceMap> GlobalDeviceMap;
GlobalDeviceMap BuildDevRecs(const CollGroupParams& gp) {
GlobalDeviceMap gdm;
CHECK_EQ(gp.members.size(), gp.members.size());
for (int i = 0; i < gp.members.size(); ++i) {
TaskDeviceMap& tdm = gdm[gp.members[i].task];
DevRec* dr = &tdm[gp.members[i].device.name()];
dr->task = gp.members[i].task;
dr->device = gp.members[i].device.name();
dr->original_rank = i;
dr->local_rank = 0;
dr->global_rank = 0;
dr->locality = &gp.members[i].device.locality();
}
return gdm;
}
bool ParseRingOrder(const string& gpu_ring_order_str, TaskDeviceMap* tdm) {
std::vector<string> split_gpu_ring_order_str =
str_util::Split(gpu_ring_order_str, ',');
if (split_gpu_ring_order_str.size() != tdm->size()) return false;
gtl::FlatMap<int32, int32> gpu_ranks;
for (int32_t rank = 0;
rank < static_cast<int32>(split_gpu_ring_order_str.size()); ++rank) {
int32_t tmp;
if (strings::safe_strto32(split_gpu_ring_order_str[rank], &tmp)) {
gpu_ranks[tmp] = rank;
} else {
return false;
}
}
for (auto& tdm_it : *tdm) {
DeviceNameUtils::ParsedName parsed_name;
DevRec* dr = &tdm_it.second;
if (!DeviceNameUtils::ParseFullName(dr->device, &parsed_name)) {
return false;
}
auto rank_it = gpu_ranks.find(parsed_name.id);
if (rank_it == gpu_ranks.end()) return false;
dr->local_rank = rank_it->second;
}
VLOG(2) << "Assigned local ranks based on ring order " << gpu_ring_order_str;
return true;
}
void OrderTaskDeviceMap(const string& gpu_ring_order, TaskDeviceMap* tdm) {
CHECK_GT(tdm->size(), 0);
if (ParseRingOrder(gpu_ring_order, tdm)) return;
int least_rank = -1;
string next_device;
std::set<string> selected;
for (const auto& it : *tdm) {
if (least_rank < 0 || it.second.original_rank < least_rank) {
least_rank = it.second.original_rank;
next_device = it.second.device;
}
}
CHECK_GE(least_rank, 0);
DeviceNameUtils::ParsedName parsed_name;
CHECK(DeviceNameUtils::ParseFullName(next_device, &parsed_name));
int next_rank = 0;
while (true) {
selected.insert(next_device);
auto next_dev_it = tdm->find(next_device);
CHECK(next_dev_it != tdm->end());
DevRec* dr = &next_dev_it->second;
dr->local_rank = next_rank;
++next_rank;
if (selected.size() == tdm->size()) {
break;
}
const InterconnectLink* best_link = nullptr;
if (parsed_name.type == "GPU") {
for (const InterconnectLink& il : dr->locality->links().link()) {
parsed_name.id = il.device_id();
string endpoint_device =
DeviceNameUtils::ParsedNameToString(parsed_name);
if (selected.find(endpoint_device) != selected.end()) {
continue;
}
if (tdm->find(endpoint_device) == tdm->end()) {
continue;
}
if (best_link == nullptr || il.strength() > best_link->strength()) {
best_link = &il;
}
}
}
if (best_link != nullptr) {
parsed_name.id = best_link->device_id();
next_device = DeviceNameUtils::ParsedNameToString(parsed_name);
} else {
least_rank = -1;
for (const auto& it : *tdm) {
if (selected.find(it.second.device) != selected.end()) {
continue;
}
if (least_rank < 0 || it.second.original_rank < least_rank) {
least_rank = it.second.original_rank;
next_device = it.second.device;
}
}
CHECK_GE(least_rank, 0);
}
}
}
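// Assigns every device a global rank: local ranks are computed per task, then
// offset by the number of devices in the preceding tasks.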
GlobalDeviceMap EstablishGlobalRank(const CollGroupParams& gp,
const string& gpu_ring_order) {
VLOG(1) << "EstablishGlobalRank";
GlobalDeviceMap gdm = BuildDevRecs(gp);
for (auto& iter : gdm) {
TaskDeviceMap& tdm = iter.second;
OrderTaskDeviceMap(gpu_ring_order, &tdm);
}
std::set<string> tasks;
for (const CollGroupMember& member : gp.members) {
tasks.insert(member.task);
}
int next_rank = 0;
for (const string& task : tasks) {
TaskDeviceMap* tdm = &gdm[task];
for (auto& it : *tdm) {
it.second.global_rank = it.second.local_rank + next_rank;
}
next_rank += tdm->size();
}
return gdm;
}
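// Counts the devices contributed by each task and records whether all tasks
// contribute the same number.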
void SetDevPerTask(CollGroupParams* gp) {
gp->num_devices_per_task.clear();
for (const CollGroupMember& member : gp->members) {
gp->num_devices_per_task[member.task]++;
}
gp->same_num_devices_per_task = false;
int dev_per_task = -1;
for (const auto& task_dev : gp->num_devices_per_task) {
if (dev_per_task == -1) {
dev_per_task = task_dev.second;
} else if (dev_per_task != task_dev.second) {
return;
}
}
gp->same_num_devices_per_task = true;
}
}  // namespace
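// Called once the group is full: fills in task names and locality flags,
// establishes the default ranking, and computes per-task device counts.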
void CollectiveParamResolverLocal::FinishGroup(GroupRec* gr) {
for (CollGroupMember& member : gr->group.members) {
member.task = TaskNameFromDeviceName(member.device.name());
member.is_local = member.task == task_name_;
}
CompleteDefaultRanking(&gr->group);
SetDevPerTask(&gr->group);
gr->group.num_tasks =
static_cast<int32>(gr->group.num_devices_per_task.size());
}
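// Fails every callback still waiting on an incomplete group; a fully formed
// group is left untouched.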
void CollectiveParamResolverLocal::CancelGroup(int32 group_key) {
std::vector<StatusCallback> pending_done;
GroupRec* gr = nullptr;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(group_key);
if (it == group_table_.end()) {
return;
}
gr = it->second.get();
}
{
mutex_lock l(gr->mu);
if (gr->group.members.size() == gr->group.group_size) {
return;
}
gr->status = errors::Cancelled("group is cancelled");
pending_done.swap(gr->pending_done);
gr->pending_params.clear();
}
for (const StatusCallback& done : pending_done) {
done(errors::Cancelled("group is cancelled"));
}
}
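// Sets default_rank to the index of the member matching |device| and assigns
// positional ranks to members that did not specify one.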
void CollectiveParamResolverLocal::SetDefaultRank(const string& device,
CollectiveParams* cp) {
CHECK_EQ(cp->group.group_size, cp->group.members.size()) << cp->ToString();
for (int i = 0; i < cp->group.group_size; ++i) {
if (cp->group.members[i].device.name() == device) {
cp->default_rank = i;
}
if (cp->group.members[i].rank == -1) {
cp->group.members[i].rank = i;
}
}
}
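// Copies the instance fields into the shared record for this instance.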
void CollectiveParamResolverLocal::InitInstanceSharedParams(
const CollectiveParams* cp, InstanceRec* ir) {
ir->shared->instance = cp->instance;
ir->shared->default_rank = -1;
}
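// Sorts members by device name for determinism, then reorders them according
// to the interconnect-aware global ranking.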
void CollectiveParamResolverLocal::CompleteDefaultRanking(CollGroupParams* gp) {
std::sort(gp->members.begin(), gp->members.end(),
[](const CollGroupMember& lhs, const CollGroupMember& rhs) {
return DeviceNameUtils::CompareFullNames(lhs.device.name(),
rhs.device.name());
});
GlobalDeviceMap gdm = EstablishGlobalRank(*gp, gpu_ring_order_);
std::vector<CollGroupMember> new_members(gp->group_size);
for (const auto& git : gdm) {
const TaskDeviceMap& tdm = git.second;
for (const auto& tit : tdm) {
const DevRec& dr = tit.second;
new_members[dr.global_rank] = std::move(gp->members[dr.original_rank]);
}
}
if (VLOG_IS_ON(2)) {
string buf;
for (const auto& m : new_members)
strings::StrAppend(&buf, "\n", m.device.name());
VLOG(2) << "Optimized device order for group " << gp->group_key << ": "
<< buf;
}
gp->members = std::move(new_members);
}
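// Finds or creates the InstanceRec keyed by (group_key, step_id,
// instance_key); sets *created accordingly and propagates any pending abort
// status into the record.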
CollectiveParamResolverLocal::InstanceRec*
CollectiveParamResolverLocal::GetOrCreateInstanceRec(CollectiveParams* cp,
bool* created) {
*created = false;
InstanceRec* irec = nullptr;
{
mutex_lock l(instance_mu_);
std::tuple<int64_t, int32_t> key = {cp->instance.step_id,
cp->instance.instance_key};
auto group_it = instance_table_.find(cp->group.group_key);
if (group_it != instance_table_.end()) {
auto instance_it = group_it->second.find(key);
if (instance_it != group_it->second.end()) {
irec = instance_it->second.get();
}
}
if (irec == nullptr) {
irec = new InstanceRec;
*created = true;
{
mutex_lock il(irec->mu);
irec->known.resize(cp->group.group_size, false);
}
InitInstanceSharedParams(cp, irec);
instance_table_[cp->group.group_key][key].reset(irec);
}
}
Status status;
{
mutex_lock l(status_mu_);
status = status_;
}
if (!status.ok()) {
mutex_lock l(irec->mu);
irec->status = status;
}
return irec;
}
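// Returns the completed group params for |group_key|, failing if the group
// was never initialized or its initialization failed.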
Status CollectiveParamResolverLocal::LookupGroup(int32_t group_key,
CollGroupParams* group) {
mutex_lock l(group_mu_);
auto group_rec = group_table_.find(group_key);
if (group_rec == group_table_.end()) {
return errors::InvalidArgument("Group ", group_key,
" is not "
"initialized. Please call group "
"initialization op first before invoking "
"collective op.");
}
mutex_lock lock(group_rec->second->mu);
if (!group_rec->second->status.ok()) {
return errors::FailedPrecondition(
"Failed to run collective due to "
"unsuccessful group initialization. "
"Group initialization failed with error ",
group_rec->second->status.ToString());
}
*group = group_rec->second->group;
return absl::OkStatus();
}
void CollectiveParamResolverLocal::CompleteParamsAsync(
const DeviceAttributes& device, CollectiveParams* cp,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteParams local " << device.name() << " for " << cp << ": "
<< cp->ToString();
if (cp->run_group_initialization) {
CompleteGroupLocal(device, &cp->group, cancel_mgr,
[this, device, cp, done](const Status& s) {
if (s.ok()) {
CompleteInstanceLocal(device.name(), cp, done);
} else {
done(s);
}
});
} else {
const auto s = LookupGroup(cp->group.group_key, &cp->group);
if (s.ok()) {
CompleteInstanceLocal(device.name(), cp, done);
} else {
done(s);
}
}
}
void CollectiveParamResolverLocal::CompleteInstanceAsync(
const CompleteInstanceRequest* request, CompleteInstanceResponse* response,
CancellationManager* cancel_mgr, const StatusCallback& done) {
done(
errors::Internal("CompleteInstance is not implemented by "
"CollectiveParamResolverLocal which is "
"intended only for non-distributed deployment."));
}
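// Picks the NCCL implementation when NCCL is enabled (globally or via the
// instance's communication_hint), the group runs on GPUs, and the NCCL
// kernels are registered.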
void CollectiveParamResolverLocal::AssignCollectiveType(CollectiveParams* cp) {
CollectiveImplementationInterface* col_impl;
bool use_nccl =
(nccl_ || cp->instance.impl_details.communication_hint == "nccl") &&
cp->group.device_type == DEVICE_GPU &&
CollectiveRegistry::LookupParamResolverInstance("NcclReduce", &col_impl)
.ok();
cp->instance.impl_details.collective_name = GetCollectiveName(cp, use_nccl);
VLOG(1) << "AssignCollectiveType "
<< cp->instance.impl_details.collective_name;
}
void CollectiveParamResolverLocal::CompleteInstanceLocal(
const string& device, CollectiveParams* cp, const StatusCallback& done) {
VLOG(1) << "CompleteInstanceLocal " << device
<< " instance_key: " << cp->instance.instance_key << " group_key "
<< cp->group.group_key;
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
if (!created_irec) {
if (ir->shared->instance.type != cp->instance.type ||
ir->shared->instance.data_type != cp->instance.data_type) {
done(errors::Internal("Collective instance ", cp->instance.instance_key,
" expected type ", ir->shared->instance.type,
" and data_type ", ir->shared->instance.data_type,
" but got type ", cp->instance.type,
" and data_type ", cp->instance.data_type));
return;
}
}
CompleteInstanceFromInitializedIRec(device, cp, ir, done);
}
void CollectiveParamResolverLocal::CompleteInstanceFromInitializedIRec(
const string& device, CollectiveParams* cp, InstanceRec* ir,
const StatusCallback& done) {
auto expected_shape = cp->instance.shape;
Status status;
{
mutex_lock l(ir->mu);
status = ir->status;
if (status.ok()) {
cp->instance = ir->shared->instance;
}
}
if (!status.ok()) {
done(status);
return;
}
if (expected_shape != cp->instance.shape) {
done(errors::InvalidArgument(
"Shape mismatch in the collective instance ", cp->instance.instance_key,
". Op at device ", device, " expected shape ",
expected_shape.DebugString(), " but another member in the group ",
"expected shape ", cp->instance.shape.DebugString(), ". This is likely",
" due to different input shapes at different members of the collective",
" op."));
return;
}
AssignCollectiveType(cp);
SetDefaultRank(device, cp);
CollectiveImplementationInterface* col_impl;
status = CollectiveRegistry::LookupParamResolverInstance(
cp->instance.impl_details.collective_name, &col_impl);
if (!status.ok()) {
done(status);
return;
}
if (cp->instance.type == BROADCAST_COLLECTIVE) {
WaitForGroup(ir, cp, [col_impl, ir, device, cp, done](InstanceRec* irec) {
Status s;
if (ir != irec) {
s = errors::Internal("Expected ir ", ir, " and irec ", irec,
" to be equal");
} else {
mutex_lock l(irec->mu);
s = irec->status;
cp->source_rank = irec->source_rank;
}
if (s.ok()) {
s = col_impl->InitializeCollectiveParams(cp);
}
done(s);
});
} else {
done(col_impl->InitializeCollectiveParams(cp));
}
}
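// Defers |f| until every member of the group has checked in on this instance;
// for broadcasts, also records which rank claimed to be the source and fails
// if there are zero or multiple claimants.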
void CollectiveParamResolverLocal::WaitForGroup(InstanceRec* ir,
CollectiveParams* cp,
const IRConsumer& f) {
std::vector<IRConsumer> ready_waiters;
do {
mutex_lock l(ir->mu);
if (!ir->status.ok()) {
break;
}
CHECK_EQ(cp->group.group_size, ir->known.size());
CHECK_GE(cp->default_rank, 0);
if (!ir->known[cp->default_rank]) {
ir->known[cp->default_rank] = true;
++ir->known_count;
if (cp->is_source) {
if (ir->source_rank >= 0) {
ir->status = errors::Internal("Instance ", cp->instance.instance_key,
" already has source ", ir->source_rank,
", received second claim from ",
cp->default_rank);
} else {
ir->source_rank = cp->default_rank;
}
}
}
if (ir->known_count < cp->group.group_size) {
ir->known_waiters.push_back(f);
return;
}
CHECK_EQ(ir->known_count, cp->group.group_size);
if (ir->source_rank < 0) {
ir->status =
errors::Internal("Instance ", cp->instance.instance_key,
" found no source for broadcast. This "
"could mean that there were group_size=",
ir->known_count, " BcastRecvs but no BcastSend.");
}
if (!ir->known_waiters.empty()) {
ready_waiters = std::move(ir->known_waiters);
}
} while (false);
f(ir);
for (auto& f : ready_waiters) {
f(ir);
}
}
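// Latches the first abort status and fails every pending group and instance
// callback with it.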
void CollectiveParamResolverLocal::StartAbort(const Status& s) {
{
mutex_lock l(status_mu_);
if (!status_.ok()) {
VLOG(2) << "CollectiveParamResolverLocal already aborted. Ignoring "
"subsequent abortion with status: "
<< s;
return;
}
status_ = s;
}
StartAbortLocal(s);
}
void CollectiveParamResolverLocal::StartAbortLocal(const Status& s) {
std::vector<StatusCallback> pending_done;
{
mutex_lock l(group_mu_);
for (const auto& item : group_table_) {
GroupRec* gr = item.second.get();
{
mutex_lock gl(gr->mu);
gr->status = s;
for (auto& done : gr->pending_done) {
pending_done.push_back(std::move(done));
}
gr->pending_done.clear();
gr->pending_params.clear();
}
}
}
for (const StatusCallback& done : pending_done) {
done(s);
}
std::vector<InstanceRec*> instances;
{
mutex_lock l(instance_mu_);
for (const auto& group_entry : instance_table_) {
for (const auto& item : group_entry.second) {
instances.push_back(item.second.get());
}
}
}
for (InstanceRec* ir : instances) {
std::vector<IRConsumer> known_waiters;
{
mutex_lock il(ir->mu);
ir->status = s;
known_waiters.swap(ir->known_waiters);
}
for (const IRConsumer& done : known_waiters) {
done(ir);
}
}
}
}  // namespace tensorflow
|
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include <atomic>
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
#define NUM_DEVS 3
class CollectiveParamResolverLocalTest : public ::testing::Test {
protected:
CollectiveParamResolverLocalTest() {
ConfigProto cp;
SessionOptions options;
task_name_ = "/job:localhost/replica:0/task:0";
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name_, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
drl_.reset(new DeviceResolverLocal(device_mgr_.get()));
ResetParamResolver(ConfigProto());
}
void ResetParamResolver(const ConfigProto& config) {
prl_.reset(new CollectiveParamResolverLocal(
config, device_mgr_.get(), drl_.get(), /*nccl_communicator=*/nullptr,
task_name_));
}
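// Runs CompleteDefaultRanking() with an optional explicit gpu_ring_order and
// checks the resulting device order against |expected_device_order|.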
void RunCompleteDefaultRanking(
CollGroupParams group, const std::vector<int32>& gpu_ring_order,
const std::vector<string>& expected_device_order) {
ConfigProto config;
if (!gpu_ring_order.empty()) {
config.mutable_gpu_options()
->mutable_experimental()
->set_collective_ring_order(absl::StrJoin(gpu_ring_order, ","));
}
ResetParamResolver(config);
prl_->CompleteDefaultRanking(&group);
std::vector<string> actual_device_order;
for (const CollGroupMember& member : group.members) {
actual_device_order.push_back(member.device.name());
}
EXPECT_EQ(actual_device_order, expected_device_order);
}
DeviceAttributes GetDeviceAttributes(const string& device_name) {
Device* device = nullptr;
TF_CHECK_OK(device_mgr_->LookupDevice(device_name, &device));
return device->attributes();
}
string task_name_;
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<DeviceResolverLocal> drl_;
std::unique_ptr<CollectiveParamResolverLocal> prl_;
};
TEST_F(CollectiveParamResolverLocalTest, CompleteDefaultRanking) {
constexpr int kNumGpus = 8;
CollGroupParams group;
group.device_type = DeviceType("GPU");
group.num_tasks = 1;
group.group_size = kNumGpus;
std::unordered_set<int> clique1 = {0, 1, 6, 7};
for (int gpu_idx = 0; gpu_idx < kNumGpus; ++gpu_idx) {
CollGroupMember member;
member.task = "/job:localhost/replica:0/task:0";
member.device.set_name(strings::StrCat(
"/job:localhost/replica:0/task:0/device:GPU:", gpu_idx));
for (int link_idx = 0; link_idx < kNumGpus; ++link_idx) {
if (gpu_idx == link_idx) continue;
bool gpu_in_clique1 = clique1.find(gpu_idx) != clique1.end();
bool link_in_clique1 = clique1.find(link_idx) != clique1.end();
if ((gpu_in_clique1 && link_in_clique1) ||
(!gpu_in_clique1 && !link_in_clique1)) {
LocalLinks* links = member.device.mutable_locality()->mutable_links();
InterconnectLink* ilink = links->add_link();
ilink->set_device_id(link_idx);
ilink->set_strength(2);
} else if ((gpu_idx == 3 && link_idx == 7) ||
(gpu_idx == 7 && link_idx == 3)) {
LocalLinks* links = member.device.mutable_locality()->mutable_links();
InterconnectLink* ilink = links->add_link();
ilink->set_device_id(link_idx);
ilink->set_strength(1);
}
}
group.members.push_back(member);
}
RunCompleteDefaultRanking(group, {1, 3, 5, 7, 6, 4, 2, 0},
{
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:5",
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:0",
});
RunCompleteDefaultRanking(group, {7, 6, 5, 4, 3, 2, 1, 0},
{
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:5",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:0",
});
RunCompleteDefaultRanking(group, {},
{
"/job:localhost/replica:0/task:0/device:GPU:0",
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:5",
});
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsReduction1Task) {
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
cp->group.group_key = 1;
cp->group.group_size = 3;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = 7;
cp->instance.type = REDUCTION_COLLECTIVE;
cp->instance.data_type = DataType(DT_FLOAT);
cp->instance.shape = TensorShape({5});
cp->instance.impl_details.subdiv_offsets.push_back(0);
cp->is_source = false;
Env::Default()->SchedClosure([this, i, cp, &note, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
/*cancel_mgr=*/nullptr,
[&statuses, &note, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
TF_ASSERT_OK(statuses[i]);
ASSERT_EQ(cps[i]->group.members.size(), 3);
for (int j = 0; j < NUM_DEVS; ++j) {
EXPECT_EQ(
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", j),
cps[i]->group.members[j].device.name());
EXPECT_TRUE(cps[i]->group.members[j].is_local);
}
EXPECT_EQ(cps[i]->instance.impl_details.subdiv_source_rank.size(), 0);
EXPECT_FALSE(cps[i]->is_source);
EXPECT_EQ(cps[i]->default_rank, i);
EXPECT_TRUE(cps[i]->group.same_num_devices_per_task);
cps[i]->Unref();
}
}
void InitializeCollectiveParamsForBroadcast(int instance_key, int device_idx,
bool is_source,
CollectiveParams* cp) {
cp->group.group_key = 1;
cp->group.group_size = 3;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = instance_key;
cp->instance.type = BROADCAST_COLLECTIVE;
cp->instance.data_type = DataType(DT_FLOAT);
cp->instance.shape = TensorShape({5});
cp->instance.impl_details.subdiv_offsets.push_back(0);
cp->is_source = is_source;
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsBroadcast1Task) {
constexpr int kInstanceKey = 5;
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
InitializeCollectiveParamsForBroadcast(kInstanceKey, i, i == 1, cp);
Env::Default()->SchedClosure([this, i, cp, &note, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
/*cancel_mgr=*/nullptr,
[&statuses, &note, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
TF_ASSERT_OK(statuses[i]);
ASSERT_EQ(cps[i]->group.members.size(), 3);
for (int j = 0; j < NUM_DEVS; ++j) {
EXPECT_EQ(
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", j),
cps[i]->group.members[j].device.name());
EXPECT_TRUE(cps[i]->group.members[j].is_local);
}
EXPECT_EQ(cps[i]->is_source, (i == 1));
EXPECT_EQ(cps[i]->default_rank, i);
EXPECT_TRUE(cps[i]->group.same_num_devices_per_task);
cps[i]->Unref();
}
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsBroadcastForgotSender) {
constexpr int kInstanceKey = 8;
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
InitializeCollectiveParamsForBroadcast(kInstanceKey, i, false, cp);
Env::Default()->SchedClosure([this, i, cp, &note, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
/*cancel_mgr=*/nullptr,
[&statuses, &note, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
EXPECT_EQ(statuses[i].code(), error::INTERNAL);
EXPECT_EQ(statuses[i].message(),
strings::StrCat(
"Instance ", kInstanceKey,
" found no source for broadcast. This could mean that there"
" were group_size=",
NUM_DEVS, " BcastRecvs but no BcastSend."));
cps[i]->Unref();
}
}
CollectiveParams* MakeCollectiveParams(int group_key, int instance_key,
bool is_source) {
auto* cp = new CollectiveParams();
cp->group.group_key = group_key;
cp->group.group_size = NUM_DEVS;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = instance_key;
cp->instance.type = BROADCAST_COLLECTIVE;
cp->is_source = is_source;
return cp;
}
TEST_F(CollectiveParamResolverLocalTest, AbortPendingGroup) {
CancellationManager cancel_mgr;
std::vector<CollectiveParams*> cp(NUM_DEVS - 1);
BlockingCounter start(NUM_DEVS - 1);
BlockingCounter done(NUM_DEVS - 1);
for (int i = 0; i < NUM_DEVS - 1; ++i) {
Env::Default()->SchedClosure([this, i, &cancel_mgr, &cp, &start, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(/*group_key=*/100, /*instance_key=*/100,
/*is_source=*/i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i], &cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(),
absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.DecrementCount();
cp->Unref();
});
start.DecrementCount();
});
}
start.Wait();
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
}
TEST_F(CollectiveParamResolverLocalTest, AbortPendingInstance) {
CancellationManager cancel_mgr;
std::vector<CollectiveParams*> cp(NUM_DEVS);
int group_key = 100;
int instance_key = 100;
{
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i,
&cancel_mgr, &cp, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key,
/*is_source=*/i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i],
&cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(), error::OK);
done.DecrementCount();
cp->Unref();
});
});
}
done.Wait();
}
BlockingCounter start(NUM_DEVS - 1);
BlockingCounter done(NUM_DEVS - 1);
for (int i = 0; i < NUM_DEVS - 1; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i, &cancel_mgr,
&cp, &start, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key + 1,
/*is_source=*/i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i], &cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(),
absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.DecrementCount();
cp->Unref();
});
start.DecrementCount();
});
}
start.Wait();
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsAfterAbortion) {
CancellationManager cancel_mgr;
int group_key = 100;
int instance_key = 100;
{
std::vector<CollectiveParams*> cp(NUM_DEVS);
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i,
&cancel_mgr, &cp, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key,
i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i],
&cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(), error::OK);
done.DecrementCount();
cp->Unref();
});
});
}
done.Wait();
}
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
auto complete_params = [this, &cancel_mgr](int group_key, int instance_key) {
string device = "/job:localhost/replica:0/task:0/device:CPU:0";
Notification done;
auto* cp = MakeCollectiveParams(group_key, instance_key,
true);
core::ScopedUnref unref(cp);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp, &cancel_mgr,
[&done](const Status& s) {
EXPECT_EQ(s.code(), absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.Notify();
});
done.WaitForNotification();
};
complete_params(group_key, instance_key);
complete_params(group_key, instance_key + 1);
complete_params(group_key + 1, instance_key + 1);
}
TEST_F(CollectiveParamResolverLocalTest, AbortNormalCompleteParamsAsync) {
CancellationManager cancel_mgr;
std::atomic<int64_t> num_ok{0};
for (int cnt = 0; cnt < 100; ++cnt) {
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
Env::Default()->SchedClosure(
[this, i, device, &num_ok, &cancel_mgr, &done] {
int key = 100;
while (true) {
Status status;
Notification n;
auto* cp =
MakeCollectiveParams( key, key,
i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
&cancel_mgr,
[&status, &n](const Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
cp->Unref();
if (!status.ok()) {
EXPECT_EQ(status.code(), absl::StatusCode::kAborted);
EXPECT_EQ(status.message(), "__aborted__");
done.DecrementCount();
return;
}
++num_ok;
++key;
}
});
}
    int64_t delay_micros = random::New64() % 50000;
    Env::Default()->SleepForMicroseconds(delay_micros);
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
ResetParamResolver(ConfigProto());
}
EXPECT_GT(num_ok.load(), 50);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/collective_param_resolver_local.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/collective_param_resolver_local_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a89d6cb3-79a8-4858-8019-c96a1d6c109d | cpp | google/quiche | quic_libevent | quiche/quic/bindings/quic_libevent.cc | quiche/quic/bindings/quic_libevent_test.cc | #include "quiche/quic/bindings/quic_libevent.h"
#include <memory>
#include <utility>
#include "absl/time/time.h"
#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/thread.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_alarm.h"
#include "quiche/quic/core/quic_clock.h"
#include "quiche/quic/core/quic_default_clock.h"
#include "quiche/quic/core/quic_time.h"
namespace quic {
using LibeventEventMask = short;
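// Converts between libevent's EV_READ/EV_WRITE bits and the corresponding
// QuicSocketEventMask readable/writable bits, in both directions.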
QuicSocketEventMask LibeventEventMaskToQuicEvents(int events) {
return ((events & EV_READ) ? kSocketEventReadable : 0) |
((events & EV_WRITE) ? kSocketEventWritable : 0);
}
LibeventEventMask QuicEventsToLibeventEventMask(QuicSocketEventMask events) {
return ((events & kSocketEventReadable) ? EV_READ : 0) |
((events & kSocketEventWritable) ? EV_WRITE : 0);
}
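// A QuicAlarm implemented on top of a libevent one-shot timer (evtimer).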
class LibeventAlarm : public QuicAlarm {
public:
LibeventAlarm(LibeventQuicEventLoop* loop,
QuicArenaScopedPtr<QuicAlarm::Delegate> delegate)
: QuicAlarm(std::move(delegate)), clock_(loop->clock()) {
event_.reset(evtimer_new(
loop->base(),
[](evutil_socket_t, LibeventEventMask, void* arg) {
LibeventAlarm* self = reinterpret_cast<LibeventAlarm*>(arg);
self->Fire();
},
this));
}
protected:
void SetImpl() override {
absl::Duration timeout =
absl::Microseconds((deadline() - clock_->Now()).ToMicroseconds());
timeval unix_time = absl::ToTimeval(timeout);
event_add(event_.get(), &unix_time);
}
void CancelImpl() override { event_del(event_.get()); }
private:
std::unique_ptr<event, LibeventEventDeleter> event_;
QuicClock* clock_;
};
LibeventQuicEventLoop::LibeventQuicEventLoop(event_base* base, QuicClock* clock)
: base_(base),
edge_triggered_(event_base_get_features(base) & EV_FEATURE_ET),
clock_(clock),
artifical_event_timer_(evtimer_new(
base_,
[](evutil_socket_t, LibeventEventMask, void* arg) {
auto* self = reinterpret_cast<LibeventQuicEventLoop*>(arg);
self->ActivateArtificialEvents();
},
this)) {
QUICHE_CHECK_LE(sizeof(event), event_get_struct_event_size())
<< "libevent ABI mismatch: sizeof(event) is bigger than the one QUICHE "
"has been compiled with";
}
LibeventQuicEventLoop::~LibeventQuicEventLoop() {
event_del(artifical_event_timer_.get());
}
bool LibeventQuicEventLoop::RegisterSocket(QuicUdpSocketFd fd,
QuicSocketEventMask events,
QuicSocketEventListener* listener) {
auto [it, success] =
registration_map_.try_emplace(fd, this, fd, events, listener);
return success;
}
bool LibeventQuicEventLoop::UnregisterSocket(QuicUdpSocketFd fd) {
fds_with_artifical_events_.erase(fd);
return registration_map_.erase(fd);
}
bool LibeventQuicEventLoop::RearmSocket(QuicUdpSocketFd fd,
QuicSocketEventMask events) {
if (edge_triggered_) {
QUICHE_BUG(LibeventQuicEventLoop_RearmSocket_called_on_ET)
<< "RearmSocket() called on an edge-triggered event loop";
return false;
}
auto it = registration_map_.find(fd);
if (it == registration_map_.end()) {
return false;
}
it->second.Rearm(events);
return true;
}
bool LibeventQuicEventLoop::ArtificiallyNotifyEvent(
QuicUdpSocketFd fd, QuicSocketEventMask events) {
auto it = registration_map_.find(fd);
if (it == registration_map_.end()) {
return false;
}
it->second.RecordArtificalEvents(events);
fds_with_artifical_events_.insert(fd);
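  // Arm a zero-delay timer (unless one is already pending) so that the
  // artificial events are delivered from within the event loop on its next
  // iteration rather than synchronously from this call.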
if (!evtimer_pending(artifical_event_timer_.get(), nullptr)) {
struct timeval tv = {0, 0};
evtimer_add(artifical_event_timer_.get(), &tv);
}
return true;
}
void LibeventQuicEventLoop::ActivateArtificialEvents() {
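  // Swap the pending set into a local variable so that listeners can safely
  // schedule new artificial events while the current batch is being notified.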
absl::flat_hash_set<QuicUdpSocketFd> fds_with_artifical_events;
{
using std::swap;
swap(fds_with_artifical_events_, fds_with_artifical_events);
}
for (QuicUdpSocketFd fd : fds_with_artifical_events) {
auto it = registration_map_.find(fd);
if (it == registration_map_.end()) {
continue;
}
it->second.MaybeNotifyArtificalEvents();
}
}
void LibeventQuicEventLoop::RunEventLoopOnce(QuicTime::Delta default_timeout) {
timeval timeout =
absl::ToTimeval(absl::Microseconds(default_timeout.ToMicroseconds()));
event_base_loopexit(base_, &timeout);
event_base_loop(base_, EVLOOP_ONCE);
}
void LibeventQuicEventLoop::WakeUp() {
timeval timeout = absl::ToTimeval(absl::ZeroDuration());
event_base_loopexit(base_, &timeout);
}
LibeventQuicEventLoop::Registration::Registration(
LibeventQuicEventLoop* loop, QuicUdpSocketFd fd, QuicSocketEventMask events,
QuicSocketEventListener* listener)
: loop_(loop), listener_(listener) {
event_callback_fn callback = [](evutil_socket_t fd, LibeventEventMask events,
void* arg) {
auto* self = reinterpret_cast<LibeventQuicEventLoop::Registration*>(arg);
self->listener_->OnSocketEvent(self->loop_, fd,
LibeventEventMaskToQuicEvents(events));
};
if (loop_->SupportsEdgeTriggered()) {
LibeventEventMask mask =
QuicEventsToLibeventEventMask(events) | EV_PERSIST | EV_ET;
event_assign(&both_events_, loop_->base(), fd, mask, callback, this);
event_add(&both_events_, nullptr);
} else {
event_assign(&read_event_, loop_->base(), fd, EV_READ, callback, this);
event_assign(&write_event_, loop_->base(), fd, EV_WRITE, callback, this);
Rearm(events);
}
}
LibeventQuicEventLoop::Registration::~Registration() {
if (loop_->SupportsEdgeTriggered()) {
event_del(&both_events_);
} else {
event_del(&read_event_);
event_del(&write_event_);
}
}
void LibeventQuicEventLoop::Registration::RecordArtificalEvents(
QuicSocketEventMask events) {
artificial_events_ |= events;
}
void LibeventQuicEventLoop::Registration::MaybeNotifyArtificalEvents() {
if (artificial_events_ == 0) {
return;
}
QuicSocketEventMask events = artificial_events_;
artificial_events_ = 0;
if (loop_->SupportsEdgeTriggered()) {
event_active(&both_events_, QuicEventsToLibeventEventMask(events), 0);
return;
}
if (events & kSocketEventReadable) {
event_active(&read_event_, EV_READ, 0);
}
if (events & kSocketEventWritable) {
event_active(&write_event_, EV_WRITE, 0);
}
}
void LibeventQuicEventLoop::Registration::Rearm(QuicSocketEventMask events) {
QUICHE_DCHECK(!loop_->SupportsEdgeTriggered());
if (events & kSocketEventReadable) {
event_add(&read_event_, nullptr);
}
if (events & kSocketEventWritable) {
event_add(&write_event_, nullptr);
}
}
QuicAlarm* LibeventQuicEventLoop::AlarmFactory::CreateAlarm(
QuicAlarm::Delegate* delegate) {
return new LibeventAlarm(loop_,
QuicArenaScopedPtr<QuicAlarm::Delegate>(delegate));
}
QuicArenaScopedPtr<QuicAlarm> LibeventQuicEventLoop::AlarmFactory::CreateAlarm(
QuicArenaScopedPtr<QuicAlarm::Delegate> delegate,
QuicConnectionArena* arena) {
if (arena != nullptr) {
return arena->New<LibeventAlarm>(loop_, std::move(delegate));
}
return QuicArenaScopedPtr<QuicAlarm>(
new LibeventAlarm(loop_, std::move(delegate)));
}
QuicLibeventEventLoopFactory::QuicLibeventEventLoopFactory(
bool force_level_triggered)
: force_level_triggered_(force_level_triggered) {
std::unique_ptr<QuicEventLoop> event_loop = Create(QuicDefaultClock::Get());
name_ = absl::StrFormat(
"libevent(%s)",
event_base_get_method(
static_cast<LibeventQuicEventLoopWithOwnership*>(event_loop.get())
->base()));
}
struct LibeventConfigDeleter {
void operator()(event_config* config) { event_config_free(config); }
};
std::unique_ptr<LibeventQuicEventLoopWithOwnership>
LibeventQuicEventLoopWithOwnership::Create(QuicClock* clock,
bool force_level_triggered) {
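  // Initialize libevent's thread support exactly once per process.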
static int threads_initialized = []() {
#ifdef _WIN32
return evthread_use_windows_threads();
#else
return evthread_use_pthreads();
#endif
}();
QUICHE_DCHECK_EQ(threads_initialized, 0);
std::unique_ptr<event_config, LibeventConfigDeleter> config(
event_config_new());
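  // epoll and kqueue are the backends that support edge-triggered polling;
  // avoiding them forces libevent onto a level-triggered backend.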
if (force_level_triggered) {
event_config_avoid_method(config.get(), "epoll");
event_config_avoid_method(config.get(), "kqueue");
}
return std::make_unique<LibeventQuicEventLoopWithOwnership>(
event_base_new_with_config(config.get()), clock);
}
} | #include "quiche/quic/bindings/quic_libevent.h"
#include <atomic>
#include <memory>
#include "absl/memory/memory.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "quiche/quic/core/quic_alarm.h"
#include "quiche/quic/core/quic_default_clock.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_thread.h"
namespace quic::test {
namespace {
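// Alarm delegate that fails the test if the alarm ever fires; used as a
// watchdog against the event loop hanging.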
class FailureAlarmDelegate : public QuicAlarm::Delegate {
public:
QuicConnectionContext* GetConnectionContext() override { return nullptr; }
void OnAlarm() override { ADD_FAILURE() << "Test timed out"; }
};
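// Thread that, after a short delay, sets a flag and wakes the event loop up
// from outside its own thread.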
class LoopBreakThread : public QuicThread {
public:
LoopBreakThread(LibeventQuicEventLoop* loop)
: QuicThread("LoopBreakThread"), loop_(loop) {}
void Run() override {
absl::SleepFor(absl::Milliseconds(250));
loop_broken_.store(true);
loop_->WakeUp();
}
std::atomic<int>& loop_broken() { return loop_broken_; }
private:
LibeventQuicEventLoop* loop_;
std::atomic<int> loop_broken_ = 0;
};
TEST(QuicLibeventTest, WakeUpFromAnotherThread) {
QuicClock* clock = QuicDefaultClock::Get();
auto event_loop_owned = QuicLibeventEventLoopFactory::Get()->Create(clock);
LibeventQuicEventLoop* event_loop =
static_cast<LibeventQuicEventLoop*>(event_loop_owned.get());
std::unique_ptr<QuicAlarmFactory> alarm_factory =
event_loop->CreateAlarmFactory();
std::unique_ptr<QuicAlarm> timeout_alarm =
absl::WrapUnique(alarm_factory->CreateAlarm(new FailureAlarmDelegate()));
const QuicTime kTimeoutAt = clock->Now() + QuicTime::Delta::FromSeconds(10);
timeout_alarm->Set(kTimeoutAt);
LoopBreakThread thread(event_loop);
thread.Start();
event_loop->RunEventLoopOnce(QuicTime::Delta::FromSeconds(5 * 60));
EXPECT_TRUE(thread.loop_broken().load());
thread.Join();
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/bindings/quic_libevent.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/bindings/quic_libevent_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
884a1873-2492-4e33-affb-1854f596ef62 | cpp | google/libphonenumber | phonenumbermatcher | cpp/src/phonenumbers/phonenumbermatcher.cc | cpp/test/phonenumbers/phonenumbermatcher_test.cc | #include "phonenumbers/phonenumbermatcher.h"
#ifndef I18N_PHONENUMBERS_USE_ICU_REGEXP
#error phonenumbermatcher depends on ICU \
(i.e. I18N_PHONENUMBERS_USE_ICU_REGEXP must be set)
#endif
#include <ctype.h>
#include <stddef.h>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <unicode/uchar.h>
#include "phonenumbers/alternate_format.h"
#include "phonenumbers/base/logging.h"
#include "phonenumbers/base/memory/scoped_ptr.h"
#include "phonenumbers/base/memory/singleton.h"
#include "phonenumbers/callback.h"
#include "phonenumbers/default_logger.h"
#include "phonenumbers/encoding_utils.h"
#include "phonenumbers/normalize_utf8.h"
#include "phonenumbers/phonemetadata.pb.h"
#include "phonenumbers/phonenumber.pb.h"
#include "phonenumbers/phonenumbermatch.h"
#include "phonenumbers/phonenumberutil.h"
#include "phonenumbers/regexp_adapter.h"
#include "phonenumbers/regexp_adapter_icu.h"
#include "phonenumbers/regexp_cache.h"
#include "phonenumbers/stringutil.h"
#include "phonenumbers/utf/unicodetext.h"
#ifdef I18N_PHONENUMBERS_USE_RE2
#include "phonenumbers/regexp_adapter_re2.h"
#endif
using std::map;
using std::numeric_limits;
using std::string;
namespace i18n {
namespace phonenumbers {
namespace {
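// Returns a regexp quantifier of the form "{lower,upper}".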
string Limit(int lower, int upper) {
DCHECK_GE(lower, 0);
DCHECK_GT(upper, 0);
DCHECK_LT(lower, upper);
return StrCat("{", lower, ",", upper, "}");
}
bool IsInvalidPunctuationSymbol(char32 character) {
return character == '%' || u_charType(character) == U_CURRENCY_SYMBOL;
}
bool ContainsOnlyValidXChars(const PhoneNumber& number, const string& candidate,
const PhoneNumberUtil& util) {
size_t found;
found = candidate.find_first_of("xX");
while (found != string::npos && found < candidate.length() - 1) {
char next_char = candidate[found + 1];
if (next_char == 'x' || next_char == 'X') {
++found;
if (util.IsNumberMatchWithOneString(
number, candidate.substr(found, candidate.length() - found))
!= PhoneNumberUtil::NSN_MATCH) {
return false;
}
} else {
string normalized_extension(candidate.substr(found,
candidate.length() - found));
util.NormalizeDigitsOnly(&normalized_extension);
if (normalized_extension != number.extension()) {
return false;
}
}
found = candidate.find_first_of("xX", found + 1);
}
return true;
}
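// Returns true if the digit groups of the formatted number all appear, in
// order, in the normalized candidate, i.e. the candidate has not regrouped
// the digits of the number.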
bool AllNumberGroupsRemainGrouped(
const PhoneNumberUtil& util,
const PhoneNumber& number,
const string& normalized_candidate,
const std::vector<string>& formatted_number_groups) {
size_t from_index = 0;
if (number.country_code_source() != PhoneNumber::FROM_DEFAULT_COUNTRY) {
string country_code = SimpleItoa(number.country_code());
from_index = normalized_candidate.find(country_code) + country_code.size();
}
for (size_t i = 0; i < formatted_number_groups.size(); ++i) {
from_index = normalized_candidate.find(formatted_number_groups.at(i),
from_index);
if (from_index == string::npos) {
return false;
}
from_index += formatted_number_groups.at(i).length();
if (i == 0 && from_index < normalized_candidate.length()) {
string region;
      util.GetRegionCodeForCountryCode(number.country_code(), &region);
string ndd_prefix;
util.GetNddPrefixForRegion(region, true, &ndd_prefix);
if (!ndd_prefix.empty() && isdigit(normalized_candidate.at(from_index))) {
string national_significant_number;
util.GetNationalSignificantNumber(number, &national_significant_number);
return HasPrefixString(normalized_candidate.substr(
from_index - formatted_number_groups.at(i).length()),
national_significant_number);
}
}
}
return normalized_candidate.substr(from_index)
.find(number.extension()) != string::npos;
}
bool LoadAlternateFormats(PhoneMetadataCollection* alternate_formats) {
#if defined(I18N_PHONENUMBERS_USE_ALTERNATE_FORMATS)
if (!alternate_formats->ParseFromArray(alternate_format_get(),
alternate_format_size())) {
LOG(ERROR) << "Could not parse binary data.";
return false;
}
return true;
#else
return false;
#endif
}
}
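// Singleton that holds the regexp factories and the (expensive to build)
// regular expressions shared by all PhoneNumberMatcher instances.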
class PhoneNumberMatcherRegExps : public Singleton<PhoneNumberMatcherRegExps> {
private:
friend class Singleton<PhoneNumberMatcherRegExps>;
string opening_parens_;
string closing_parens_;
string non_parens_;
string bracket_pair_limit_;
string leading_maybe_matched_bracket_;
string bracket_pairs_;
string lead_limit_;
string punctuation_limit_;
int digit_block_limit_;
string block_limit_;
string punctuation_;
string digit_sequence_;
string lead_class_chars_;
string lead_class_;
public:
scoped_ptr<const AbstractRegExpFactory> regexp_factory_for_pattern_;
scoped_ptr<const AbstractRegExpFactory> regexp_factory_;
mutable RegExpCache regexp_cache_;
scoped_ptr<const RegExp> pub_pages_;
scoped_ptr<const RegExp> slash_separated_dates_;
scoped_ptr<const RegExp> time_stamps_;
scoped_ptr<const RegExp> time_stamps_suffix_;
scoped_ptr<const RegExp> matching_brackets_;
scoped_ptr<std::vector<const RegExp*> > inner_matches_;
scoped_ptr<const RegExp> capture_up_to_second_number_start_pattern_;
scoped_ptr<const RegExp> capturing_ascii_digits_pattern_;
scoped_ptr<const RegExp> lead_class_pattern_;
scoped_ptr<const RegExp> pattern_;
PhoneNumberMatcherRegExps()
: opening_parens_("(\\[\xEF\xBC\x88\xEF\xBC\xBB" ),
closing_parens_(")\\]\xEF\xBC\x89\xEF\xBC\xBD" ),
non_parens_(StrCat("[^", opening_parens_, closing_parens_, "]")),
bracket_pair_limit_(Limit(0, 3)),
leading_maybe_matched_bracket_(StrCat(
"(?:[", opening_parens_, "])?",
"(?:", non_parens_, "+[", closing_parens_, "])?")),
bracket_pairs_(StrCat(
"(?:[", opening_parens_, "]", non_parens_, "+",
"[", closing_parens_, "])", bracket_pair_limit_)),
lead_limit_(Limit(0, 2)),
punctuation_limit_(Limit(0, 4)),
digit_block_limit_(PhoneNumberUtil::kMaxLengthForNsn +
PhoneNumberUtil::kMaxLengthCountryCode),
block_limit_(Limit(0, digit_block_limit_)),
punctuation_(StrCat("[", PhoneNumberUtil::kValidPunctuation, "]",
punctuation_limit_)),
digit_sequence_(StrCat("\\p{Nd}", Limit(1, digit_block_limit_))),
lead_class_chars_(StrCat(opening_parens_, PhoneNumberUtil::kPlusChars)),
lead_class_(StrCat("[", lead_class_chars_, "]")),
regexp_factory_for_pattern_(new ICURegExpFactory()),
#ifdef I18N_PHONENUMBERS_USE_RE2
regexp_factory_(new RE2RegExpFactory()),
#else
regexp_factory_(new ICURegExpFactory()),
#endif
regexp_cache_(*regexp_factory_, 32),
pub_pages_(regexp_factory_->CreateRegExp(
"\\d{1,5}-+\\d{1,5}\\s{0,4}\\(\\d{1,4}")),
slash_separated_dates_(regexp_factory_->CreateRegExp(
"(?:(?:[0-3]?\\d/[01]?\\d)|"
"(?:[01]?\\d/[0-3]?\\d))/(?:[12]\\d)?\\d{2}")),
time_stamps_(regexp_factory_->CreateRegExp(
"[12]\\d{3}[-/]?[01]\\d[-/]?[0-3]\\d +[0-2]\\d$")),
time_stamps_suffix_(regexp_factory_->CreateRegExp(":[0-5]\\d")),
matching_brackets_(regexp_factory_->CreateRegExp(
StrCat(leading_maybe_matched_bracket_, non_parens_, "+",
bracket_pairs_, non_parens_, "*"))),
inner_matches_(new std::vector<const RegExp*>()),
capture_up_to_second_number_start_pattern_(
regexp_factory_->CreateRegExp(
PhoneNumberUtil::kCaptureUpToSecondNumberStart)),
capturing_ascii_digits_pattern_(
regexp_factory_->CreateRegExp("(\\d+)")),
lead_class_pattern_(regexp_factory_->CreateRegExp(lead_class_)),
pattern_(regexp_factory_for_pattern_->CreateRegExp(StrCat(
"((?:", lead_class_, punctuation_, ")", lead_limit_,
digit_sequence_, "(?:", punctuation_, digit_sequence_, ")",
block_limit_, "(?i)(?:",
PhoneNumberUtil::GetInstance()->GetExtnPatternsForMatching(),
")?)"))) {
inner_matches_->push_back(
regexp_factory_->CreateRegExp("/+(.*)"));
inner_matches_->push_back(
regexp_factory_->CreateRegExp("(\\([^(]*)"));
inner_matches_->push_back(
regexp_factory_->CreateRegExp("(?:\\p{Z}-|-\\p{Z})\\p{Z}*(.+)"));
inner_matches_->push_back(
regexp_factory_->CreateRegExp(
"[\xE2\x80\x92-\xE2\x80\x95\xEF\xBC\x8D]"
"\\p{Z}*(.+)"));
inner_matches_->push_back(
regexp_factory_->CreateRegExp("\\.+\\p{Z}*([^.]+)"));
inner_matches_->push_back(
regexp_factory_->CreateRegExp("\\p{Z}+(\\P{Z}+)"));
}
private:
DISALLOW_COPY_AND_ASSIGN(PhoneNumberMatcherRegExps);
};
class AlternateFormats : public Singleton<AlternateFormats> {
public:
PhoneMetadataCollection format_data_;
map<int, const PhoneMetadata*> calling_code_to_alternate_formats_map_;
AlternateFormats()
: format_data_(),
calling_code_to_alternate_formats_map_() {
if (!LoadAlternateFormats(&format_data_)) {
LOG(DFATAL) << "Could not parse compiled-in metadata.";
return;
}
for (RepeatedPtrField<PhoneMetadata>::const_iterator it =
format_data_.metadata().begin();
it != format_data_.metadata().end();
++it) {
calling_code_to_alternate_formats_map_.insert(
std::make_pair(it->country_code(), &*it));
}
}
const PhoneMetadata* GetAlternateFormatsForCountry(int country_calling_code)
const {
map<int, const PhoneMetadata*>::const_iterator it =
calling_code_to_alternate_formats_map_.find(country_calling_code);
if (it != calling_code_to_alternate_formats_map_.end()) {
return it->second;
}
return NULL;
}
private:
DISALLOW_COPY_AND_ASSIGN(AlternateFormats);
};
PhoneNumberMatcher::PhoneNumberMatcher(const PhoneNumberUtil& util,
const string& text,
const string& region_code,
PhoneNumberMatcher::Leniency leniency,
int max_tries)
: reg_exps_(PhoneNumberMatcherRegExps::GetInstance()),
alternate_formats_(AlternateFormats::GetInstance()),
phone_util_(util),
text_(text),
preferred_region_(region_code),
leniency_(leniency),
max_tries_(max_tries),
state_(NOT_READY),
last_match_(NULL),
search_index_(0),
is_input_valid_utf8_(true) {
is_input_valid_utf8_ = IsInputUtf8();
}
PhoneNumberMatcher::PhoneNumberMatcher(const string& text,
const string& region_code)
: reg_exps_(PhoneNumberMatcherRegExps::GetInstance()),
alternate_formats_(NULL),
phone_util_(*PhoneNumberUtil::GetInstance()),
text_(text),
preferred_region_(region_code),
leniency_(VALID),
max_tries_(numeric_limits<int>::max()),
state_(NOT_READY),
last_match_(NULL),
search_index_(0),
is_input_valid_utf8_(true) {
is_input_valid_utf8_ = IsInputUtf8();
}
PhoneNumberMatcher::~PhoneNumberMatcher() {
}
bool PhoneNumberMatcher::IsInputUtf8() {
UnicodeText number_as_unicode;
number_as_unicode.PointToUTF8(text_.c_str(), text_.size());
return number_as_unicode.UTF8WasValid();
}
bool PhoneNumberMatcher::IsLatinLetter(char32 letter) {
if (!u_isalpha(letter) && (u_charType(letter) != U_NON_SPACING_MARK)) {
return false;
}
UBlockCode block = ublock_getCode(letter);
return ((block == UBLOCK_BASIC_LATIN) ||
(block == UBLOCK_LATIN_1_SUPPLEMENT) ||
(block == UBLOCK_LATIN_EXTENDED_A) ||
(block == UBLOCK_LATIN_EXTENDED_ADDITIONAL) ||
(block == UBLOCK_LATIN_EXTENDED_B) ||
(block == UBLOCK_COMBINING_DIACRITICAL_MARKS));
}
bool PhoneNumberMatcher::ParseAndVerify(const string& candidate, int offset,
PhoneNumberMatch* match) {
DCHECK(match);
if (!reg_exps_->matching_brackets_->FullMatch(candidate) ||
reg_exps_->pub_pages_->PartialMatch(candidate)) {
return false;
}
if (leniency_ >= VALID) {
scoped_ptr<RegExpInput> candidate_input(
reg_exps_->regexp_factory_->CreateInput(candidate));
if (offset > 0 &&
!reg_exps_->lead_class_pattern_->Consume(candidate_input.get())) {
char32 previous_char;
const char* previous_char_ptr =
EncodingUtils::BackUpOneUTF8Character(text_.c_str(),
text_.c_str() + offset);
EncodingUtils::DecodeUTF8Char(previous_char_ptr, &previous_char);
if (IsInvalidPunctuationSymbol(previous_char) ||
IsLatinLetter(previous_char)) {
return false;
}
}
size_t lastCharIndex = offset + candidate.length();
if (lastCharIndex < text_.length()) {
char32 next_char;
const char* next_char_ptr =
EncodingUtils::AdvanceOneUTF8Character(
text_.c_str() + lastCharIndex - 1);
EncodingUtils::DecodeUTF8Char(next_char_ptr, &next_char);
if (IsInvalidPunctuationSymbol(next_char) || IsLatinLetter(next_char)) {
return false;
}
}
}
PhoneNumber number;
if (phone_util_.ParseAndKeepRawInput(candidate, preferred_region_, &number) !=
PhoneNumberUtil::NO_PARSING_ERROR) {
return false;
}
if (VerifyAccordingToLeniency(leniency_, number, candidate)) {
match->set_start(offset);
match->set_raw_string(candidate);
number.clear_country_code_source();
number.clear_preferred_domestic_carrier_code();
number.clear_raw_input();
match->set_number(number);
return true;
}
return false;
}
bool PhoneNumberMatcher::VerifyAccordingToLeniency(
Leniency leniency, const PhoneNumber& number,
const string& candidate) const {
switch (leniency) {
case PhoneNumberMatcher::POSSIBLE:
return phone_util_.IsPossibleNumber(number);
case PhoneNumberMatcher::VALID:
if (!phone_util_.IsValidNumber(number) ||
!ContainsOnlyValidXChars(number, candidate, phone_util_)) {
return false;
}
return IsNationalPrefixPresentIfRequired(number);
case PhoneNumberMatcher::STRICT_GROUPING: {
if (!phone_util_.IsValidNumber(number) ||
!ContainsOnlyValidXChars(number, candidate, phone_util_) ||
ContainsMoreThanOneSlashInNationalNumber(
number, candidate, phone_util_) ||
!IsNationalPrefixPresentIfRequired(number)) {
return false;
}
ResultCallback4<bool, const PhoneNumberUtil&, const PhoneNumber&,
const string&, const std::vector<string>&>* callback =
NewPermanentCallback(&AllNumberGroupsRemainGrouped);
bool is_valid = CheckNumberGroupingIsValid(number, candidate, callback);
      delete callback;
return is_valid;
}
case PhoneNumberMatcher::EXACT_GROUPING: {
if (!phone_util_.IsValidNumber(number) ||
!ContainsOnlyValidXChars(number, candidate, phone_util_) ||
ContainsMoreThanOneSlashInNationalNumber(
number, candidate, phone_util_) ||
!IsNationalPrefixPresentIfRequired(number)) {
return false;
}
ResultCallback4<bool, const PhoneNumberUtil&, const PhoneNumber&,
const string&, const std::vector<string>&>* callback =
NewPermanentCallback(
this, &PhoneNumberMatcher::AllNumberGroupsAreExactlyPresent);
bool is_valid = CheckNumberGroupingIsValid(number, candidate, callback);
      delete callback;
return is_valid;
}
default:
LOG(ERROR) << "No implementation defined for verification for leniency "
<< static_cast<int>(leniency);
return false;
}
}
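// Attempts to extract a match from |candidate| by trimming it down with each
// of the inner-match patterns in turn, verifying each trimmed portion.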
bool PhoneNumberMatcher::ExtractInnerMatch(const string& candidate, int offset,
PhoneNumberMatch* match) {
DCHECK(match);
for (std::vector<const RegExp*>::const_iterator regex =
reg_exps_->inner_matches_->begin();
regex != reg_exps_->inner_matches_->end(); regex++) {
scoped_ptr<RegExpInput> candidate_input(
reg_exps_->regexp_factory_->CreateInput(candidate));
bool is_first_match = true;
string group;
while ((*regex)->FindAndConsume(candidate_input.get(), &group) &&
max_tries_ > 0) {
int group_start_index = static_cast<int>(candidate.length() -
candidate_input->ToString().length() - group.length());
if (is_first_match) {
string first_group_only = candidate.substr(0, group_start_index);
phone_util_.TrimUnwantedEndChars(&first_group_only);
bool success = ParseAndVerify(first_group_only, offset, match);
if (success) {
return true;
}
--max_tries_;
is_first_match = false;
}
phone_util_.TrimUnwantedEndChars(&group);
bool success = ParseAndVerify(group, offset + group_start_index, match);
if (success) {
return true;
}
--max_tries_;
}
}
return false;
}
bool PhoneNumberMatcher::ExtractMatch(const string& candidate, int offset,
PhoneNumberMatch* match) {
DCHECK(match);
if (reg_exps_->slash_separated_dates_->PartialMatch(candidate)) {
return false;
}
if (reg_exps_->time_stamps_->PartialMatch(candidate)) {
scoped_ptr<RegExpInput> following_text(
reg_exps_->regexp_factory_->CreateInput(
text_.substr(offset + candidate.size())));
if (reg_exps_->time_stamps_suffix_->Consume(following_text.get())) {
return false;
}
}
if (ParseAndVerify(candidate, offset, match)) {
return true;
}
return ExtractInnerMatch(candidate, offset, match);
}
bool PhoneNumberMatcher::HasNext() {
if (!is_input_valid_utf8_) {
state_ = DONE;
return false;
}
if (state_ == NOT_READY) {
PhoneNumberMatch temp_match;
if (!Find(search_index_, &temp_match)) {
state_ = DONE;
} else {
last_match_.reset(new PhoneNumberMatch(temp_match.start(),
temp_match.raw_string(),
temp_match.number()));
search_index_ = last_match_->end();
state_ = READY;
}
}
return state_ == READY;
}
bool PhoneNumberMatcher::Next(PhoneNumberMatch* match) {
DCHECK(match);
if (!HasNext()) {
return false;
}
match->CopyFrom(*last_match_);
state_ = NOT_READY;
last_match_.reset(NULL);
return true;
}
bool PhoneNumberMatcher::Find(int index, PhoneNumberMatch* match) {
DCHECK(match);
scoped_ptr<RegExpInput> text(
reg_exps_->regexp_factory_for_pattern_->CreateInput(text_.substr(index)));
string candidate;
while ((max_tries_ > 0) &&
reg_exps_->pattern_->FindAndConsume(text.get(), &candidate)) {
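    // FindAndConsume advances |text|; recover the candidate's absolute start
    // offset from how much of the original input remains unconsumed.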
    int start = static_cast<int>(
        text_.length() - text->ToString().length() - candidate.length());
reg_exps_->capture_up_to_second_number_start_pattern_->
PartialMatch(candidate, &candidate);
if (ExtractMatch(candidate, start, match)) {
return true;
}
index = static_cast<int>(start + candidate.length());
--max_tries_;
}
return false;
}
bool PhoneNumberMatcher::CheckNumberGroupingIsValid(
const PhoneNumber& phone_number,
const string& candidate,
ResultCallback4<bool, const PhoneNumberUtil&, const PhoneNumber&,
const string&, const std::vector<string>&>* checker) const {
DCHECK(checker);
string normalized_candidate =
NormalizeUTF8::NormalizeDecimalDigits(candidate);
std::vector<string> formatted_number_groups;
GetNationalNumberGroups(phone_number, &formatted_number_groups);
if (checker->Run(phone_util_, phone_number, normalized_candidate,
formatted_number_groups)) {
return true;
}
const PhoneMetadata* alternate_formats =
alternate_formats_->GetAlternateFormatsForCountry(
phone_number.country_code());
if (alternate_formats) {
string national_significant_number;
phone_util_.GetNationalSignificantNumber(phone_number,
&national_significant_number);
for (RepeatedPtrField<NumberFormat>::const_iterator it =
alternate_formats->number_format().begin();
it != alternate_formats->number_format().end(); ++it) {
if (it->leading_digits_pattern_size() > 0) {
std::unique_ptr<RegExpInput> nsn_input(
reg_exps_->regexp_factory_->CreateInput(
national_significant_number));
if (!reg_exps_->regexp_cache_.GetRegExp(
it->leading_digits_pattern(0)).Consume(nsn_input.get())) {
continue;
}
}
formatted_number_groups.clear();
GetNationalNumberGroupsForPattern(phone_number, &*it,
&formatted_number_groups);
if (checker->Run(phone_util_, phone_number, normalized_candidate,
formatted_number_groups)) {
return true;
}
}
}
return false;
}
void PhoneNumberMatcher::GetNationalNumberGroups(
const PhoneNumber& number,
std::vector<string>* digit_blocks) const {
string rfc3966_format;
phone_util_.Format(number, PhoneNumberUtil::RFC3966, &rfc3966_format);
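  // The RFC3966 format looks like "tel:+41-44-668-1800[;ext=...]": strip any
  // extension, skip past the first '-' (which follows the country code), and
  // split the remaining national number groups on '-'.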
size_t end_index = rfc3966_format.find(';');
if (end_index == string::npos) {
end_index = rfc3966_format.length();
}
size_t start_index = rfc3966_format.find('-') + 1;
SplitStringUsing(rfc3966_format.substr(start_index,
end_index - start_index),
'-', digit_blocks);
}
void PhoneNumberMatcher::GetNationalNumberGroupsForPattern(
const PhoneNumber& number,
const NumberFormat* formatting_pattern,
std::vector<string>* digit_blocks) const {
string rfc3966_format;
string national_significant_number;
phone_util_.GetNationalSignificantNumber(number,
&national_significant_number);
phone_util_.FormatNsnUsingPattern(national_significant_number,
*formatting_pattern,
PhoneNumberUtil::RFC3966,
&rfc3966_format);
SplitStringUsing(rfc3966_format, '-', digit_blocks);
}
bool PhoneNumberMatcher::IsNationalPrefixPresentIfRequired(
const PhoneNumber& number) const {
if (number.country_code_source() != PhoneNumber::FROM_DEFAULT_COUNTRY) {
return true;
}
string phone_number_region;
phone_util_.GetRegionCodeForCountryCode(
number.country_code(), &phone_number_region);
const PhoneMetadata* metadata =
phone_util_.GetMetadataForRegion(phone_number_region);
if (!metadata) {
return true;
}
string national_number;
phone_util_.GetNationalSignificantNumber(number, &national_number);
const NumberFormat* format_rule =
phone_util_.ChooseFormattingPatternForNumber(metadata->number_format(),
national_number);
if (format_rule && !format_rule->national_prefix_formatting_rule().empty()) {
if (format_rule->national_prefix_optional_when_formatting()) {
return true;
}
if (phone_util_.FormattingRuleHasFirstGroupOnly(
format_rule->national_prefix_formatting_rule())) {
return true;
}
string raw_input_copy(number.raw_input());
phone_util_.NormalizeDigitsOnly(&raw_input_copy);
return phone_util_.MaybeStripNationalPrefixAndCarrierCode(
*metadata,
&raw_input_copy,
NULL);
}
return true;
}
bool PhoneNumberMatcher::AllNumberGroupsAreExactlyPresent(
const PhoneNumberUtil& util,
const PhoneNumber& phone_number,
const string& normalized_candidate,
const std::vector<string>& formatted_number_groups) const {
const scoped_ptr<RegExpInput> candidate_number(
reg_exps_->regexp_factory_->CreateInput(normalized_candidate));
std::vector<string> candidate_groups;
string digit_block;
while (reg_exps_->capturing_ascii_digits_pattern_->FindAndConsume(
candidate_number.get(),
&digit_block)) {
candidate_groups.push_back(digit_block);
}
int candidate_number_group_index = static_cast<int>(
phone_number.has_extension() ? candidate_groups.size() - 2
: candidate_groups.size() - 1);
string national_significant_number;
util.GetNationalSignificantNumber(phone_number,
&national_significant_number);
if (candidate_groups.size() == 1 ||
candidate_groups.at(candidate_number_group_index).find(
national_significant_number) != string::npos) {
return true;
}
for (int formatted_number_group_index =
static_cast<int>(formatted_number_groups.size() - 1);
formatted_number_group_index > 0 &&
candidate_number_group_index >= 0;
--formatted_number_group_index, --candidate_number_group_index) {
if (candidate_groups.at(candidate_number_group_index) !=
formatted_number_groups.at(formatted_number_group_index)) {
return false;
}
}
return (candidate_number_group_index >= 0 &&
HasSuffixString(candidate_groups.at(candidate_number_group_index),
formatted_number_groups.at(0)));
}
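// Returns true if the candidate contains more than one slash in what would be
// the national number portion, which usually indicates a date or a fraction
// rather than a phone number.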
bool PhoneNumberMatcher::ContainsMoreThanOneSlashInNationalNumber(
const PhoneNumber& number,
const string& candidate,
const PhoneNumberUtil& util) {
size_t first_slash_in_body = candidate.find('/');
if (first_slash_in_body == string::npos) {
return false;
}
size_t second_slash_in_body = candidate.find('/', first_slash_in_body + 1);
if (second_slash_in_body == string::npos) {
return false;
}
if (number.country_code_source() == PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN ||
number.country_code_source() ==
PhoneNumber::FROM_NUMBER_WITHOUT_PLUS_SIGN) {
string normalized_country_code =
candidate.substr(0, first_slash_in_body);
util.NormalizeDigitsOnly(&normalized_country_code);
if (normalized_country_code == SimpleItoa(number.country_code())) {
return candidate.find('/', second_slash_in_body + 1) != string::npos;
}
}
return true;
}
}
} | #include "phonenumbers/phonenumbermatcher.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include <unicode/unistr.h>
#include "phonenumbers/base/basictypes.h"
#include "phonenumbers/base/memory/scoped_ptr.h"
#include "phonenumbers/base/memory/singleton.h"
#include "phonenumbers/default_logger.h"
#include "phonenumbers/phonenumber.h"
#include "phonenumbers/phonenumber.pb.h"
#include "phonenumbers/phonenumbermatch.h"
#include "phonenumbers/phonenumberutil.h"
#include "phonenumbers/stringutil.h"
#include "phonenumbers/test_util.h"
namespace i18n {
namespace phonenumbers {
using std::string;
using icu::UnicodeString;
namespace {
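// Strings to prefix and suffix a number with when embedding it in text.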
struct NumberContext {
string leading_text_;
string trailing_text_;
NumberContext(const string& leading_text, const string& trailing_text)
: leading_text_(leading_text),
trailing_text_(trailing_text) {
}
};
struct NumberTest {
string raw_string_;
string region_;
string ToString() const {
return StrCat(raw_string_, " (", region_, ")");
}
NumberTest(const string& raw_string, const string& region)
: raw_string_(raw_string),
region_(region) {
}
};
}
class PhoneNumberMatcherTest : public testing::Test {
protected:
PhoneNumberMatcherTest()
: phone_util_(*PhoneNumberUtil::GetInstance()),
matcher_(phone_util_, "",
RegionCode::US(),
PhoneNumberMatcher::VALID, 5),
offset_(0) {
PhoneNumberUtil::GetInstance()->SetLogger(new StdoutLogger());
}
bool IsLatinLetter(char32 letter) {
return PhoneNumberMatcher::IsLatinLetter(letter);
}
bool ContainsMoreThanOneSlashInNationalNumber(
const PhoneNumber& phone_number, const string& candidate) {
return PhoneNumberMatcher::ContainsMoreThanOneSlashInNationalNumber(
phone_number, candidate, phone_util_);
}
bool ExtractMatch(const string& text, PhoneNumberMatch* match) {
return matcher_.ExtractMatch(text, offset_, match);
}
PhoneNumberMatcher* GetMatcherWithLeniency(
const string& text, const string& region,
PhoneNumberMatcher::Leniency leniency) const {
return new PhoneNumberMatcher(phone_util_, text, region, leniency,
100 );
}
void DoTestNumberMatchesForLeniency(
const std::vector<NumberTest>& test_cases,
PhoneNumberMatcher::Leniency leniency) const {
scoped_ptr<PhoneNumberMatcher> matcher;
for (std::vector<NumberTest>::const_iterator test = test_cases.begin();
test != test_cases.end(); ++test) {
matcher.reset(GetMatcherWithLeniency(
test->raw_string_, test->region_, leniency));
EXPECT_TRUE(matcher->HasNext())
<< "No match found in " << test->ToString()
<< " for leniency: " << leniency;
if (matcher->HasNext()) {
PhoneNumberMatch match;
matcher->Next(&match);
EXPECT_EQ(test->raw_string_, match.raw_string())
<< "Found wrong match in test " << test->ToString()
<< ". Found " << match.raw_string();
}
}
}
void DoTestNumberNonMatchesForLeniency(
const std::vector<NumberTest>& test_cases,
PhoneNumberMatcher::Leniency leniency) const {
scoped_ptr<PhoneNumberMatcher> matcher;
for (std::vector<NumberTest>::const_iterator test = test_cases.begin();
test != test_cases.end(); ++test) {
matcher.reset(GetMatcherWithLeniency(
test->raw_string_, test->region_, leniency));
EXPECT_FALSE(matcher->HasNext()) << "Match found in " << test->ToString()
<< " for leniency: " << leniency;
}
}
void AssertMatchProperties(const PhoneNumberMatch& match, const string& text,
const string& number, const string& region_code) {
PhoneNumber expected_result;
phone_util_.Parse(number, region_code, &expected_result);
EXPECT_EQ(expected_result, match.number());
EXPECT_EQ(number, match.raw_string()) << " Wrong number found in " << text;
}
void AssertEqualRange(const string& text, int index, int start, int end) {
string sub = text.substr(index);
PhoneNumberMatcher matcher(phone_util_, sub, RegionCode::NZ(),
PhoneNumberMatcher::POSSIBLE,
1000000 );
PhoneNumberMatch match;
ASSERT_TRUE(matcher.HasNext());
matcher.Next(&match);
EXPECT_EQ(start - index, match.start());
EXPECT_EQ(end - index, match.end());
EXPECT_EQ(sub.substr(match.start(), match.length()), match.raw_string());
}
void DoTestFindInContext(const string& number,
const string& default_country) {
FindPossibleInContext(number, default_country);
PhoneNumber parsed;
phone_util_.Parse(number, default_country, &parsed);
if (phone_util_.IsValidNumber(parsed)) {
FindValidInContext(number, default_country);
}
}
void FindMatchesInContexts(const std::vector<NumberContext>& contexts,
bool is_valid, bool is_possible,
const string& region, const string& number) {
if (is_valid) {
DoTestInContext(number, region, contexts, PhoneNumberMatcher::VALID);
} else {
for (std::vector<NumberContext>::const_iterator it = contexts.begin();
it != contexts.end(); ++it) {
string text = StrCat(it->leading_text_, number, it->trailing_text_);
PhoneNumberMatcher matcher(text, region);
EXPECT_FALSE(matcher.HasNext());
}
}
if (is_possible) {
DoTestInContext(number, region, contexts, PhoneNumberMatcher::POSSIBLE);
} else {
for (std::vector<NumberContext>::const_iterator it = contexts.begin();
it != contexts.end(); ++it) {
string text = StrCat(it->leading_text_, number, it->trailing_text_);
PhoneNumberMatcher matcher(phone_util_, text, region,
PhoneNumberMatcher::POSSIBLE,
10000);
EXPECT_FALSE(matcher.HasNext());
}
}
}
void FindMatchesInContexts(const std::vector<NumberContext>& contexts,
bool is_valid, bool is_possible) {
const string& region = RegionCode::US();
const string number("415-666-7777");
FindMatchesInContexts(contexts, is_valid, is_possible, region, number);
}
void FindPossibleInContext(const string& number,
const string& default_country) {
std::vector<NumberContext> context_pairs;
context_pairs.push_back(NumberContext("", ""));
context_pairs.push_back(NumberContext(" ", "\t"));
context_pairs.push_back(NumberContext("Hello ", ""));
context_pairs.push_back(NumberContext("", " to call me!"));
context_pairs.push_back(NumberContext("Hi there, call ", " to reach me!"));
context_pairs.push_back(NumberContext("Hi there, call ", ", or don't"));
context_pairs.push_back(NumberContext("Hi call", ""));
context_pairs.push_back(NumberContext("", "forme"));
context_pairs.push_back(NumberContext("Hi call", "forme"));
context_pairs.push_back(NumberContext("It's cheap! Call ", " before 6:30"));
context_pairs.push_back(NumberContext("Call ", " or +1800-123-4567!"));
context_pairs.push_back(NumberContext("Call me on June 2 at", ""));
context_pairs.push_back(NumberContext(
"As quoted by Alfonso 12-15 (2009), you may call me at ", ""));
context_pairs.push_back(NumberContext(
"As quoted by Alfonso et al. 12-15 (2009), you may call me at ", ""));
context_pairs.push_back(NumberContext(
"As I said on 03/10/2011, you may call me at ", ""));
context_pairs.push_back(NumberContext("", ", 45 days a year"));
context_pairs.push_back(NumberContext("", ";x 7246433"));
context_pairs.push_back(NumberContext("Call ", "/x12 more"));
DoTestInContext(number, default_country, context_pairs,
PhoneNumberMatcher::POSSIBLE);
}
void FindValidInContext(const string& number, const string& default_country) {
std::vector<NumberContext> context_pairs;
context_pairs.push_back(NumberContext("It's only 9.99! Call ", " to buy"));
context_pairs.push_back(NumberContext("Call me on 21.6.1984 at ", ""));
context_pairs.push_back(NumberContext("Call me on 06/21 at ", ""));
context_pairs.push_back(NumberContext("Call me on 21.6. at ", ""));
context_pairs.push_back(NumberContext("Call me on 06/21/84 at ", ""));
DoTestInContext(number, default_country, context_pairs,
PhoneNumberMatcher::VALID);
}
void DoTestInContext(const string& number, const string& default_country,
const std::vector<NumberContext>& context_pairs,
PhoneNumberMatcher::Leniency leniency) {
for (std::vector<NumberContext>::const_iterator it = context_pairs.begin();
it != context_pairs.end(); ++it) {
string prefix = it->leading_text_;
string text = StrCat(prefix, number, it->trailing_text_);
int start = prefix.length();
int end = start + number.length();
PhoneNumberMatcher matcher(phone_util_, text, default_country, leniency,
1000000 );
PhoneNumberMatch match;
ASSERT_TRUE(matcher.HasNext())
<< "Did not find a number in '" << text << "'; expected '"
<< number << "'";
matcher.Next(&match);
string extracted = text.substr(match.start(), match.length());
EXPECT_EQ(start, match.start());
EXPECT_EQ(end, match.end());
EXPECT_EQ(number, extracted);
EXPECT_EQ(extracted, match.raw_string())
<< "Unexpected phone region in '" << text << "'; extracted '"
<< extracted << "'";
EnsureTermination(text, default_country, leniency);
}
}
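  // Runs the matcher once per starting index and checks that matching always
  // terminates with a bounded number of matches.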
void EnsureTermination(const string& text, const string& default_country,
PhoneNumberMatcher::Leniency leniency) {
for (size_t index = 0; index <= text.length(); ++index) {
string sub = text.substr(index);
PhoneNumberMatcher matcher(phone_util_, text, default_country, leniency,
1000000 );
string matches;
PhoneNumberMatch match;
int match_count = 0;
while (matcher.HasNext()) {
matcher.Next(&match);
StrAppend(&matches, ",", match.ToString());
++match_count;
}
ASSERT_LT(match_count, 10);
}
}
const PhoneNumberUtil& phone_util_;
private:
PhoneNumberMatcher matcher_;
int offset_;
};
TEST_F(PhoneNumberMatcherTest, ContainsMoreThanOneSlashInNationalNumber) {
PhoneNumber number;
number.set_country_code(1);
number.set_country_code_source(PhoneNumber::FROM_DEFAULT_COUNTRY);
string candidate = "1/05/2013";
EXPECT_TRUE(ContainsMoreThanOneSlashInNationalNumber(number, candidate));
number.Clear();
number.set_country_code(274);
number.set_country_code_source(PhoneNumber::FROM_NUMBER_WITHOUT_PLUS_SIGN);
candidate = "27/4/2013";
EXPECT_TRUE(ContainsMoreThanOneSlashInNationalNumber(number, candidate));
number.Clear();
number.set_country_code(49);
number.set_country_code_source(PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN);
candidate = "49/69/2013";
EXPECT_FALSE(ContainsMoreThanOneSlashInNationalNumber(number, candidate));
number.Clear();
number.set_country_code(49);
number.set_country_code_source(PhoneNumber::FROM_NUMBER_WITHOUT_PLUS_SIGN);
candidate = "+49/69/2013";
EXPECT_FALSE(ContainsMoreThanOneSlashInNationalNumber(number, candidate));
candidate = "+ 49/69/2013";
EXPECT_FALSE(ContainsMoreThanOneSlashInNationalNumber(number, candidate));
candidate = "+ 49/69/20/13";
EXPECT_TRUE(ContainsMoreThanOneSlashInNationalNumber(number, candidate));
number.Clear();
number.set_country_code(49);
number.set_country_code_source(PhoneNumber::FROM_DEFAULT_COUNTRY);
candidate = "49/69/2013";
EXPECT_TRUE(ContainsMoreThanOneSlashInNationalNumber(number, candidate));
}
TEST_F(PhoneNumberMatcherTest, FindNationalNumber) {
DoTestFindInContext("033316005", RegionCode::NZ());
DoTestFindInContext("03-331 6005", RegionCode::NZ());
DoTestFindInContext("03 331 6005", RegionCode::NZ());
DoTestFindInContext("0064 3 331 6005", RegionCode::NZ());
DoTestFindInContext("01164 3 331 6005", RegionCode::US());
DoTestFindInContext("+64 3 331 6005", RegionCode::US());
DoTestFindInContext("64(0)64123456", RegionCode::NZ());
DoTestFindInContext("0123/456789", RegionCode::PL());
DoTestFindInContext("123-456-7890", RegionCode::US());
}
TEST_F(PhoneNumberMatcherTest, FindWithInternationalPrefixes) {
DoTestFindInContext("+1 (650) 333-6000", RegionCode::NZ());
DoTestFindInContext("1-650-333-6000", RegionCode::US());
DoTestFindInContext("0011-650-333-6000", RegionCode::SG());
DoTestFindInContext("0081-650-333-6000", RegionCode::SG());
DoTestFindInContext("0191-650-333-6000", RegionCode::SG());
DoTestFindInContext("0~01-650-333-6000", RegionCode::PL());
DoTestFindInContext("++1 (650) 333-6000", RegionCode::PL());
DoTestFindInContext(
"\xEF\xBC\x8B""1 (650) 333-6000" ,
RegionCode::SG());
DoTestFindInContext(
"\xEF\xBC\x8B\xEF\xBC\x91\xE3\x80\x80\xEF\xBC\x88\xEF\xBC\x96\xEF\xBC\x95"
"\xEF\xBC\x90\xEF\xBC\x89\xE3\x80\x80\xEF\xBC\x93\xEF\xBC\x93\xEF\xBC\x93"
"\xEF\xBC\x8D\xEF\xBC\x96\xEF\xBC\x90\xEF\xBC\x90\xEF\xBC\x90",
RegionCode::SG());
}
TEST_F(PhoneNumberMatcherTest, FindWithLeadingZero) {
DoTestFindInContext("+39 02-36618 300", RegionCode::NZ());
DoTestFindInContext("02-36618 300", RegionCode::IT());
DoTestFindInContext("312 345 678", RegionCode::IT());
}
TEST_F(PhoneNumberMatcherTest, FindNationalNumberArgentina) {
DoTestFindInContext("+54 9 343 555 1212", RegionCode::AR());
DoTestFindInContext("0343 15 555 1212", RegionCode::AR());
DoTestFindInContext("+54 9 3715 65 4320", RegionCode::AR());
DoTestFindInContext("03715 15 65 4320", RegionCode::AR());
DoTestFindInContext("+54 11 3797 0000", RegionCode::AR());
DoTestFindInContext("011 3797 0000", RegionCode::AR());
DoTestFindInContext("+54 3715 65 4321", RegionCode::AR());
DoTestFindInContext("03715 65 4321", RegionCode::AR());
DoTestFindInContext("+54 23 1234 0000", RegionCode::AR());
DoTestFindInContext("023 1234 0000", RegionCode::AR());
}
TEST_F(PhoneNumberMatcherTest, FindWithXInNumber) {
DoTestFindInContext("(0xx) 123456789", RegionCode::AR());
DoTestFindInContext("(0xx) 123456789 x 1234", RegionCode::AR());
DoTestFindInContext("011xx5481429712", RegionCode::US());
}
TEST_F(PhoneNumberMatcherTest, FindNumbersMexico) {
DoTestFindInContext("+52 (449)978-0001", RegionCode::MX());
DoTestFindInContext("01 (449)978-0001", RegionCode::MX());
DoTestFindInContext("(449)978-0001", RegionCode::MX());
DoTestFindInContext("+52 1 33 1234-5678", RegionCode::MX());
DoTestFindInContext("044 (33) 1234-5678", RegionCode::MX());
DoTestFindInContext("045 33 1234-5678", RegionCode::MX());
}
TEST_F(PhoneNumberMatcherTest, FindNumbersWithPlusWithNoRegion) {
DoTestFindInContext("+64 3 331 6005", RegionCode::ZZ());
}
TEST_F(PhoneNumberMatcherTest, FindExtensions) {
DoTestFindInContext("03 331 6005 ext 3456", RegionCode::NZ());
DoTestFindInContext("03-3316005x3456", RegionCode::NZ());
DoTestFindInContext("03-3316005 int.3456", RegionCode::NZ());
DoTestFindInContext("03 3316005 #3456", RegionCode::NZ());
DoTestFindInContext("0~0 1800 7493 524", RegionCode::PL());
DoTestFindInContext("(1800) 7493.524", RegionCode::US());
DoTestFindInContext("0~0 1800 7493 524 ~1234", RegionCode::PL());
DoTestFindInContext("+44 2034567890x456", RegionCode::NZ());
DoTestFindInContext("+44 2034567890x456", RegionCode::GB());
DoTestFindInContext("+44 2034567890 x456", RegionCode::GB());
DoTestFindInContext("+44 2034567890 X456", RegionCode::GB());
DoTestFindInContext("+44 2034567890 X 456", RegionCode::GB());
DoTestFindInContext("+44 2034567890 X 456", RegionCode::GB());
DoTestFindInContext("+44 2034567890 X 456", RegionCode::GB());
DoTestFindInContext("(800) 901-3355 x 7246433", RegionCode::US());
DoTestFindInContext("(800) 901-3355 , ext 7246433", RegionCode::US());
DoTestFindInContext("(800) 901-3355 ,extension 7246433", RegionCode::US());
DoTestFindInContext("(800) 901-3355 ,x 7246433", RegionCode::US());
DoTestFindInContext("(800) 901-3355 ext: 7246433", RegionCode::US());
}
TEST_F(PhoneNumberMatcherTest, FindInterspersedWithSpace) {
DoTestFindInContext("0 3 3 3 1 6 0 0 5", RegionCode::NZ());
}
TEST_F(PhoneNumberMatcherTest, IntermediateParsePositions) {
string text = "Call 033316005 or 032316005!";
for (int i = 0; i <= 5; ++i) {
AssertEqualRange(text, i, 5, 14);
}
AssertEqualRange(text, 6, 6, 14);
AssertEqualRange(text, 7, 7, 14);
for (int i = 8; i <= 19; ++i) {
AssertEqualRange(text, i, 19, 28);
}
}
TEST_F(PhoneNumberMatcherTest, FourMatchesInARow) {
string number1 = "415-666-7777";
string number2 = "800-443-1223";
string number3 = "212-443-1223";
string number4 = "650-443-1223";
string text = StrCat(number1, " - ", number2, " - ", number3, " - ", number4);
PhoneNumberMatcher matcher(text, RegionCode::US());
PhoneNumberMatch match;
EXPECT_TRUE(matcher.HasNext());
EXPECT_TRUE(matcher.Next(&match));
AssertMatchProperties(match, text, number1, RegionCode::US());
EXPECT_TRUE(matcher.HasNext());
EXPECT_TRUE(matcher.Next(&match));
AssertMatchProperties(match, text, number2, RegionCode::US());
EXPECT_TRUE(matcher.HasNext());
EXPECT_TRUE(matcher.Next(&match));
AssertMatchProperties(match, text, number3, RegionCode::US());
EXPECT_TRUE(matcher.HasNext());
EXPECT_TRUE(matcher.Next(&match));
AssertMatchProperties(match, text, number4, RegionCode::US());
}
TEST_F(PhoneNumberMatcherTest, MatchesFoundWithMultipleSpaces) {
string number1 = "415-666-7777";
string number2 = "800-443-1223";
string text = StrCat(number1, " ", number2);
PhoneNumberMatcher matcher(text, RegionCode::US());
PhoneNumberMatch match;
EXPECT_TRUE(matcher.HasNext());
EXPECT_TRUE(matcher.Next(&match));
AssertMatchProperties(match, text, number1, RegionCode::US());
EXPECT_TRUE(matcher.HasNext());
EXPECT_TRUE(matcher.Next(&match));
AssertMatchProperties(match, text, number2, RegionCode::US());
}
TEST_F(PhoneNumberMatcherTest, MatchWithSurroundingZipcodes) {
string number = "415-666-7777";
string zip_preceding =
StrCat("My address is CA 34215 - ", number, " is my number.");
PhoneNumber expected_result;
phone_util_.Parse(number, RegionCode::US(), &expected_result);
scoped_ptr<PhoneNumberMatcher> matcher(
GetMatcherWithLeniency(zip_preceding, RegionCode::US(),
PhoneNumberMatcher::VALID));
PhoneNumberMatch match;
EXPECT_TRUE(matcher->HasNext());
EXPECT_TRUE(matcher->Next(&match));
AssertMatchProperties(match, zip_preceding, number, RegionCode::US());
number = "(415) 666 7777";
string zip_following =
StrCat("My number is ", number, ". 34215 is my zip-code.");
matcher.reset(
GetMatcherWithLeniency(zip_following, RegionCode::US(),
PhoneNumberMatcher::VALID));
PhoneNumberMatch match_with_spaces;
EXPECT_TRUE(matcher->HasNext());
EXPECT_TRUE(matcher->Next(&match_with_spaces));
AssertMatchProperties(
match_with_spaces, zip_following, number, RegionCode::US());
}
TEST_F(PhoneNumberMatcherTest, IsLatinLetter) {
EXPECT_TRUE(IsLatinLetter('c'));
EXPECT_TRUE(IsLatinLetter('C'));
EXPECT_TRUE(IsLatinLetter(UnicodeString::fromUTF8("\xC3\x89" )[0]));
EXPECT_TRUE(IsLatinLetter(UnicodeString::fromUTF8("\xCC\x81")[0]));
EXPECT_FALSE(IsLatinLetter(':'));
EXPECT_FALSE(IsLatinLetter('5'));
EXPECT_FALSE(IsLatinLetter('-'));
EXPECT_FALSE(IsLatinLetter('.'));
EXPECT_FALSE(IsLatinLetter(' '));
EXPECT_FALSE(
IsLatinLetter(UnicodeString::fromUTF8("\xE6\x88\x91" )[0]));
EXPECT_FALSE(IsLatinLetter(UnicodeString::fromUTF8("\xE3\x81\xAE")[0]));
EXPECT_FALSE(IsLatinLetter(UnicodeString::fromUTF8("\xE3\x81\xAE")[2]));
}
TEST_F(PhoneNumberMatcherTest, MatchesWithSurroundingLatinChars) {
std::vector<NumberContext> possible_only_contexts;
possible_only_contexts.push_back(NumberContext("abc", "def"));
possible_only_contexts.push_back(NumberContext("abc", ""));
possible_only_contexts.push_back(NumberContext("", "def"));
possible_only_contexts.push_back(NumberContext("\xC3\x89" , ""));
possible_only_contexts.push_back(
NumberContext("\x20\x22\xCC\x81""e\xCC\x81" , ""));
FindMatchesInContexts(possible_only_contexts, false, true);
}
TEST_F(PhoneNumberMatcherTest, MoneyNotSeenAsPhoneNumber) {
std::vector<NumberContext> possible_only_contexts;
possible_only_contexts.push_back(NumberContext("$", ""));
possible_only_contexts.push_back(NumberContext("", "$"));
possible_only_contexts.push_back(NumberContext("\xC2\xA3" , ""));
possible_only_contexts.push_back(NumberContext("\xC2\xA5" , ""));
FindMatchesInContexts(possible_only_contexts, false, true);
}
TEST_F(PhoneNumberMatcherTest, PercentageNotSeenAsPhoneNumber) {
std::vector<NumberContext> possible_only_contexts;
possible_only_contexts.push_back(NumberContext("", "%"));
FindMatchesInContexts(possible_only_contexts, false, true);
}
TEST_F(PhoneNumberMatcherTest, PhoneNumberWithLeadingOrTrailingMoneyMatches) {
std::vector<NumberContext> contexts;
contexts.push_back(NumberContext("$20 ", ""));
contexts.push_back(NumberContext("", " 100$"));
FindMatchesInContexts(contexts, true, true);
}
TEST_F(PhoneNumberMatcherTest,
MatchesWithSurroundingLatinCharsAndLeadingPunctuation) {
std::vector<NumberContext> possible_only_contexts;
possible_only_contexts.push_back(NumberContext("abc", "def"));
possible_only_contexts.push_back(NumberContext("", "def"));
possible_only_contexts.push_back(NumberContext("", "\xC3\x89" ));
string number_with_plus = "+14156667777";
string number_with_brackets = "(415)6667777";
FindMatchesInContexts(possible_only_contexts, false, true, RegionCode::US(),
number_with_plus);
FindMatchesInContexts(possible_only_contexts, false, true, RegionCode::US(),
number_with_brackets);
std::vector<NumberContext> valid_contexts;
valid_contexts.push_back(NumberContext("abc", ""));
valid_contexts.push_back(NumberContext("\xC3\x89" , ""));
valid_contexts.push_back(
NumberContext("\xC3\x89" , "."));
valid_contexts.push_back(NumberContext("\xC3\x89" , " def"));
FindMatchesInContexts(valid_contexts, true, true, RegionCode::US(),
number_with_plus);
FindMatchesInContexts(valid_contexts, true, true, RegionCode::US(),
number_with_brackets);
}
TEST_F(PhoneNumberMatcherTest, MatchesWithSurroundingChineseChars) {
std::vector<NumberContext> valid_contexts;
valid_contexts.push_back(NumberContext(
"\xE6\x88\x91\xE7\x9A\x84\xE7\x94\xB5\xE8\xAF\x9D\xE5\x8F\xB7\xE7\xA0\x81"
"\xE6\x98\xAF", ""));
valid_contexts.push_back(NumberContext(
"",
"\xE6\x98\xAF\xE6\x88\x91\xE7\x9A\x84\xE7\x94\xB5\xE8\xAF\x9D\xE5\x8F\xB7"
"\xE7\xA0\x81"));
valid_contexts.push_back(NumberContext(
"\xE8\xAF\xB7\xE6\x8B\xA8\xE6\x89\x93" ,
"\xE6\x88\x91\xE5\x9C\xA8\xE6\x98\x8E\xE5\xA4\xA9" ));
FindMatchesInContexts(valid_contexts, true, true);
}
TEST_F(PhoneNumberMatcherTest, MatchesWithSurroundingPunctuation) {
std::vector<NumberContext> valid_contexts;
valid_contexts.push_back(NumberContext("My number-", ""));
valid_contexts.push_back(NumberContext("", ".Nice day."));
valid_contexts.push_back(NumberContext("Tel:", "."));
valid_contexts.push_back(NumberContext("Tel: ", " on Saturdays."));
FindMatchesInContexts(valid_contexts, true, true);
}
TEST_F(PhoneNumberMatcherTest,
MatchesMultiplePhoneNumbersSeparatedByPhoneNumberPunctuation) {
const string text = "Call 650-253-4561 -- 455-234-3451";
const string& region = RegionCode::US();
PhoneNumber number1;
number1.set_country_code(phone_util_.GetCountryCodeForRegion(region));
number1.set_national_number(6502534561ULL);
PhoneNumberMatch match1(5, "650-253-4561", number1);
PhoneNumber number2;
number2.set_country_code(phone_util_.GetCountryCodeForRegion(region));
number2.set_national_number(4552343451ULL);
PhoneNumberMatch match2(21, "455-234-3451", number2);
PhoneNumberMatcher matcher(
phone_util_, text, region, PhoneNumberMatcher::VALID, 100);
PhoneNumberMatch actual_match1;
PhoneNumberMatch actual_match2;
matcher.Next(&actual_match1);
matcher.Next(&actual_match2);
EXPECT_TRUE(match1.Equals(actual_match1))
<< "Got: " << actual_match1.ToString();
EXPECT_TRUE(match2.Equals(actual_match2))
<< "Got: " << actual_match2.ToString();
}
TEST_F(PhoneNumberMatcherTest,
DoesNotMatchMultiplePhoneNumbersSeparatedWithNoWhiteSpace) {
const string text = "Call 650-253-4561--455-234-3451";
const string& region = RegionCode::US();
PhoneNumberMatcher matcher(
phone_util_, text, region, PhoneNumberMatcher::VALID, 100);
EXPECT_FALSE(matcher.HasNext());
}
static const NumberTest kImpossibleCases[] = {
NumberTest("12345", RegionCode::US()),
NumberTest("23456789", RegionCode::US()),
NumberTest("234567890112", RegionCode::US()),
NumberTest("650+253+1234", RegionCode::US()),
NumberTest("3/10/1984", RegionCode::CA()),
NumberTest("03/27/2011", RegionCode::US()),
NumberTest("31/8/2011", RegionCode::US()),
NumberTest("1/12/2011", RegionCode::US()),
NumberTest("10/12/82", RegionCode::DE()),
NumberTest("650x2531234", RegionCode::US()),
NumberTest("2012-01-02 08:00", RegionCode::US()),
NumberTest("2012/01/02 08:00", RegionCode::US()),
NumberTest("20120102 08:00", RegionCode::US()),
NumberTest("2014-04-12 04:04 PM", RegionCode::US()),
NumberTest("2014-04-12 04:04 PM", RegionCode::US()),
NumberTest("2014-04-12 04:04 PM", RegionCode::US()),
NumberTest("2014-04-12 04:04 PM", RegionCode::US()),
};
static const NumberTest kPossibleOnlyCases[] = {
NumberTest("7121115678", RegionCode::US()),
NumberTest("1650 x 253 - 1234", RegionCode::US()),
NumberTest("650 x 253 - 1234", RegionCode::US()),
NumberTest("6502531x234", RegionCode::US()),
NumberTest("(20) 3346 1234", RegionCode::GB()),
};
static const NumberTest kValidCases[] = {
NumberTest("65 02 53 00 00", RegionCode::US()),
NumberTest("6502 538365", RegionCode::US()),
NumberTest("650
NumberTest("650/253/1234", RegionCode::US()),
NumberTest("9002309. 158", RegionCode::US()),
NumberTest("12 7/8 - 14 12/34 - 5", RegionCode::US()),
NumberTest("12.1 - 23.71 - 23.45", RegionCode::US()),
NumberTest("800 234 1 111x1111", RegionCode::US()),
NumberTest("1979-2011 100", RegionCode::US()),
NumberTest("+494949-4-94", RegionCode::DE()),
NumberTest(
"\xEF\xBC\x94\xEF\xBC\x91\xEF\xBC\x95\xEF\xBC\x96\xEF\xBC\x96\xEF\xBC\x96"
"\x2D\xEF\xBC\x97\xEF\xBC\x97\xEF\xBC\x97\xEF\xBC\x97", RegionCode::US()),
NumberTest("2012-0102 08", RegionCode::US()),
NumberTest("2012-01-02 08", RegionCode::US()),
NumberTest("1800-1-0-10 22", RegionCode::AU()),
NumberTest("030-3-2 23 12 34", RegionCode::DE()),
NumberTest("03 0 -3 2 23 12 34", RegionCode::DE()),
NumberTest("(0)3 0 -3 2 23 12 34", RegionCode::DE()),
NumberTest("0 3 0 -3 2 23 12 34", RegionCode::DE()),
#ifdef I18N_PHONENUMBERS_USE_ALTERNATE_FORMATS
NumberTest("+52 332 123 23 23", RegionCode::MX()),
#endif
};
static const NumberTest kStrictGroupingCases[] = {
NumberTest("(415) 6667777", RegionCode::US()),
NumberTest("415-6667777", RegionCode::US()),
NumberTest("0800-2491234", RegionCode::DE()),
#ifdef I18N_PHONENUMBERS_USE_ALTERNATE_FORMATS
NumberTest("0900-1 123123", RegionCode::DE()),
NumberTest("(0)900-1 123123", RegionCode::DE()),
NumberTest("0 900-1 123123", RegionCode::DE()),
#endif
NumberTest("+33 3 34 2312", RegionCode::FR()),
};
static const NumberTest kExactGroupingCases[] = {
NumberTest(
"\xEF\xBC\x94\xEF\xBC\x91\xEF\xBC\x95\xEF\xBC\x96\xEF\xBC\x96\xEF\xBC\x96"
"\xEF\xBC\x97\xEF\xBC\x97\xEF\xBC\x97\xEF\xBC\x97", RegionCode::US()),
NumberTest(
"\xEF\xBC\x94\xEF\xBC\x91\xEF\xBC\x95\xEF\xBC\x8D\xEF\xBC\x96\xEF\xBC\x96"
"\xEF\xBC\x96\xEF\xBC\x8D\xEF\xBC\x97\xEF\xBC\x97\xEF\xBC\x97"
"\xEF\xBC\x97", RegionCode::US()),
NumberTest("4156667777", RegionCode::US()),
NumberTest("4156667777 x 123", RegionCode::US()),
NumberTest("415-666-7777", RegionCode::US()),
NumberTest("415/666-7777", RegionCode::US()),
NumberTest("415-666-7777 ext. 503", RegionCode::US()),
NumberTest("1 415 666 7777 x 123", RegionCode::US()),
NumberTest("+1 415-666-7777", RegionCode::US()),
NumberTest("+494949 49", RegionCode::DE()),
NumberTest("+49-49-34", RegionCode::DE()),
NumberTest("+49-4931-49", RegionCode::DE()),
NumberTest("04931-49", RegionCode::DE()),
NumberTest("+49-494949", RegionCode::DE()),
NumberTest("+49-494949 ext. 49", RegionCode::DE()),
NumberTest("+49494949 ext. 49", RegionCode::DE()),
NumberTest("0494949", RegionCode::DE()),
NumberTest("0494949 ext. 49", RegionCode::DE()),
NumberTest("01 (33) 3461 2234", RegionCode::MX()),
NumberTest("(33) 3461 2234", RegionCode::MX()),
#ifdef I18N_PHONENUMBERS_USE_ALTERNATE_FORMATS
NumberTest("1800-10-10 22", RegionCode::AU()),
NumberTest("0900-1 123 123", RegionCode::DE()),
NumberTest("(0)900-1 123 123", RegionCode::DE()),
NumberTest("0 900-1 123 123", RegionCode::DE()),
#endif
NumberTest("+33 3 34 23 12", RegionCode::FR()),
};
TEST_F(PhoneNumberMatcherTest, MatchesWithPossibleLeniency) {
std::vector<NumberTest> test_cases;
test_cases.insert(test_cases.begin(), kPossibleOnlyCases,
kPossibleOnlyCases + arraysize(kPossibleOnlyCases));
test_cases.insert(test_cases.begin(), kValidCases,
kValidCases + arraysize(kValidCases));
test_cases.insert(
test_cases.begin(), kStrictGroupingCases,
kStrictGroupingCases + arraysize(kStrictGroupingCases));
test_cases.insert(test_cases.begin(), kExactGroupingCases,
kExactGroupingCases + arraysize(kExactGroupingCases));
DoTestNumberMatchesForLeniency(test_cases, PhoneNumberMatcher::POSSIBLE);
}
TEST_F(PhoneNumberMatcherTest, NonMatchesWithPossibleLeniency) {
std::vector<NumberTest> test_cases;
test_cases.insert(test_cases.begin(), kImpossibleCases,
kImpossibleCases + arraysize(kImpossibleCases));
DoTestNumberNonMatchesForLeniency(test_cases, PhoneNumberMatcher::POSSIBLE);
}
TEST_F(PhoneNumberMatcherTest, MatchesWithValidLeniency) {
std::vector<NumberTest> test_cases;
test_cases.insert(test_cases.begin(), kValidCases,
kValidCases + arraysize(kValidCases));
test_cases.insert(
test_cases.begin(), kStrictGroupingCases,
kStrictGroupingCases + arraysize(kStrictGroupingCases));
test_cases.insert(test_cases.begin(), kExactGroupingCases,
kExactGroupingCases + arraysize(kExactGroupingCases));
DoTestNumberMatchesForLeniency(test_cases, PhoneNumberMatcher::VALID);
}
TEST_F(PhoneNumberMatcherTest, NonMatchesWithValidLeniency) {
std::vector<NumberTest> test_cases;
test_cases.insert(test_cases.begin(), kImpossibleCases,
kImpossibleCases + arraysize(kImpossibleCases));
test_cases.insert(test_cases.begin(), kPossibleOnlyCases,
kPossibleOnlyCases + arraysize(kPossibleOnlyCases));
DoTestNumberNonMatchesForLeniency(test_cases, PhoneNumberMatcher::VALID);
}
TEST_F(PhoneNumberMatcherTest, MatchesWithStrictGroupingLeniency) {
std::vector<NumberTest> test_cases;
test_cases.insert(
test_cases.begin(), kStrictGroupingCases,
kStrictGroupingCases + arraysize(kStrictGroupingCases));
test_cases.insert(test_cases.begin(), kExactGroupingCases,
kExactGroupingCases + arraysize(kExactGroupingCases));
DoTestNumberMatchesForLeniency(test_cases,
PhoneNumberMatcher::STRICT_GROUPING);
}
TEST_F(PhoneNumberMatcherTest, NonMatchesWithStrictGroupingLeniency) {
std::vector<NumberTest> test_cases;
test_cases.insert(test_cases.begin(), kImpossibleCases,
kImpossibleCases + arraysize(kImpossibleCases));
test_cases.insert(test_cases.begin(), kPossibleOnlyCases,
kPossibleOnlyCases + arraysize(kPossibleOnlyCases));
test_cases.insert(test_cases.begin(), kValidCases,
kValidCases + arraysize(kValidCases));
DoTestNumberNonMatchesForLeniency(test_cases,
PhoneNumberMatcher::STRICT_GROUPING);
}
TEST_F(PhoneNumberMatcherTest, MatchesWithExactGroupingLeniency) {
std::vector<NumberTest> test_cases;
test_cases.insert(test_cases.begin(), kExactGroupingCases,
kExactGroupingCases + arraysize(kExactGroupingCases));
DoTestNumberMatchesForLeniency(test_cases,
PhoneNumberMatcher::EXACT_GROUPING);
}
TEST_F(PhoneNumberMatcherTest, NonMatchesWithExactGroupingLeniency) {
std::vector<NumberTest> test_cases;
test_cases.insert(test_cases.begin(), kImpossibleCases,
kImpossibleCases + arraysize(kImpossibleCases));
test_cases.insert(test_cases.begin(), kPossibleOnlyCases,
kPossibleOnlyCases + arraysize(kPossibleOnlyCases));
test_cases.insert(test_cases.begin(), kValidCases,
kValidCases + arraysize(kValidCases));
test_cases.insert(
test_cases.begin(), kStrictGroupingCases,
kStrictGroupingCases + arraysize(kStrictGroupingCases));
DoTestNumberNonMatchesForLeniency(test_cases,
PhoneNumberMatcher::EXACT_GROUPING);
}
TEST_F(PhoneNumberMatcherTest, ExtractMatchIgnoresAmericanDates) {
PhoneNumberMatch match;
string text = "As I said on 03/10/2011, you may call me at ";
EXPECT_FALSE(ExtractMatch(text, &match));
text = "As I said on 03/27/2011, you may call me at ";
EXPECT_FALSE(ExtractMatch(text, &match));
text = "As I said on 31/8/2011, you may call me at ";
EXPECT_FALSE(ExtractMatch(text, &match));
text = "As I said on 1/12/2011, you may call me at ";
EXPECT_FALSE(ExtractMatch(text, &match));
text = "I was born on 10/12/82. Please call me at ";
EXPECT_FALSE(ExtractMatch(text, &match));
}
TEST_F(PhoneNumberMatcherTest, NonMatchingBracketsAreInvalid) {
scoped_ptr<PhoneNumberMatcher> matcher(GetMatcherWithLeniency(
"80.585 [79.964, 81.191]", RegionCode::US(),
PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
matcher.reset(GetMatcherWithLeniency(
"80.585 [79.964]", RegionCode::US(), PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
matcher.reset(GetMatcherWithLeniency(
"80.585 ((79.964)", RegionCode::US(), PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
matcher.reset(GetMatcherWithLeniency(
"(80).(585) (79).(9)64", RegionCode::US(), PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
}
TEST_F(PhoneNumberMatcherTest, NoMatchIfRegionIsUnknown) {
scoped_ptr<PhoneNumberMatcher> matcher(GetMatcherWithLeniency(
"Random text body - number is 0331 6005, see you there",
RegionCode::ZZ(), PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
}
TEST_F(PhoneNumberMatcherTest, NoMatchInEmptyString) {
scoped_ptr<PhoneNumberMatcher> matcher(GetMatcherWithLeniency(
"", RegionCode::US(), PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
matcher.reset(GetMatcherWithLeniency(" ", RegionCode::US(),
PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
}
TEST_F(PhoneNumberMatcherTest, NoMatchIfNoNumber) {
scoped_ptr<PhoneNumberMatcher> matcher(GetMatcherWithLeniency(
"Random text body - number is foobar, see you there", RegionCode::US(),
PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
}
TEST_F(PhoneNumberMatcherTest, NoErrorWithSpecialCharacters) {
string stringWithSpecialCharacters =
"Myfuzzvar1152: \"My info:%415-666-7777 123 fake street\"\nfuzzvar1155: "
"47\nfuzzvar1158: %415-666-1234 "
"i18n_phonenumbers_Pho\356eNumberMatcher_Leniency_VALID_1"
"\nfuzzvar1159: 20316 info:%415-666-7777 123 fake str79ee\nt";
string Numbers;
for (int i = 0; i < 100; ++i)
Numbers.append(stringWithSpecialCharacters);
scoped_ptr<PhoneNumberMatcher> matcher(
GetMatcherWithLeniency(Numbers, RegionCode::US(),
PhoneNumberMatcher::POSSIBLE));
EXPECT_FALSE(matcher->HasNext());
}
TEST_F(PhoneNumberMatcherTest, Sequences) {
const string text = "Call 033316005 or 032316005!";
const string& region = RegionCode::NZ();
PhoneNumber number1;
number1.set_country_code(phone_util_.GetCountryCodeForRegion(region));
number1.set_national_number(33316005ULL);
PhoneNumberMatch match1(5, "033316005", number1);
PhoneNumber number2;
number2.set_country_code(phone_util_.GetCountryCodeForRegion(region));
number2.set_national_number(32316005ULL);
PhoneNumberMatch match2(19, "032316005", number2);
PhoneNumberMatcher matcher(
phone_util_, text, region, PhoneNumberMatcher::POSSIBLE, 100);
PhoneNumberMatch actual_match1;
PhoneNumberMatch actual_match2;
matcher.Next(&actual_match1);
matcher.Next(&actual_match2);
EXPECT_TRUE(match1.Equals(actual_match1));
EXPECT_TRUE(match2.Equals(actual_match2));
}
TEST_F(PhoneNumberMatcherTest, MaxMatches) {
string numbers;
for (int i = 0; i < 100; ++i) {
numbers.append("My info: 415-666-7777,");
}
PhoneNumber number;
phone_util_.Parse("+14156667777", RegionCode::US(), &number);
std::vector<PhoneNumber> expected(100, number);
PhoneNumberMatcher matcher(
phone_util_, numbers, RegionCode::US(), PhoneNumberMatcher::VALID, 10);
std::vector<PhoneNumber> actual;
PhoneNumberMatch match;
while (matcher.HasNext()) {
matcher.Next(&match);
actual.push_back(match.number());
}
EXPECT_EQ(expected, actual);
}
TEST_F(PhoneNumberMatcherTest, MaxMatchesInvalid) {
string numbers;
for (int i = 0; i < 10; ++i) {
numbers.append("My address 949-8945-0");
}
for (int i = 0; i < 100; ++i) {
numbers.append("My info: 415-666-7777,");
}
PhoneNumberMatcher matcher(
phone_util_, numbers, RegionCode::US(), PhoneNumberMatcher::VALID, 10);
EXPECT_FALSE(matcher.HasNext());
}
TEST_F(PhoneNumberMatcherTest, MaxMatchesMixed) {
string numbers;
for (int i = 0; i < 100; ++i) {
numbers.append("My info: 415-666-7777 123 fake street");
}
PhoneNumber number;
phone_util_.Parse("+14156667777", RegionCode::ZZ(), &number);
std::vector<PhoneNumber> expected(10, number);
PhoneNumberMatcher matcher(
phone_util_, numbers, RegionCode::US(), PhoneNumberMatcher::VALID, 10);
std::vector<PhoneNumber> actual;
PhoneNumberMatch match;
while (matcher.HasNext()) {
matcher.Next(&match);
actual.push_back(match.number());
}
EXPECT_EQ(expected, actual);
}
TEST_F(PhoneNumberMatcherTest, NonPlusPrefixedNumbersNotFoundForInvalidRegion) {
PhoneNumberMatch match;
scoped_ptr<PhoneNumberMatcher> matcher(
GetMatcherWithLeniency("1 456 764 156", RegionCode::GetUnknown(),
PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
EXPECT_FALSE(matcher->Next(&match));
EXPECT_FALSE(matcher->HasNext());
}
TEST_F(PhoneNumberMatcherTest, EmptyIteration) {
PhoneNumberMatch match;
scoped_ptr<PhoneNumberMatcher> matcher(
GetMatcherWithLeniency("", RegionCode::GetUnknown(),
PhoneNumberMatcher::VALID));
EXPECT_FALSE(matcher->HasNext());
EXPECT_FALSE(matcher->HasNext());
EXPECT_FALSE(matcher->Next(&match));
EXPECT_FALSE(matcher->HasNext());
}
TEST_F(PhoneNumberMatcherTest, SingleIteration) {
PhoneNumberMatch match;
scoped_ptr<PhoneNumberMatcher> matcher(
GetMatcherWithLeniency("+14156667777", RegionCode::GetUnknown(),
PhoneNumberMatcher::VALID));
EXPECT_TRUE(matcher->HasNext());
EXPECT_TRUE(matcher->HasNext());
EXPECT_TRUE(matcher->Next(&match));
EXPECT_FALSE(matcher->HasNext());
EXPECT_FALSE(matcher->Next(&match));
}
TEST_F(PhoneNumberMatcherTest, SingleIteration_WithNextOnly) {
PhoneNumberMatch match;
scoped_ptr<PhoneNumberMatcher> matcher(
GetMatcherWithLeniency("+14156667777", RegionCode::GetUnknown(),
PhoneNumberMatcher::VALID));
EXPECT_TRUE(matcher->Next(&match));
EXPECT_FALSE(matcher->Next(&match));
}
TEST_F(PhoneNumberMatcherTest, DoubleIteration) {
PhoneNumberMatch match;
scoped_ptr<PhoneNumberMatcher> matcher(
GetMatcherWithLeniency("+14156667777 foobar +14156667777 ",
RegionCode::GetUnknown(),
PhoneNumberMatcher::VALID));
EXPECT_TRUE(matcher->HasNext());
EXPECT_TRUE(matcher->HasNext());
EXPECT_TRUE(matcher->Next(&match));
EXPECT_TRUE(matcher->HasNext());
EXPECT_TRUE(matcher->HasNext());
EXPECT_TRUE(matcher->Next(&match));
EXPECT_FALSE(matcher->HasNext());
EXPECT_FALSE(matcher->Next(&match));
EXPECT_FALSE(matcher->HasNext());
}
TEST_F(PhoneNumberMatcherTest, DoubleIteration_WithNextOnly) {
PhoneNumberMatch match;
scoped_ptr<PhoneNumberMatcher> matcher(
GetMatcherWithLeniency("+14156667777 foobar +14156667777 ",
RegionCode::GetUnknown(),
PhoneNumberMatcher::VALID));
EXPECT_TRUE(matcher->Next(&match));
EXPECT_TRUE(matcher->Next(&match));
EXPECT_FALSE(matcher->Next(&match));
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/phonenumbermatcher.cc | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/phonenumbermatcher_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
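The four leniency tables above nest: every EXACT_GROUPING case is also accepted at STRICT_GROUPING, VALID and POSSIBLE, which is why the MatchesWith*Leniency tests build their inputs by concatenating tables with test_cases.insert. A minimal standalone sketch of that composition; NumberTest and CasesAcceptedAt here are illustrative stand-ins, not the fixture's real types:

// Sketch only: composing nested leniency tables, mirroring how the tests
// above concatenate kExactGroupingCases, kStrictGroupingCases, kValidCases
// and kPossibleOnlyCases.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct NumberTest {  // hypothetical stand-in for the fixture's NumberTest
  std::string raw_string;
  std::string region;
};

// Tables ordered strictest first: EXACT, STRICT, VALID, POSSIBLE-only.
// A leniency at index `level` accepts its own table plus every stricter one.
std::vector<NumberTest> CasesAcceptedAt(
    const std::vector<std::vector<NumberTest>>& tables, std::size_t level) {
  std::vector<NumberTest> out;
  for (std::size_t i = 0; i <= level && i < tables.size(); ++i) {
    out.insert(out.end(), tables[i].begin(), tables[i].end());
  }
  return out;
}

int main() {
  const std::vector<std::vector<NumberTest>> tables = {
      {{"415-666-7777", "US"}},   // exact grouping
      {{"415-6667777", "US"}},    // strict grouping
      {{"650//253-1234", "US"}},  // valid
      {{"7121115678", "US"}},     // possible only
  };
  std::cout << CasesAcceptedAt(tables, 2).size() << "\n";  // 3 (VALID level)
}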
7d0598b4-bb58-4832-ae50-e5f28c105d5d | cpp | tensorflow/tensorflow | acceleration_test_util_internal | tensorflow/lite/kernels/acceleration_test_util_internal.cc | tensorflow/lite/kernels/acceleration_test_util_internal_test.cc | #include "tensorflow/lite/kernels/acceleration_test_util_internal.h"
#include <ctype.h>
#include <algorithm>
#include <functional>
#include <iterator>
#include <sstream>
#include <string>
namespace tflite {
void ReadAccelerationConfig(
const char* config,
const std::function<void(std::string, std::string, bool)>& consumer) {
if (config) {
std::istringstream istream{config};
std::string curr_config_line;
while (std::getline(istream, curr_config_line)) {
curr_config_line.erase(
curr_config_line.begin(),
std::find_if_not(curr_config_line.begin(), curr_config_line.end(),
[](int ch) { return std::isspace(ch); }));
if (curr_config_line.empty() || curr_config_line.at(0) == '#') {
continue;
}
auto first_sep_pos =
std::find(curr_config_line.begin(), curr_config_line.end(), ',');
bool is_denylist = false;
std::string key = curr_config_line;
std::string value{};
if (first_sep_pos != curr_config_line.end()) {
key = std::string(curr_config_line.begin(), first_sep_pos);
value = std::string(first_sep_pos + 1, curr_config_line.end());
}
if (key[0] == '-') {
key = key.substr(1);
is_denylist = true;
}
consumer(key, value, is_denylist);
}
}
}
} | #include "tensorflow/lite/kernels/acceleration_test_util_internal.h"
#include <functional>
#include <optional>
#include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
using ::testing::Eq;
using ::testing::Not;
using ::testing::Test;
struct SimpleConfig {
public:
static constexpr char kAccelerationTestConfig[] =
R"(
#test-id,some-other-data
test-1,data-1
test-2,
test-3,data-3
test-4.*,data-4
-test-5
test-6
test-7,data-7
)";
static const char* AccelerationTestConfig() {
return kAccelerationTestConfig;
}
static SimpleConfig ParseConfigurationLine(const std::string& conf_line) {
return {conf_line};
}
std::string value;
};
class ReadAccelerationConfigTest : public ::testing::Test {
public:
std::unordered_map<std::string, SimpleConfig> allowlist_;
std::unordered_map<std::string, SimpleConfig> denylist_;
std::function<void(std::string, std::string, bool)> consumer_ =
[this](std::string key, std::string value, bool is_denylist) {
if (is_denylist) {
denylist_[key] = {value};
} else {
allowlist_[key] = {value};
}
};
};
TEST_F(ReadAccelerationConfigTest, ReadsAKeyOnlyLine) {
ReadAccelerationConfig("key", consumer_);
EXPECT_THAT(allowlist_.find("key"), Not(Eq(allowlist_.end())));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsADenylistKeyOnlyLine) {
ReadAccelerationConfig("-key", consumer_);
EXPECT_THAT(denylist_.find("key"), Not(Eq(allowlist_.end())));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsAKeyValueLine) {
ReadAccelerationConfig("key,value", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value"));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsADenyListKeyValueLine) {
ReadAccelerationConfig("-key,value", consumer_);
EXPECT_THAT(denylist_["key"].value, Eq("value"));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, KeysAreLeftTrimmed) {
ReadAccelerationConfig(" key,value", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value"));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, BlKeysAreLeftTrimmed) {
ReadAccelerationConfig(" -key,value", consumer_);
EXPECT_THAT(denylist_["key"].value, Eq("value"));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, IgnoresCommentedLines) {
ReadAccelerationConfig("#key,value", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, CommentCanHaveTrailingBlanks) {
ReadAccelerationConfig(" #key,value", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, CommentsAreOnlyForTheFullLine) {
ReadAccelerationConfig("key,value #comment", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value #comment"));
}
TEST_F(ReadAccelerationConfigTest, IgnoresEmptyLines) {
ReadAccelerationConfig("", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLines) {
ReadAccelerationConfig("key1,value1\nkey2,value2\n-key3,value3", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq("value1"));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
EXPECT_THAT(denylist_["key3"].value, Eq("value3"));
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLinesWithCommentsAndSpaces) {
ReadAccelerationConfig("key1,value1\n#comment\n\nkey2,value2", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq("value1"));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLinesWithMissingConfigValues) {
ReadAccelerationConfig("key1\nkey2,value2\nkey3\nkey4,value4", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq(""));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
EXPECT_THAT(allowlist_["key3"].value, Eq(""));
EXPECT_THAT(allowlist_["key4"].value, Eq("value4"));
}
TEST(GetAccelerationTestParam, LoadsTestConfig) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-3");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq("data-3"));
}
TEST(GetAccelerationTestParam, LoadsTestConfigWithEmptyValue) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-2");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq(""));
}
TEST(GetAccelerationTestParam, SupportsWildcards) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-41");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq("data-4"));
}
TEST(GetAccelerationTestParam, SupportDenylist) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-5");
ASSERT_FALSE(config_value_maybe.has_value());
}
struct UnmatchedSimpleConfig {
public:
static constexpr const char* kAccelerationTestConfig = nullptr;
static const char* AccelerationTestConfig() {
return kAccelerationTestConfig;
}
static UnmatchedSimpleConfig ParseConfigurationLine(
const std::string& conf_line) {
return {conf_line};
}
std::string value;
};
TEST(GetAccelerationTestParam, ReturnEmptyOptionalForNullConfig) {
ASSERT_FALSE(
GetAccelerationTestParam<UnmatchedSimpleConfig>("test-3").has_value());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/acceleration_test_util_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/acceleration_test_util_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
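The parser above recognizes left-trimmed lines, '#' full-line comments, a leading '-' denylist marker, and a key/value split on the first comma only. A self-contained sketch of the same line grammar; ParsedLine and ParseLine are illustrative names, and this trims only spaces and tabs rather than calling std::isspace:

// Sketch of the accepted line grammar; not the TFLite implementation.
#include <cstddef>
#include <iostream>
#include <string>

struct ParsedLine {
  std::string key;
  std::string value;
  bool is_denylist = false;
  bool is_comment_or_blank = false;
};

ParsedLine ParseLine(std::string line) {
  ParsedLine out;
  line.erase(0, line.find_first_not_of(" \t"));  // left-trim
  if (line.empty() || line[0] == '#') {          // blank or full-line comment
    out.is_comment_or_blank = true;
    return out;
  }
  if (line[0] == '-') {  // denylist marker
    out.is_denylist = true;
    line.erase(0, 1);
  }
  const std::size_t comma = line.find(',');  // split on the first comma only
  out.key = line.substr(0, comma);
  if (comma != std::string::npos) out.value = line.substr(comma + 1);
  return out;
}

int main() {
  const ParsedLine p = ParseLine("  -conv_test.*,use-nnapi");
  std::cout << p.key << "|" << p.value << "|" << p.is_denylist << "\n";
  // prints: conv_test.*|use-nnapi|1
}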
3560fe2e-68b3-49ac-ba97-3b1490679161 | cpp | abseil/abseil-cpp | explicit_seed_seq | absl/random/internal/explicit_seed_seq.h | absl/random/internal/explicit_seed_seq_test.cc | #ifndef ABSL_RANDOM_INTERNAL_EXPLICIT_SEED_SEQ_H_
#define ABSL_RANDOM_INTERNAL_EXPLICIT_SEED_SEQ_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>
#include <vector>
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
class ExplicitSeedSeq {
public:
using result_type = uint32_t;
ExplicitSeedSeq() : state_() {}
ExplicitSeedSeq(const ExplicitSeedSeq& other) = default;
ExplicitSeedSeq& operator=(const ExplicitSeedSeq& other) = default;
ExplicitSeedSeq(ExplicitSeedSeq&& other) = default;
ExplicitSeedSeq& operator=(ExplicitSeedSeq&& other) = default;
template <typename Iterator>
ExplicitSeedSeq(Iterator begin, Iterator end) {
for (auto it = begin; it != end; it++) {
state_.push_back(*it & 0xffffffff);
}
}
template <typename T>
ExplicitSeedSeq(std::initializer_list<T> il)
: ExplicitSeedSeq(il.begin(), il.end()) {}
size_t size() const { return state_.size(); }
template <typename OutIterator>
void param(OutIterator out) const {
std::copy(std::begin(state_), std::end(state_), out);
}
template <typename OutIterator>
void generate(OutIterator begin, OutIterator end) {
for (size_t index = 0; begin != end; begin++) {
*begin = state_.empty() ? 0 : state_[index++];
if (index >= state_.size()) {
index = 0;
}
}
}
protected:
std::vector<uint32_t> state_;
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/explicit_seed_seq.h"
#include <iterator>
#include <random>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/seed_sequences.h"
namespace {
using ::absl::random_internal::ExplicitSeedSeq;
template <typename Sseq>
bool ConformsToInterface() {
{ Sseq default_constructed_seq; }
{
uint32_t init_array[] = {1, 3, 5, 7, 9};
Sseq iterator_constructed_seq(init_array, &init_array[5]);
}
{ Sseq list_constructed_seq = {1, 3, 5, 7, 9, 11, 13}; }
{
uint32_t init_array[] = {1, 2, 3, 4, 5};
Sseq seq(init_array, &init_array[ABSL_ARRAYSIZE(init_array)]);
EXPECT_EQ(seq.size(), ABSL_ARRAYSIZE(init_array));
uint32_t state_array[ABSL_ARRAYSIZE(init_array)];
seq.param(state_array);
for (int i = 0; i < ABSL_ARRAYSIZE(state_array); i++) {
EXPECT_EQ(state_array[i], i + 1);
}
}
{
Sseq seq;
uint32_t seeds[5];
seq.generate(seeds, &seeds[ABSL_ARRAYSIZE(seeds)]);
}
return true;
}
}
TEST(SeedSequences, CheckInterfaces) {
EXPECT_TRUE(ConformsToInterface<std::seed_seq>());
EXPECT_TRUE(ConformsToInterface<ExplicitSeedSeq>());
}
TEST(ExplicitSeedSeq, DefaultConstructorGeneratesZeros) {
const size_t kNumBlocks = 128;
uint32_t outputs[kNumBlocks];
ExplicitSeedSeq seq;
seq.generate(outputs, &outputs[kNumBlocks]);
for (uint32_t& seed : outputs) {
EXPECT_EQ(seed, 0);
}
}
TEST(ExplicitSeeqSeq, SeedMaterialIsForwardedIdentically) {
const size_t kNumBlocks = 128;
uint32_t seed_material[kNumBlocks];
std::random_device urandom{"/dev/urandom"};
for (uint32_t& seed : seed_material) {
seed = urandom();
}
ExplicitSeedSeq seq(seed_material, &seed_material[kNumBlocks]);
{
const size_t kNumGenerated = kNumBlocks / 2;
uint32_t outputs[kNumGenerated];
seq.generate(outputs, &outputs[kNumGenerated]);
for (size_t i = 0; i < kNumGenerated; i++) {
EXPECT_EQ(outputs[i], seed_material[i]);
}
}
{
const size_t kNumGenerated = kNumBlocks;
uint32_t outputs[kNumGenerated];
seq.generate(outputs, &outputs[kNumGenerated]);
for (size_t i = 0; i < kNumGenerated; i++) {
EXPECT_EQ(outputs[i], seed_material[i]);
}
}
{
const size_t kNumGenerated = kNumBlocks * 2;
uint32_t outputs[kNumGenerated];
seq.generate(outputs, &outputs[kNumGenerated]);
for (size_t i = 0; i < kNumGenerated; i++) {
EXPECT_EQ(outputs[i], seed_material[i % kNumBlocks]);
}
}
}
TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
using testing::Each;
using testing::Eq;
using testing::Not;
using testing::Pointwise;
uint32_t entropy[4];
std::random_device urandom("/dev/urandom");
for (uint32_t& entry : entropy) {
entry = urandom();
}
ExplicitSeedSeq seq_from_entropy(std::begin(entropy), std::end(entropy));
{
ExplicitSeedSeq seq_copy(seq_from_entropy);
EXPECT_EQ(seq_copy.size(), seq_from_entropy.size());
std::vector<uint32_t> seeds_1(1000, 0);
std::vector<uint32_t> seeds_2(1000, 1);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
seq_copy.generate(seeds_2.begin(), seeds_2.end());
EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
}
{
for (uint32_t& entry : entropy) {
entry = urandom();
}
ExplicitSeedSeq another_seq(std::begin(entropy), std::end(entropy));
std::vector<uint32_t> seeds_1(1000, 0);
std::vector<uint32_t> seeds_2(1000, 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
another_seq.generate(seeds_2.begin(), seeds_2.end());
EXPECT_THAT(seeds_1, Not(Pointwise(Eq(), seeds_2)));
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
another_seq = seq_from_entropy;
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
another_seq.generate(seeds_2.begin(), seeds_2.end());
EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
}
{
std::vector<uint32_t> seeds_1(1000, 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
absl::random_internal::ExplicitSeedSeq moved_seq(
std::move(seq_from_entropy));
std::vector<uint32_t> seeds_2(1000, 1);
moved_seq.generate(seeds_2.begin(), seeds_2.end());
EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
EXPECT_EQ(seq_from_entropy.size(), 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
EXPECT_THAT(seeds_1, Each(Eq(0)));
}
}
TEST(ExplicitSeedSeq, StdURBGGoldenTests) {
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::minstd_rand rng(seed_sequence);
std::minstd_rand::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values,
testing::ElementsAre(579252, 43785881, 464353103, 1501811174));
}
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::mt19937 rng(seed_sequence);
std::mt19937::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values, testing::ElementsAre(138416803, 151130212, 33817739,
138416803));
}
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::mt19937_64 rng(seed_sequence);
std::mt19937_64::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values,
testing::ElementsAre(19738651785169348, 1464811352364190456,
18054685302720800, 19738651785169348));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/explicit_seed_seq.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/explicit_seed_seq_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
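The pass-through behavior verified above is what distinguishes ExplicitSeedSeq from std::seed_seq, which deliberately scrambles its inputs. A short standard-library-only contrast:

// Standard-library-only contrast with the pass-through tested above.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <random>

int main() {
  const std::uint32_t material[] = {1u, 2u, 3u, 4u};
  std::seed_seq seq(std::begin(material), std::end(material));
  std::uint32_t generated[4];
  seq.generate(std::begin(generated), std::end(generated));
  // std::seed_seq mixes its input, so this does not print 1 2 3 4;
  // ExplicitSeedSeq::generate would forward {1, 2, 3, 4} unchanged.
  for (std::uint32_t v : generated) std::cout << v << " ";
  std::cout << "\n";
}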
b7ffdda7-6fd5-418d-bc6a-e72a47e2948e | cpp | google/cel-cpp | basic_struct_type | common/types/basic_struct_type.cc | common/types/basic_struct_type_test.cc | #include <array>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "common/type.h"
namespace cel {
bool IsWellKnownMessageType(absl::string_view name) {
static constexpr absl::string_view kPrefix = "google.protobuf.";
static constexpr std::array<absl::string_view, 15> kNames = {
"Any",
"BoolValue",
"BytesValue",
"DoubleValue",
"Duration",
"FloatValue",
"Int32Value",
"Int64Value",
"ListValue",
"StringValue",
"Struct",
"Timestamp",
"UInt32Value",
"UInt64Value",
"Value",
};
if (!absl::ConsumePrefix(&name, kPrefix)) {
return false;
}
return absl::c_binary_search(kNames, name);
}
} | #include "common/type.h"
#include "common/type_kind.h"
#include "internal/testing.h"
namespace cel::common_internal {
namespace {
using ::testing::Eq;
using ::testing::IsEmpty;
TEST(BasicStructType, Kind) {
EXPECT_EQ(BasicStructType::kind(), TypeKind::kStruct);
}
TEST(BasicStructType, Default) {
BasicStructType type;
EXPECT_FALSE(type);
EXPECT_THAT(type.DebugString(), Eq(""));
EXPECT_EQ(type, BasicStructType());
}
TEST(BasicStructType, Name) {
BasicStructType type = MakeBasicStructType("test.Struct");
EXPECT_TRUE(type);
EXPECT_THAT(type.name(), Eq("test.Struct"));
EXPECT_THAT(type.DebugString(), Eq("test.Struct"));
EXPECT_THAT(type.GetParameters(), IsEmpty());
EXPECT_NE(type, BasicStructType());
EXPECT_NE(BasicStructType(), type);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/basic_struct_type.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/basic_struct_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
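IsWellKnownMessageType above depends on kNames staying lexicographically sorted, since absl::c_binary_search assumes ordered input. A small standalone sketch of the same strip-prefix-then-binary-search pattern using only the standard library; IsKnown and the shortened name table are illustrative:

// Sketch of the prefix-strip-then-binary-search lookup used above.
#include <algorithm>
#include <array>
#include <iostream>
#include <string_view>

bool IsKnown(std::string_view name) {
  static constexpr std::string_view kPrefix = "google.protobuf.";
  static constexpr std::array<std::string_view, 3> kSortedNames = {
      "Any", "Duration", "Timestamp"};  // must be kept sorted
  if (name.substr(0, kPrefix.size()) != kPrefix) return false;
  name.remove_prefix(kPrefix.size());
  return std::binary_search(kSortedNames.begin(), kSortedNames.end(), name);
}

int main() {
  std::cout << IsKnown("google.protobuf.Duration") << "\n";   // 1
  std::cout << IsKnown("google.protobuf.FieldMask") << "\n";  // 0
}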
f8e58a8a-64b4-41fc-96b5-e19ca6c2683f | cpp | google/tsl | retrying_utils | tsl/platform/retrying_utils.cc | tsl/platform/retrying_utils_test.cc | #include "tsl/platform/retrying_utils.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include "absl/time/time.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/random.h"
namespace tsl {
namespace {
bool IsRetriable(absl::StatusCode code) {
switch (code) {
case absl::StatusCode::kUnavailable:
case absl::StatusCode::kDeadlineExceeded:
case absl::StatusCode::kUnknown:
return true;
default:
return false;
}
}
double GenerateUniformRandomNumber() {
return random::New64() * (1.0 / std::numeric_limits<uint64_t>::max());
}
double GenerateUniformRandomNumberBetween(double a, double b) {
if (a == b) return a;
DCHECK_LT(a, b);
return a + GenerateUniformRandomNumber() * (b - a);
}
}
absl::Status RetryingUtils::CallWithRetries(
const std::function<absl::Status()>& f, const RetryConfig& config) {
return CallWithRetries(
f,
[](int64_t micros) {
return Env::Default()->SleepForMicroseconds(micros);
},
config);
}
absl::Status RetryingUtils::CallWithRetries(
const std::function<absl::Status()>& f,
const std::function<void(int64_t)>& sleep_usec, const RetryConfig& config) {
int retries = 0;
while (true) {
auto status = f();
if (!IsRetriable(status.code())) {
return status;
}
if (retries >= config.max_retries) {
return absl::Status(
absl::StatusCode::kAborted,
strings::StrCat(
"All ", config.max_retries,
" retry attempts failed. The last failure: ", status.message()));
}
int64_t delay_micros = 0;
if (config.init_delay_time_us > 0) {
const int64_t random_micros = random::New64() % 1000000;
delay_micros = std::min(config.init_delay_time_us << retries,
config.max_delay_time_us) +
random_micros;
}
VLOG(1) << "The operation failed and will be automatically retried in "
<< (delay_micros / 1000000.0) << " seconds (attempt "
<< (retries + 1) << " out of " << config.max_retries
<< "), caused by: " << status.ToString();
sleep_usec(delay_micros);
retries++;
}
}
absl::Status RetryingUtils::DeleteWithRetries(
const std::function<absl::Status()>& delete_func,
const RetryConfig& config) {
bool is_retried = false;
return RetryingUtils::CallWithRetries(
[delete_func, &is_retried]() {
const absl::Status status = delete_func();
if (is_retried && status.code() == error::NOT_FOUND) {
return absl::OkStatus();
}
is_retried = true;
return status;
},
config);
}
absl::Duration ComputeRetryBackoff(int current_retry_attempt,
absl::Duration min_delay,
absl::Duration max_delay) {
DCHECK_GE(current_retry_attempt, 0);
constexpr double kBackoffBase = 1.3;
constexpr double kBackoffRandMult = 0.4;
const absl::Duration first_term = min_delay * kBackoffRandMult;
absl::Duration uncapped_second_term =
min_delay * std::pow(kBackoffBase, current_retry_attempt);
absl::Duration second_term =
std::min(uncapped_second_term, max_delay - first_term);
second_term *=
GenerateUniformRandomNumberBetween(1.0 - kBackoffRandMult, 1.0);
return std::max(first_term + second_term, min_delay);
}
} | #include "tsl/platform/retrying_utils.h"
#include <cmath>
#include <fstream>
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(RetryingUtilsTest, CallWithRetries_RetryDelays) {
std::vector<double> requested_delays;
std::function<void(int64_t)> sleep = [&requested_delays](int64_t delay) {
requested_delays.emplace_back(delay / 1000000.0);
};
std::function<absl::Status()> f = []() {
return errors::Unavailable("Failed.");
};
const auto& status = RetryingUtils::CallWithRetries(
f, sleep, RetryConfig(500000 ));
EXPECT_TRUE(errors::IsAborted(status));
EXPECT_TRUE(absl::StrContains(
status.message(),
"All 10 retry attempts failed. The last failure: Failed."))
<< status;
EXPECT_EQ(10, requested_delays.size());
EXPECT_NEAR(0.5, requested_delays[0], 1.0);
EXPECT_NEAR(1.0, requested_delays[1], 1.0);
EXPECT_NEAR(2.0, requested_delays[2], 1.0);
EXPECT_NEAR(4.0, requested_delays[3], 1.0);
EXPECT_NEAR(8.0, requested_delays[4], 1.0);
EXPECT_NEAR(16.0, requested_delays[5], 1.0);
EXPECT_NEAR(32.0, requested_delays[6], 1.0);
EXPECT_NEAR(32.0, requested_delays[7], 1.0);
EXPECT_NEAR(32.0, requested_delays[8], 1.0);
EXPECT_NEAR(32.0, requested_delays[9], 1.0);
}
TEST(RetryingUtilsTest, CallWithRetries_NotFoundIsNotRetried) {
std::vector<absl::Status> results(
{errors::Unavailable("Failed."), errors::NotFound("Not found.")});
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
EXPECT_TRUE(errors::IsNotFound(RetryingUtils::CallWithRetries(
f, RetryConfig(0 ))));
}
TEST(RetryingUtilsTest, CallWithRetries_ImmediateSuccess) {
std::vector<absl::Status> results({absl::OkStatus()});
std::function<void(int64_t)> sleep = [](int64_t delay) {
ADD_FAILURE() << "Unexpected call to sleep.";
};
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::CallWithRetries(
f, sleep, RetryConfig(1L )));
}
TEST(RetryingUtilsTest, CallWithRetries_EventualSuccess) {
std::vector<absl::Status> results({errors::Unavailable("Failed."),
errors::Unavailable("Failed again."),
absl::OkStatus()});
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::CallWithRetries(
f, RetryConfig(0 )));
}
TEST(RetryingUtilsTest, DeleteWithRetries_ImmediateSuccess) {
std::vector<absl::Status> delete_results({absl::OkStatus()});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 )));
}
TEST(RetryingUtilsTest, DeleteWithRetries_EventualSuccess) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), absl::OkStatus()});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 )));
}
TEST(RetryingUtilsTest, DeleteWithRetries_PermissionDeniedNotRetried) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), errors::PermissionDenied("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
EXPECT_TRUE(errors::IsPermissionDenied(RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 ))));
}
TEST(RetryingUtilsTest, DeleteWithRetries_SuccessThroughFileNotFound) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), errors::NotFound("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 )));
}
TEST(RetryingUtilsTest, DeleteWithRetries_FirstNotFoundReturnedAsIs) {
std::vector<absl::Status> delete_results({errors::NotFound("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
EXPECT_EQ(error::NOT_FOUND,
RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 ))
.code());
}
TEST(RetryingUtilsTest, ComputeRetryBackoff) {
for (int i = 0; i < 30; ++i) {
EXPECT_LE(0.4 * absl::Milliseconds(1) +
0.6 * absl::Milliseconds(1) * std::pow(1.3, i),
ComputeRetryBackoff(i));
EXPECT_LE(
ComputeRetryBackoff(i),
0.4 * absl::Milliseconds(1) + absl::Milliseconds(1) * std::pow(1.3, i));
}
}
TEST(RetryingUtilsTest, ComputeRetryBackoff_MinMaxDelays) {
for (int i = 0; i < 30; ++i) {
EXPECT_EQ(ComputeRetryBackoff(i,
absl::Seconds(10)),
absl::Seconds(10));
EXPECT_EQ(ComputeRetryBackoff(i,
absl::Microseconds(1),
absl::Microseconds(1)),
absl::Microseconds(1));
}
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/retrying_utils.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/retrying_utils_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
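CallWithRetries above doubles an initial delay per attempt (init_delay_time_us << retries), caps it at max_delay_time_us, and adds up to one second of random jitter, while ComputeRetryBackoff instead grows by a factor of 1.3 per attempt with multiplicative jitter. A standalone sketch of the first scheme; BackoffMicros is an illustrative name:

// Standalone sketch of the capped, jittered exponential backoff above.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <random>

std::int64_t BackoffMicros(int retries, std::int64_t init_delay_us,
                           std::int64_t max_delay_us, std::mt19937_64& rng) {
  // Double per attempt, cap at the maximum, then add <1s of random jitter.
  const std::int64_t capped = std::min(init_delay_us << retries, max_delay_us);
  return capped + static_cast<std::int64_t>(rng() % 1000000);
}

int main() {
  std::mt19937_64 rng(42);
  // Mirrors the ~0.5s, 1s, 2s, ... 32s delays asserted in
  // CallWithRetries_RetryDelays.
  for (int retries = 0; retries < 10; ++retries) {
    std::cout << BackoffMicros(retries, 500000, 32000000, rng) << " us\n";
  }
}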
1c8ca7d3-f69b-4413-a156-dc617e13bd25 | cpp | tensorflow/tensorflow | mfcc_dct | tensorflow/lite/kernels/internal/mfcc_dct.cc | tensorflow/core/kernels/mfcc_dct_test.cc | #include "tensorflow/lite/kernels/internal/mfcc_dct.h"
#include <math.h>
namespace tflite {
namespace internal {
MfccDct::MfccDct() : initialized_(false) {}
bool MfccDct::Initialize(int input_length, int coefficient_count) {
coefficient_count_ = coefficient_count;
input_length_ = input_length;
if (coefficient_count_ < 1) {
return false;
}
if (input_length < 1) {
return false;
}
if (coefficient_count_ > input_length_) {
return false;
}
cosines_.resize(coefficient_count_);
double fnorm = sqrt(2.0 / input_length_);
const double pi = atan(1.0) * 4.0;
double arg = pi / input_length_;
for (int i = 0; i < coefficient_count_; ++i) {
cosines_[i].resize(input_length_);
for (int j = 0; j < input_length_; ++j) {
cosines_[i][j] = fnorm * cos(i * arg * (j + 0.5));
}
}
initialized_ = true;
return true;
}
void MfccDct::Compute(const std::vector<double> &input,
std::vector<double> *output) const {
if (!initialized_) {
return;
}
output->resize(coefficient_count_);
int length = input.size();
if (length > input_length_) {
length = input_length_;
}
for (int i = 0; i < coefficient_count_; ++i) {
double sum = 0.0;
for (int j = 0; j < length; ++j) {
sum += cosines_[i][j] * input[j];
}
(*output)[i] = sum;
}
}
}
} | #include "tensorflow/core/kernels/mfcc_dct.h"
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(MfccDctTest, AgreesWithMatlab) {
MfccDct dct;
std::vector<double> input = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
const int kCoefficientCount = 6;
ASSERT_TRUE(dct.Initialize(input.size(), kCoefficientCount));
std::vector<double> output;
dct.Compute(input, &output);
std::vector<double> expected = {12.1243556530, -4.1625617959, 0.0,
-0.4082482905, 0.0, -0.0800788912};
ASSERT_EQ(output.size(), kCoefficientCount);
for (int i = 0; i < kCoefficientCount; ++i) {
EXPECT_NEAR(output[i], expected[i], 1e-10);
}
}
TEST(MfccDctTest, InitializeFailsOnInvalidInput) {
MfccDct dct1;
EXPECT_FALSE(dct1.Initialize(-50, 1));
EXPECT_FALSE(dct1.Initialize(10, -4));
EXPECT_FALSE(dct1.Initialize(-1, -1));
EXPECT_FALSE(dct1.Initialize(20, 21));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/mfcc_dct.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mfcc_dct_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
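The kernel above implements an N-point DCT-II with a uniform sqrt(2/N) scale; note that, unlike the orthonormal DCT-II, coefficient 0 is not given the sqrt(1/N) scale. A standalone sketch using the same formula, which reproduces the golden values in the AgreesWithMatlab test:

// Standalone DCT-II sketch matching MfccDct::Initialize/Compute above:
// output[i] = sqrt(2/N) * sum_j input[j] * cos(i * (pi/N) * (j + 0.5)).
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<double> DctII(const std::vector<double>& input) {
  const int n = static_cast<int>(input.size());
  const double pi = std::atan(1.0) * 4.0;
  const double fnorm = std::sqrt(2.0 / n);  // same scale for every row,
                                            // including coefficient 0
  const double arg = pi / n;
  std::vector<double> output(n);
  for (int i = 0; i < n; ++i) {
    double sum = 0.0;
    for (int j = 0; j < n; ++j) {
      sum += input[j] * std::cos(i * arg * (j + 0.5));
    }
    output[i] = fnorm * sum;
  }
  return output;
}

int main() {
  // First value is 12.1243556530, the golden value in AgreesWithMatlab.
  for (double c : DctII({1.0, 2.0, 3.0, 4.0, 5.0, 6.0})) {
    std::printf("%.10f\n", c);
  }
}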
8ae863ea-b859-4a67-abeb-3bdd5ff08373 | cpp | google/quiche | chacha20_poly1305_encrypter | quiche/quic/core/crypto/chacha20_poly1305_encrypter.cc | quiche/quic/core/crypto/chacha20_poly1305_encrypter_test.cc | #include "quiche/quic/core/crypto/chacha20_poly1305_encrypter.h"
#include <limits>
#include "openssl/evp.h"
namespace quic {
namespace {
const size_t kKeySize = 32;
const size_t kNonceSize = 12;
}
ChaCha20Poly1305Encrypter::ChaCha20Poly1305Encrypter()
: ChaChaBaseEncrypter(EVP_aead_chacha20_poly1305, kKeySize, kAuthTagSize,
kNonceSize,
false) {
static_assert(kKeySize <= kMaxKeySize, "key size too big");
static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
ChaCha20Poly1305Encrypter::~ChaCha20Poly1305Encrypter() {}
QuicPacketCount ChaCha20Poly1305Encrypter::GetConfidentialityLimit() const {
return std::numeric_limits<QuicPacketCount>::max();
}
} | #include "quiche/quic/core/crypto/chacha20_poly1305_encrypter.h"
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/chacha20_poly1305_decrypter.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
struct TestVector {
const char* key;
const char* pt;
const char* iv;
const char* fixed;
const char* aad;
const char* ct;
};
const TestVector test_vectors[] = {
{
"808182838485868788898a8b8c8d8e8f"
"909192939495969798999a9b9c9d9e9f",
"4c616469657320616e642047656e746c"
"656d656e206f662074686520636c6173"
"73206f66202739393a20496620492063"
"6f756c64206f6666657220796f75206f"
"6e6c79206f6e652074697020666f7220"
"746865206675747572652c2073756e73"
"637265656e20776f756c642062652069"
"742e",
"4041424344454647",
"07000000",
"50515253c0c1c2c3c4c5c6c7",
"d31a8d34648e60db7b86afbc53ef7ec2"
"a4aded51296e08fea9e2b5a736ee62d6"
"3dbea45e8ca9671282fafb69da92728b"
"1a71de0a9e060b2905d6a5b67ecd3b36"
"92ddbd7f2d778b8c9803aee328091b58"
"fab324e4fad675945585808b4831d7bc"
"3ff4def08e4b7a9de576d26586cec64b"
"6116"
"1ae10b594f09e26a7e902ecb",
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
}
namespace quic {
namespace test {
QuicData* EncryptWithNonce(ChaCha20Poly1305Encrypter* encrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view plaintext) {
size_t ciphertext_size = encrypter->GetCiphertextSize(plaintext.length());
std::unique_ptr<char[]> ciphertext(new char[ciphertext_size]);
if (!encrypter->Encrypt(nonce, associated_data, plaintext,
reinterpret_cast<unsigned char*>(ciphertext.get()))) {
return nullptr;
}
return new QuicData(ciphertext.release(), ciphertext_size, true);
}
class ChaCha20Poly1305EncrypterTest : public QuicTest {};
TEST_F(ChaCha20Poly1305EncrypterTest, EncryptThenDecrypt) {
ChaCha20Poly1305Encrypter encrypter;
ChaCha20Poly1305Decrypter decrypter;
std::string key;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[0].key, &key));
ASSERT_TRUE(encrypter.SetKey(key));
ASSERT_TRUE(decrypter.SetKey(key));
ASSERT_TRUE(encrypter.SetNoncePrefix("abcd"));
ASSERT_TRUE(decrypter.SetNoncePrefix("abcd"));
uint64_t packet_number = UINT64_C(0x123456789ABC);
std::string associated_data = "associated_data";
std::string plaintext = "plaintext";
char encrypted[1024];
size_t len;
ASSERT_TRUE(encrypter.EncryptPacket(packet_number, associated_data, plaintext,
encrypted, &len,
ABSL_ARRAYSIZE(encrypted)));
absl::string_view ciphertext(encrypted, len);
char decrypted[1024];
ASSERT_TRUE(decrypter.DecryptPacket(packet_number, associated_data,
ciphertext, decrypted, &len,
ABSL_ARRAYSIZE(decrypted)));
}
TEST_F(ChaCha20Poly1305EncrypterTest, Encrypt) {
for (size_t i = 0; test_vectors[i].key != nullptr; i++) {
std::string key;
std::string pt;
std::string iv;
std::string fixed;
std::string aad;
std::string ct;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].pt, &pt));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].fixed, &fixed));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].ct, &ct));
ChaCha20Poly1305Encrypter encrypter;
ASSERT_TRUE(encrypter.SetKey(key));
std::unique_ptr<QuicData> encrypted(EncryptWithNonce(
&encrypter, fixed + iv,
absl::string_view(aad.length() ? aad.data() : nullptr, aad.length()),
pt));
ASSERT_TRUE(encrypted.get());
EXPECT_EQ(12u, ct.size() - pt.size());
EXPECT_EQ(12u, encrypted->length() - pt.size());
quiche::test::CompareCharArraysWithHexError("ciphertext", encrypted->data(),
encrypted->length(), ct.data(),
ct.length());
}
}
TEST_F(ChaCha20Poly1305EncrypterTest, GetMaxPlaintextSize) {
ChaCha20Poly1305Encrypter encrypter;
EXPECT_EQ(1000u, encrypter.GetMaxPlaintextSize(1012));
EXPECT_EQ(100u, encrypter.GetMaxPlaintextSize(112));
EXPECT_EQ(10u, encrypter.GetMaxPlaintextSize(22));
}
TEST_F(ChaCha20Poly1305EncrypterTest, GetCiphertextSize) {
ChaCha20Poly1305Encrypter encrypter;
EXPECT_EQ(1012u, encrypter.GetCiphertextSize(1000));
EXPECT_EQ(112u, encrypter.GetCiphertextSize(100));
EXPECT_EQ(22u, encrypter.GetCiphertextSize(10));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_encrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_encrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
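The size expectations above follow from Google QUIC truncating the 16-byte Poly1305 tag to a 12-byte kAuthTagSize, which is why the RFC 8439 vector's ciphertext is exactly 12 bytes longer than its plaintext in the Encrypt test. A compile-time sketch of that arithmetic:

// Size arithmetic behind GetCiphertextSize/GetMaxPlaintextSize above.
#include <cstddef>
#include <iostream>

constexpr std::size_t kAuthTagSize = 12;  // truncated from Poly1305's 16 bytes

constexpr std::size_t CiphertextSize(std::size_t plaintext) {
  return plaintext + kAuthTagSize;
}
constexpr std::size_t MaxPlaintextSize(std::size_t ciphertext) {
  return ciphertext - kAuthTagSize;
}

static_assert(CiphertextSize(1000) == 1012, "matches GetCiphertextSize test");
static_assert(MaxPlaintextSize(22) == 10, "matches GetMaxPlaintextSize test");

int main() { std::cout << CiphertextSize(10) << "\n"; }  // 22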
a843f051-effe-47f8-874c-478a730b6a98 | cpp | google/quiche | quiche_mem_slice_storage | quiche/common/quiche_mem_slice_storage.cc | quiche/common/quiche_mem_slice_storage_test.cc | #include "quiche/common/quiche_mem_slice_storage.h"
#include <algorithm>
#include <utility>
#include "quiche/quic/core/quic_utils.h"
namespace quiche {
QuicheMemSliceStorage::QuicheMemSliceStorage(
const struct iovec* iov, int iov_count, QuicheBufferAllocator* allocator,
const quic::QuicByteCount max_slice_len) {
if (iov == nullptr) {
return;
}
quic::QuicByteCount write_len = 0;
for (int i = 0; i < iov_count; ++i) {
write_len += iov[i].iov_len;
}
QUICHE_DCHECK_LT(0u, write_len);
size_t io_offset = 0;
while (write_len > 0) {
size_t slice_len = std::min(write_len, max_slice_len);
QuicheBuffer buffer = QuicheBuffer::CopyFromIovec(allocator, iov, iov_count,
io_offset, slice_len);
storage_.push_back(QuicheMemSlice(std::move(buffer)));
write_len -= slice_len;
io_offset += slice_len;
}
}
} | #include "quiche/common/quiche_mem_slice_storage.h"
#include <string>
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/simple_buffer_allocator.h"
namespace quiche {
namespace test {
namespace {
class QuicheMemSliceStorageImplTest : public QuicheTest {
public:
QuicheMemSliceStorageImplTest() = default;
};
TEST_F(QuicheMemSliceStorageImplTest, EmptyIov) {
QuicheMemSliceStorage storage(nullptr, 0, nullptr, 1024);
EXPECT_TRUE(storage.ToSpan().empty());
}
TEST_F(QuicheMemSliceStorageImplTest, SingleIov) {
SimpleBufferAllocator allocator;
std::string body(3, 'c');
struct iovec iov = {const_cast<char*>(body.data()), body.length()};
QuicheMemSliceStorage storage(&iov, 1, &allocator, 1024);
auto span = storage.ToSpan();
EXPECT_EQ("ccc", span[0].AsStringView());
EXPECT_NE(static_cast<const void*>(span[0].data()), body.data());
}
TEST_F(QuicheMemSliceStorageImplTest, MultipleIovInSingleSlice) {
SimpleBufferAllocator allocator;
std::string body1(3, 'a');
std::string body2(4, 'b');
struct iovec iov[] = {{const_cast<char*>(body1.data()), body1.length()},
{const_cast<char*>(body2.data()), body2.length()}};
QuicheMemSliceStorage storage(iov, 2, &allocator, 1024);
auto span = storage.ToSpan();
EXPECT_EQ("aaabbbb", span[0].AsStringView());
}
TEST_F(QuicheMemSliceStorageImplTest, MultipleIovInMultipleSlice) {
SimpleBufferAllocator allocator;
std::string body1(4, 'a');
std::string body2(4, 'b');
struct iovec iov[] = {{const_cast<char*>(body1.data()), body1.length()},
{const_cast<char*>(body2.data()), body2.length()}};
QuicheMemSliceStorage storage(iov, 2, &allocator, 4);
auto span = storage.ToSpan();
EXPECT_EQ("aaaa", span[0].AsStringView());
EXPECT_EQ("bbbb", span[1].AsStringView());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_mem_slice_storage.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_mem_slice_storage_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
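The constructor above copies the total iovec payload out in slices of at most max_slice_len bytes. A standalone sketch of the same chunking loop over a flat string; Chunk is an illustrative helper, not part of the API:

// Sketch of the slice-capping loop in QuicheMemSliceStorage above.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> Chunk(const std::string& payload,
                               std::size_t max_slice_len) {
  std::vector<std::string> slices;
  for (std::size_t offset = 0; offset < payload.size();) {
    const std::size_t len = std::min(payload.size() - offset, max_slice_len);
    slices.push_back(payload.substr(offset, len));
    offset += len;
  }
  return slices;
}

int main() {
  // Mirrors MultipleIovInMultipleSlice: "aaaa" then "bbbb" with a 4-byte cap.
  for (const std::string& s : Chunk("aaaabbbb", 4)) std::cout << s << "\n";
}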
a7970c6c-3eda-4e9f-9136-c7919de2cdd1 | cpp | google/tensorstore | ec2_credential_provider | tensorstore/kvstore/s3/credentials/ec2_credential_provider.cc | tensorstore/kvstore/s3/credentials/ec2_credential_provider_test.cc | #include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_metadata.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/std_optional.h"
ABSL_FLAG(std::optional<std::string>,
tensorstore_aws_ec2_metadata_service_endpoint, std::nullopt,
"Endpoint to used for http access AWS metadata service. "
"Overrides AWS_EC2_METADATA_SERVICE_ENDPOINT.");
using ::tensorstore::Result;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal::ParseJson;
using ::tensorstore::internal_http::HttpRequestBuilder;
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kMetadataTokenHeader[] = "x-aws-ec2-metadata-token:";
static constexpr char kIamCredentialsPath[] =
"/latest/meta-data/iam/security-credentials/";
static constexpr absl::Duration kConnectTimeout = absl::Milliseconds(200);
static constexpr absl::Duration kDefaultTimeout = absl::Minutes(5);
static constexpr char kSuccess[] = "Success";
std::string GetEC2MetadataServiceEndpoint() {
return GetFlagOrEnvValue(FLAGS_tensorstore_aws_ec2_metadata_service_endpoint,
"AWS_EC2_METADATA_SERVICE_ENDPOINT")
.value_or("http:
}
struct EC2CredentialsResponse {
std::string code;
std::optional<absl::Time> last_updated;
std::optional<std::string> type;
std::optional<std::string> access_key_id;
std::optional<std::string> secret_access_key;
std::optional<std::string> token;
std::optional<absl::Time> expiration;
};
inline constexpr auto EC2CredentialsResponseBinder = jb::Object(
jb::Member("Code", jb::Projection(&EC2CredentialsResponse::code)),
jb::OptionalMember("LastUpdated",
jb::Projection(&EC2CredentialsResponse::last_updated)),
jb::OptionalMember("Type", jb::Projection(&EC2CredentialsResponse::type)),
jb::OptionalMember("AccessKeyId",
jb::Projection(&EC2CredentialsResponse::access_key_id)),
jb::OptionalMember(
"SecretAccessKey",
jb::Projection(&EC2CredentialsResponse::secret_access_key)),
jb::OptionalMember("Token", jb::Projection(&EC2CredentialsResponse::token)),
jb::OptionalMember("Expiration",
jb::Projection(&EC2CredentialsResponse::expiration)));
Result<absl::Cord> GetEC2ApiToken(std::string_view endpoint,
internal_http::HttpTransport& transport) {
const std::string token_url =
tensorstore::StrCat(endpoint, "/latest/api/token");
const std::string request_header =
"x-aws-ec2-metadata-token-ttl-seconds: 21600";
const auto request_options = internal_http::IssueRequestOptions()
.SetRequestTimeout(absl::InfiniteDuration())
.SetConnectTimeout(kConnectTimeout);
for (auto method : {std::string_view("POST"), std::string_view("PUT")}) {
auto token_request = HttpRequestBuilder(method, token_url)
.AddHeader(request_header)
.BuildRequest();
TENSORSTORE_ASSIGN_OR_RETURN(
auto token_response,
transport.IssueRequest(token_request, request_options).result());
if (method == "POST" && (token_response.status_code == 405 ||
token_response.status_code == 401)) {
continue;
}
bool is_retryable = false;
TENSORSTORE_RETURN_IF_ERROR(
AwsHttpResponseToStatus(token_response, is_retryable));
return std::move(token_response.payload);
}
return absl::NotFoundError(
"Failed to obtain EC2 API token from either IMDSv1 or IMDSv2");
}
}
Result<AwsCredentials> EC2MetadataCredentialProvider::GetCredentials() {
if (endpoint_.empty()) {
endpoint_ = GetEC2MetadataServiceEndpoint();
}
TENSORSTORE_ASSIGN_OR_RETURN(auto api_token,
GetEC2ApiToken(endpoint_, *transport_));
auto token_header = tensorstore::StrCat(kMetadataTokenHeader, api_token);
auto iam_role_request =
HttpRequestBuilder("GET",
tensorstore::StrCat(endpoint_, kIamCredentialsPath))
.AddHeader(token_header)
.BuildRequest();
TENSORSTORE_ASSIGN_OR_RETURN(
auto iam_role_response,
transport_->IssueRequest(iam_role_request, {}).result());
auto iam_role_plain_text = iam_role_response.payload.Flatten();
bool is_retryable = false;
TENSORSTORE_RETURN_IF_ERROR(
AwsHttpResponseToStatus(iam_role_response, is_retryable));
std::vector<std::string_view> iam_roles =
absl::StrSplit(iam_role_plain_text, '\n', absl::SkipWhitespace());
if (iam_roles.empty()) {
return absl::NotFoundError("Empty EC2 Role list");
}
auto iam_credentials_request_url =
tensorstore::StrCat(endpoint_, kIamCredentialsPath, iam_roles[0]);
auto iam_credentials_request =
HttpRequestBuilder("GET", iam_credentials_request_url)
.AddHeader(token_header)
.BuildRequest();
TENSORSTORE_ASSIGN_OR_RETURN(
auto iam_credentials_response,
transport_->IssueRequest(iam_credentials_request, {}).result());
auto iam_credentials_plain_text = iam_credentials_response.payload.Flatten();
TENSORSTORE_RETURN_IF_ERROR(
AwsHttpResponseToStatus(iam_credentials_response, is_retryable));
auto json_credentials = ParseJson(iam_credentials_plain_text);
TENSORSTORE_ASSIGN_OR_RETURN(
auto iam_credentials,
jb::FromJson<EC2CredentialsResponse>(json_credentials,
EC2CredentialsResponseBinder));
if (iam_credentials.code != kSuccess) {
return absl::NotFoundError(
absl::StrCat("EC2Metadata request to [", iam_credentials_request_url,
"] failed with code ", iam_credentials.code));
}
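  // Expire the cached credentials one minute early (or after a five-minute
  // default window when IMDS omits "Expiration") so callers refresh before
  // the credentials actually lapse.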
auto default_timeout = absl::Now() + kDefaultTimeout;
auto expires_at =
iam_credentials.expiration.value_or(default_timeout) - absl::Seconds(60);
return AwsCredentials{iam_credentials.access_key_id.value_or(""),
iam_credentials.secret_access_key.value_or(""),
iam_credentials.token.value_or(""), expires_at};
}
}
} | #include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/kvstore/s3/credentials/test_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::DefaultEC2MetadataFlow;
using ::tensorstore::internal_kvstore_s3::EC2MetadataCredentialProvider;
static constexpr char kDefaultEndpoint[] = "http://169.254.169.254";
// NOTE: the original custom-endpoint URL was lost to comment-stripping
// ("//" truncation); any consistent URL works for these tests, so this is a
// stand-in value.
static constexpr char kCustomEndpoint[] = "http://localhost:6666";
static constexpr char kApiToken[] = "1234567890";
static constexpr char kAccessKey[] = "ASIA1234567890";
static constexpr char kSecretKey[] = "1234567890abcdef";
static constexpr char kSessionToken[] = "abcdef123456790";
class EC2MetadataCredentialProviderTest : public ::testing::Test {
protected:
void SetUp() override { UnsetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT"); }
};
TEST_F(EC2MetadataCredentialProviderTest, CredentialRetrievalFlow) {
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kDefaultEndpoint, kApiToken, kAccessKey,
kSecretKey, kSessionToken, expiry));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, EnvironmentVariableMetadataServer) {
SetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT", kCustomEndpoint);
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
kSessionToken, expiry));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, InjectedMetadataServer) {
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
kSessionToken, expiry));
auto provider = std::make_shared<EC2MetadataCredentialProvider>(
kCustomEndpoint, mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, NoIamRolesInSecurityCredentials) {
auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{200, absl::Cord{kApiToken}}},
{"GET http:
HttpResponse{
200, absl::Cord{""}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
ASSERT_FALSE(provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
EXPECT_THAT(provider->GetCredentials().status().ToString(),
::testing::HasSubstr("Empty EC2 Role list"));
}
TEST_F(EC2MetadataCredentialProviderTest, UnsuccessfulJsonResponse) {
auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{200, absl::Cord{kApiToken}}},
{"GET http:
HttpResponse{
200, absl::Cord{"info"}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
{"GET http:
HttpResponse{200,
absl::Cord{"mock-iam-role"},
{{"x-aws-ec2-metadata-token", kApiToken}}}},
{"GET "
"http:
"mock-iam-role",
HttpResponse{200,
absl::Cord(R"({"Code": "EntirelyUnsuccessful"})"),
{{"x-aws-ec2-metadata-token", kApiToken}}}}};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
auto credentials = provider->GetCredentials();
EXPECT_THAT(credentials.status(), MatchesStatus(absl::StatusCode::kNotFound));
EXPECT_THAT(credentials.status().ToString(),
::testing::AllOf(::testing::HasSubstr("EC2Metadata request"),
::testing::HasSubstr("EntirelyUnsuccessful")));
}
TEST_F(EC2MetadataCredentialProviderTest, IMDSv2AfterFailure) {
auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{405, absl::Cord()}},
{"PUT http:
HttpResponse{401, absl::Cord{}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
auto credentials = provider->GetCredentials();
EXPECT_THAT(credentials.status(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/ec2_credential_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/ec2_credential_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a702c183-894e-41e6-b224-457072d64d80 | cpp | tensorflow/tensorflow | canonicalize_value | tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.cc | tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value_test.cc | #include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <iterator>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "re2/re2.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite::acceleration {
namespace {
inline char ascii_normalise(const unsigned char c) {
if (c == ' ' || c == '-') {
return '_';
}
return absl::ascii_tolower(c);
}
}
std::string CanonicalizeValue(absl::string_view value) {
std::string output;
absl::c_transform(value, std::back_inserter(output),
tflite::acceleration::ascii_normalise);
return output;
}
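// For the GPU-model key, ANGLE-on-Vulkan strings for Samsung Xclipse GPUs
// carry a trailing driver/API version (e.g. " 1.1.179" in the tests); the
// regex keeps only the stable "angle_(samsung_xclipse_NNN)_on_vulkan" prefix
// so lookups are version-independent.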
std::string CanonicalizeValueWithKey(absl::string_view key,
absl::string_view value) {
std::string output = CanonicalizeValue(value);
std::string gpu_output;
return key == kGPUModel &&
RE2::FullMatch(
output,
R"((angle_\(samsung_xclipse_[0-9]*\)_on_vulkan).*$)",
&gpu_output)
? gpu_output
: output;
}
} | #include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite::acceleration {
namespace {
TEST(CanonicalizeValue, CharactersAreLowercased) {
EXPECT_EQ(CanonicalizeValue("hElLo"), "hello");
}
TEST(CanonicalizeValue, HyphensAreReplaced) {
EXPECT_EQ(CanonicalizeValue("-"), "_");
}
TEST(CanonicalizeValue, SpacesAreReplaced) {
EXPECT_EQ(CanonicalizeValue(" "), "_");
}
TEST(CanonicalizeValue, OtherSpecialCharactersAreUnaffected) {
for (unsigned char c = 0; c < 65; ++c) {
if (c == ' ' || c == '-') continue;
    // Build a one-character string; brace-init here would instead create a
    // two-character string starting with '\x01'.
    std::string s(1, static_cast<char>(c));
EXPECT_EQ(CanonicalizeValue(s), s);
}
}
TEST(CanonicalizeValue, SamsungXclipseGpuNormalized) {
EXPECT_EQ(CanonicalizeValueWithKey(
kGPUModel, "ANGLE (Samsung Xclipse 920) on Vulkan 1.1.179"),
"angle_(samsung_xclipse_920)_on_vulkan");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
553bcb62-c346-40ab-86a4-569637b54c24 | cpp | google/tensorstore | heterogeneous_container | tensorstore/internal/container/heterogeneous_container.h | tensorstore/internal/container/heterogeneous_container_test.cc | #ifndef TENSORSTORE_INTERNAL_CONTAINER_HETEROGENEOUS_CONTAINER_H_
#define TENSORSTORE_INTERNAL_CONTAINER_HETEROGENEOUS_CONTAINER_H_
#include <functional>
#include "absl/container/flat_hash_set.h"
namespace tensorstore {
namespace internal {
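// Wraps a hasher or equality functor and marks it transparent, which lets
// absl::flat_hash_set accept heterogeneous lookup keys.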
template <typename T>
struct SupportsHeterogeneous : public T {
using is_transparent = void;
};
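// Adapts both the stored EntryPointer and a bare key of type T to one
// hashable, comparable value, so the set can be probed with either form.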
template <typename EntryPointer, typename T, auto Getter>
struct KeyAdapter {
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
KeyAdapter(U&& key) : value(std::forward<U>(key)) {}
KeyAdapter(const EntryPointer& e) : value(std::invoke(Getter, *e)) {}
template <typename H>
friend H AbslHashValue(H h, const KeyAdapter& key) {
return H::combine(std::move(h), key.value);
}
friend bool operator==(const KeyAdapter& a, const KeyAdapter& b) {
return a.value == b.value;
}
T value;
};
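// A flat_hash_set of EntryPointer that can also be looked up directly by the
// key of type T that Getter extracts from each entry.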
template <typename EntryPointer, typename T, auto Getter>
using HeterogeneousHashSet = absl::flat_hash_set<
EntryPointer,
SupportsHeterogeneous<absl::Hash<KeyAdapter<EntryPointer, T, Getter>>>,
SupportsHeterogeneous<std::equal_to<KeyAdapter<EntryPointer, T, Getter>>>>;
}
}
#endif | #include "tensorstore/internal/container/heterogeneous_container.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::HeterogeneousHashSet;
struct Entry {
std::string id;
};
using Set =
HeterogeneousHashSet<std::shared_ptr<Entry>, std::string_view, &Entry::id>;
TEST(HeterogeneousHashSetTest, Basic) {
Set set;
auto a = std::make_shared<Entry>(Entry{"a"});
auto b = std::make_shared<Entry>(Entry{"b"});
EXPECT_TRUE(set.insert(a).second);
EXPECT_TRUE(set.insert(b).second);
{
auto it = set.find("a");
ASSERT_NE(set.end(), it);
EXPECT_EQ(a, *it);
}
{
auto it = set.find(a);
ASSERT_NE(set.end(), it);
EXPECT_EQ(a, *it);
}
{
auto it = set.find("b");
ASSERT_NE(set.end(), it);
EXPECT_EQ(b, *it);
}
{
auto it = set.find(b);
ASSERT_NE(set.end(), it);
EXPECT_EQ(b, *it);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/heterogeneous_container.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/heterogeneous_container_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4c512995-bf4a-4627-b748-73069394ca01 | cpp | tensorflow/tensorflow | elementwise_unary | tensorflow/lite/experimental/shlo/legacy/src/elementwise_unary.cc | tensorflow/lite/experimental/shlo/legacy/test/elementwise_unary_test.cc | #include <bit>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <version>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/bf16.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/f16.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
template <typename Value>
absl::Status CheckParameters(const Value& operand, Value& result) {
if (operand.baseline_type() != result.baseline_type()) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_type(operand) = baseline_type(result)");
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (!(operand.is_per_tensor_quantized() and
result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError("Expected per-tensor quantization");
}
}
if (operand.layout().has_strides() || result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
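// Applies `op` element-wise. Plain tensors run the op directly on storage
// values; quantized tensors dequantize each element with the operand
// parameters, apply the op, and re-quantize with the result parameters
// (CompleteQuantization finishes the storage-side conversion).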
template <ElementType storage_type, ElementType expressed_type, typename Value,
typename Op>
absl::Status ElementwiseUnaryOp(const Value& operand, Value& result, Op&& op) {
if (auto check = CheckParameters(operand, result); !check.ok()) {
return check;
}
using S = Storage<storage_type>;
auto operand_buffer = operand.buffer();
auto result_buffer = result.buffer();
size_t n = operand.num_elements();
if constexpr (std::is_same_v<Value, Tensor>) {
if (storage_type != operand.element_type()) {
return absl::InvalidArgumentError("Unexpected tensor element type");
}
for (size_t i = 0; i < n; ++i) {
auto x = S::Get(operand_buffer, i);
auto y = op(x);
S::Set(result_buffer, i, y);
}
} else {
static_assert(std::is_same_v<Value, QuantizedTensor>);
if (storage_type != result.storage_type()) {
return absl::InvalidArgumentError("Unexpected storage type");
} else if (expressed_type != result.expressed_type()) {
return absl::InvalidArgumentError("Unexpected expressed type");
}
const QuantizedParameter& operand_quant_param =
operand.type().element_type().parameters(0);
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
using ET = typename Storage<expressed_type>::Type;
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
for (size_t i = 0; i < n; ++i) {
auto operand_storage = S::Get(operand_buffer, i);
auto result_storage =
DequantizeOpQuantizePartial<storage_type, expressed_type>(
operand_storage, operand_quant_param, result_scale_inv,
result_quant_param.zero_point, op);
S::Set(result_buffer, i, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result);
!status.ok()) {
return status;
}
}
return absl::OkStatus();
}
#define DEFINE_ELEMENTWISE_UNARY_OP(name, element_type, expression) \
absl::Status name(const Tensor& operand, Tensor& result) { \
return ElementwiseUnaryOp<element_type, element_type>( \
operand, result, [](auto x) { return expression; }); \
}
#define DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name, storage_type, \
expressed_type, expression) \
absl::Status name(const QuantizedTensor& operand, QuantizedTensor& result) { \
return ElementwiseUnaryOp<storage_type, expressed_type>( \
operand, result, [](auto x) { return expression; }); \
}
#define DEFINE_ELEMENTWISE_UNARY_OP_BOOL(name, expression) \
DEFINE_ELEMENTWISE_UNARY_OP(name##_i1, ElementType::kI1, expression);
#define DEFINE_ELEMENTWISE_UNARY_OP_INT(name, expression) \
DEFINE_ELEMENTWISE_UNARY_OP(name##_si8, ElementType::kSI8, expression); \
DEFINE_ELEMENTWISE_UNARY_OP(name##_si16, ElementType::kSI16, expression); \
DEFINE_ELEMENTWISE_UNARY_OP(name##_si32, ElementType::kSI32, expression);
#define DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(name, expression) \
DEFINE_ELEMENTWISE_UNARY_OP(name##_bf16, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_UNARY_OP(name##_f16, ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_UNARY_OP(name##_f32, ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si8_bf16, ElementType::kSI8, \
ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si8_f16, ElementType::kSI8, \
ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si8_f32, ElementType::kSI8, \
ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP( \
name##_q_si16_bf16, ElementType::kSI16, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si16_f16, ElementType::kSI16, \
ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si16_f32, ElementType::kSI16, \
ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP( \
name##_q_si32_bf16, ElementType::kSI32, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si32_f16, ElementType::kSI32, \
ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si32_f32, ElementType::kSI32, \
ElementType::kF32, expression);
#define CALL_UNARY_OP_BOOL_HELPER(name, operand, result) \
case ElementType::kI1: \
return name##_i1(operand, result);
#define CALL_UNARY_OP_INT_HELPER(name, operand, result) \
case ElementType::kSI8: \
return name##_si8(operand, result); \
case ElementType::kSI16: \
return name##_si16(operand, result); \
case ElementType::kSI32: \
return name##_si32(operand, result);
#define CALL_UNARY_OP_FLOAT_HELPER(name, operand, result) \
case ElementType::kBF16: \
return name##_bf16(operand, result); \
case ElementType::kF16: \
return name##_f16(operand, result); \
case ElementType::kF32: \
return name##_f32(operand, result);
#define CALL_UNARY_OP_BOOL_INT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_BOOL_HELPER(name, operand, result); \
CALL_UNARY_OP_INT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_OP_INT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_INT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_OP_FLOAT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_FLOAT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_OP_INT_FLOAT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_INT_HELPER(name, operand, result); \
CALL_UNARY_OP_FLOAT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_OP_BOOL_INT_FLOAT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_BOOL_HELPER(name, operand, result); \
CALL_UNARY_OP_INT_HELPER(name, operand, result); \
CALL_UNARY_OP_FLOAT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_QUANTIZED_OP(name, operand, result) \
{ \
auto storage_type = operand.storage_type(); \
auto expressed_type = operand.expressed_type(); \
switch (storage_type) { \
case ElementType::kSI8: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si8_bf16(operand, result); \
case ElementType::kF16: \
return name##_q_si8_f16(operand, result); \
case ElementType::kF32: \
return name##_q_si8_f32(operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
case ElementType::kSI16: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si16_bf16(operand, result); \
case ElementType::kF16: \
return name##_q_si16_f16(operand, result); \
case ElementType::kF32: \
return name##_q_si16_f32(operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
case ElementType::kSI32: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si32_bf16(operand, result); \
case ElementType::kF16: \
return name##_q_si32_f16(operand, result); \
case ElementType::kF32: \
return name##_q_si32_f32(operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
default: \
return absl::InvalidArgumentError("Unexpected storage type"); \
} \
}
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_INT(Abs, ((x > 0) ? x : -x));
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Abs, ((x > 0) ? x : -x));
}
absl::Status Abs(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT_FLOAT(Abs, operand, result);
}
absl::Status Abs(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Abs, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Cbrt, std::cbrt(static_cast<float>(x)));
}
absl::Status Cbrt(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Cbrt, operand, result);
}
absl::Status Cbrt(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Cbrt, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Ceil, std::ceil(static_cast<float>(x)));
}
absl::Status Ceil(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Ceil, operand, result);
}
absl::Status Ceil(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Ceil, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Cosine, std::cos(static_cast<float>(x)));
}
absl::Status Cosine(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Cosine, operand, result);
}
absl::Status Cosine(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Cosine, operand, result);
}
namespace {
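// Portable count-leading-zeros: uses std::countl_zero where <bit> provides
// it (C++20), otherwise scans down from the most significant bit.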
template <typename Int>
inline Int CountLeadingZeros(Int x) {
using UInt = typename std::make_unsigned<Int>::type;
#if __cpp_lib_bitops >= 201907L
return std::countl_zero(static_cast<UInt>(x));
#else
if (!x) {
return 8 * sizeof(x);
}
Int result = 0;
auto mask = UInt(1) << (8 * (sizeof(x) - 1) + 7);
for (auto t = static_cast<UInt>(x); t > 0; t <<= 1) {
if (t & mask) break;
result++;
}
return result;
#endif
}
DEFINE_ELEMENTWISE_UNARY_OP_INT(CountLeadingZeros, CountLeadingZeros(x));
}
absl::Status CountLeadingZeros(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT(CountLeadingZeros, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Exponential, std::exp(static_cast<float>(x)));
}
absl::Status Exponential(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Exponential, operand, result);
}
absl::Status Exponential(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Exponential, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(ExponentialMinusOne,
std::expm1(static_cast<float>(x)));
}
absl::Status ExponentialMinusOne(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(ExponentialMinusOne, operand, result);
}
absl::Status ExponentialMinusOne(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(ExponentialMinusOne, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Floor, std::floor(static_cast<float>(x)));
}
absl::Status Floor(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Floor, operand, result);
}
absl::Status Floor(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Floor, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Log, std::log(static_cast<float>(x)));
}
absl::Status Log(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Log, operand, result);
}
absl::Status Log(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Log, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(LogPlusOne,
std::log1p(static_cast<float>(x)));
}
absl::Status LogPlusOne(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(LogPlusOne, operand, result);
}
absl::Status LogPlusOne(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(LogPlusOne, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(
    Logistic, 1.0f / (1.0f + std::exp(static_cast<float>(-x))));
}
absl::Status Logistic(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Logistic, operand, result);
}
absl::Status Logistic(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Logistic, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_INT(Negate, -x);
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Negate, -x);
}
absl::Status Negate(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT_FLOAT(Negate, operand, result);
}
absl::Status Negate(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Negate, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_BOOL(Not, !x);
DEFINE_ELEMENTWISE_UNARY_OP_INT(Not, ~x);
}
absl::Status Not(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_BOOL_INT(Not, operand, result);
}
namespace {
template <typename Int>
Int Popcount(Int x) {
#if __cpp_lib_bitops >= 201907L
  // Count bits in the operand's own width: casting straight to uint32_t
  // would sign-extend negative values and overcount.
  using UInt = typename std::make_unsigned<Int>::type;
  return std::popcount(static_cast<UInt>(x));
#else
using UInt = typename std::make_unsigned<Int>::type;
Int result = 0;
UInt mask = 0x1;
for (auto t = static_cast<UInt>(x); t > 0; t >>= 1) {
result += (t & mask);
}
return result;
#endif
}
DEFINE_ELEMENTWISE_UNARY_OP_INT(Popcnt, Popcount(x));
}
absl::Status Popcnt(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT(Popcnt, operand, result);
}
namespace {
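// Round half away from zero; std::round already has exactly these tie
// semantics.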
template <typename Float>
inline Float RoundNearestAfz(Float x) {
return std::round(static_cast<float>(x));
}
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(RoundNearestAfz, RoundNearestAfz(x));
}
absl::Status RoundNearestAfz(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(RoundNearestAfz, operand, result);
}
absl::Status RoundNearestAfz(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(RoundNearestAfz, operand, result);
}
namespace {
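// Round half to even: std::remainder(x, 1) returns x minus the nearest
// integer (ties resolved to even), so subtracting it from x yields the
// banker's-rounding result.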
template <typename Float>
inline Float RoundNearestEven(Float x) {
return x - static_cast<Float>(std::remainder(static_cast<float>(x), 1.0f));
}
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(RoundNearestEven, RoundNearestEven(x));
}
absl::Status RoundNearestEven(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(RoundNearestEven, operand, result);
}
absl::Status RoundNearestEven(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(RoundNearestEven, operand, result);
}
namespace {
template <typename Float>
inline Float Rsqrt(Float x) {
return Float{1} / static_cast<Float>(std::sqrt(static_cast<float>(x)));
}
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Rsqrt, Rsqrt(x));
}
absl::Status Rsqrt(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Rsqrt, operand, result);
}
absl::Status Rsqrt(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Rsqrt, operand, result);
}
namespace {
template <typename Number>
inline Number Sign(Number x) {
if constexpr (std::is_integral<Number>::value) {
return x < 0 ? -1 : (x > 0 ? 1 : 0);
} else {
static_assert(std::is_floating_point<Number>::value ||
std::is_same_v<Number, BF16> || std::is_same_v<Number, F16>);
if (std::isnan(x)) {
return NAN;
}
return (x < 0 ? -1 : (x > 0 ? 1 : 0));
}
}
DEFINE_ELEMENTWISE_UNARY_OP_INT(Sign, Sign(x));
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Sign, Sign(x));
}
absl::Status Sign(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT_FLOAT(Sign, operand, result);
}
absl::Status Sign(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Sign, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Sine, std::sin(static_cast<float>(x)));
}
absl::Status Sine(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Sine, operand, result);
}
absl::Status Sine(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Sine, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Sqrt, std::sqrt(static_cast<float>(x)));
}
absl::Status Sqrt(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Sqrt, operand, result);
}
absl::Status Sqrt(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Sqrt, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Tanh, std::tanh(static_cast<float>(x)));
}
absl::Status Tanh(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Tanh, operand, result);
}
absl::Status Tanh(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Tanh, operand, result);
}
} | #include <cmath>
#include <cstdint>
#include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/matchers.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
template <ElementType element_type>
void test(absl::Status (*op)(const Tensor&, Tensor&),
std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<element_type>::Type>&& input_values,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Tensor input(TensorType(Shape(shape), element_type), std::data(input_values));
Tensor expected(TensorType(Shape(shape), element_type),
std::data(expected_values));
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(shape), element_type), result_values.data());
ASSERT_OK(op(input, result));
EXPECT_THAT(result, IsAlmostSame(expected)) << "input: " << input;
}
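// Quantized variant: quantizes the inputs and the expected values with the
// same parameters, runs the op on storage-type tensors, and compares in the
// quantized domain.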
template <ElementType storage_type, ElementType expressed_type>
void test(
absl::Status (*op)(const QuantizedTensor&, QuantizedTensor&),
std::initializer_list<DimensionSize>&& shape,
QuantizedParameter&& quantized_parameter,
std::vector<typename Storage<expressed_type>::Type>&& input_values,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto input_quant_values = QuantizeVector<storage_type, expressed_type>(
input_values, quantized_parameter);
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
std::vector<typename Storage<storage_type>::Type> result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor input(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
input_quant_values.data());
QuantizedTensor expected(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
ASSERT_OK(op(input, result));
EXPECT_THAT(result, IsAlmostSame(expected)) << "input: " << input;
}
TEST(ElementwiseUnary, Abs) {
test<ElementType::kSI8>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kSI16>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kSI32>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kBF16>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kF16>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kF32>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
}
TEST(ElementwiseBinary, AbsQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Abs, {5}, {.scale = 1, .zero_point = 0}, {0, 1, -2, 3, -4},
{0, 1, 2, 3, 4});
test<ElementType::kSI8, ElementType::kF16>(
Abs, {5}, {.scale = 1e-1, .zero_point = 1}, {0, 1, -2, 3, -4},
{0, 1, 2, 3, 4});
test<ElementType::kSI8, ElementType::kF32>(
Abs, {5}, {.scale = 1e-1, .zero_point = -1}, {0, 1, -2, 3, -4},
{0, 1, 2, 3, 4});
test<ElementType::kSI16, ElementType::kF32>(
Abs, {5}, {.scale = 1e-3, .zero_point = -1}, {0, 1, -2, 3, -4},
{0, 1, 2, 3, 4});
}
TEST(ElementwiseUnary, Cbrt) {
test<ElementType::kBF16>(
Cbrt, {4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kF16>(
Cbrt, {4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kF32>(
Cbrt, {4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
}
TEST(ElementwiseUnary, CbrtQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Cbrt, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kSI8, ElementType::kF16>(
Cbrt, {4}, {.scale = 1e-1, .zero_point = -2}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kSI8, ElementType::kF32>(
Cbrt, {4}, {.scale = 1e-1, .zero_point = 4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kSI16, ElementType::kF32>(
Cbrt, {4}, {.scale = 1e-1, .zero_point = 4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
}
TEST(ElementwiseUnary, Ceil) {
test<ElementType::kBF16>(Ceil, {4}, {0, 1.1, -2.7, 3.5}, {0, 2, -2, 4});
test<ElementType::kF16>(Ceil, {4}, {0, 1.1, -2.7, 3.5}, {0, 2, -2, 4});
test<ElementType::kF32>(Ceil, {4}, {0, 1.1, -2.7, 3.5}, {0, 2, -2, 4});
}
TEST(ElementwiseUnary, CeilQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Ceil, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -2.7, 3.5},
{0, 2, -2, 4});
test<ElementType::kSI8, ElementType::kF16>(
Ceil, {4}, {.scale = 1e-1, .zero_point = 4}, {0, 1.1, -2.7, 3.5},
{0, 2, -2, 4});
test<ElementType::kSI8, ElementType::kF32>(
Ceil, {4}, {.scale = 1e-1, .zero_point = -4}, {0, 1.1, -2.7, 3.5},
{0, 2, -2, 4});
test<ElementType::kSI16, ElementType::kF32>(
Ceil, {4}, {.scale = 1e-2, .zero_point = -4}, {0, 1.11, -2.77, 3.55},
{0, 2, -2, 4});
}
TEST(ElementwiseUnary, Cosine) {
test<ElementType::kBF16>(Cosine, {4}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kF16>(Cosine, {4}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kF32>(Cosine, {4}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
}
TEST(ElementwiseUnary, CosineQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Cosine, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kSI8, ElementType::kF16>(
Cosine, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kSI8, ElementType::kF32>(
Cosine, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kSI16, ElementType::kF32>(
Cosine, {4}, {.scale = 1e-4, .zero_point = 0}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
}
TEST(ElementwiseUnary, CountLeadingZeros) {
test<ElementType::kSI8>(CountLeadingZeros, {4}, {0, 1, 127, -1},
{8, 7, 1, 0});
test<ElementType::kSI16>(CountLeadingZeros, {4}, {0, 1, 32767, -1},
{16, 15, 1, 0});
test<ElementType::kSI32>(CountLeadingZeros, {4}, {0, 1, 2147483647, -1},
{32, 31, 1, 0});
}
TEST(ElementwiseUnary, Exponential) {
test<ElementType::kBF16>(Exponential, {4}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kF16>(Exponential, {4}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kF32>(Exponential, {4}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
}
TEST(ElementwiseUnary, ExponentialQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Exponential, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kSI8, ElementType::kF16>(
Exponential, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kSI8, ElementType::kF32>(
Exponential, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kSI16, ElementType::kF32>(
Exponential, {4}, {.scale = 1e-2, .zero_point = 0}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
}
TEST(ElementwiseUnary, ExponentialMinusOne) {
test<ElementType::kBF16>(ExponentialMinusOne, {4}, {0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kF16>(ExponentialMinusOne, {4}, {0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kF32>(ExponentialMinusOne, {4}, {0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
}
TEST(ElementwiseUnary, ExponentialMinusOneQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
ExponentialMinusOne, {4}, {.scale = 1e-1, .zero_point = 0},
{0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kSI8, ElementType::kF16>(
ExponentialMinusOne, {4}, {.scale = 1e-1, .zero_point = 0},
{0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kSI8, ElementType::kF32>(
ExponentialMinusOne, {4}, {.scale = 1e-1, .zero_point = 0},
{0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kSI16, ElementType::kF32>(
ExponentialMinusOne, {4}, {.scale = 1e-2, .zero_point = 0},
{0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
}
TEST(ElementwiseUnary, Floor) {
test<ElementType::kBF16>(Floor, {4}, {0, 1.1, -2.7, 3.5}, {0, 1, -3, 3});
test<ElementType::kF16>(Floor, {4}, {0, 1.1, -2.7, 3.5}, {0, 1, -3, 3});
test<ElementType::kF32>(Floor, {4}, {0, 1.1, -2.7, 3.5}, {0, 1, -3, 3});
}
TEST(ElementwiseUnary, FloorQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Floor, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -2.7, 3.5},
{0, 1, -3, 3});
test<ElementType::kSI8, ElementType::kF16>(
Floor, {4}, {.scale = 1e-1, .zero_point = 4}, {0, 1.1, -2.7, 3.5},
{0, 1, -3, 3});
test<ElementType::kSI8, ElementType::kF32>(
Floor, {4}, {.scale = 1e-1, .zero_point = -4}, {0, 1.1, -2.7, 3.5},
{0, 1, -3, 3});
test<ElementType::kSI16, ElementType::kF32>(
Floor, {4}, {.scale = 1e-2, .zero_point = -4}, {0, 1.11, -2.77, 3.55},
{0, 1, -3, 3});
}
TEST(ElementwiseUnary, Log) {
test<ElementType::kBF16>(Log, {4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
test<ElementType::kF16>(Log, {4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
test<ElementType::kF32>(Log, {4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
}
TEST(ElementwiseUnary, LogQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Log, {4}, {.scale = 1e-1, .zero_point = -4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI8, ElementType::kF16>(
Log, {4}, {.scale = 1e-1, .zero_point = -4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI8, ElementType::kF32>(
Log, {4}, {.scale = 1e-1, .zero_point = -4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI16, ElementType::kF32>(
Log, {4}, {.scale = 1e-3, .zero_point = -4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
}
TEST(ElementwiseUnary, LogPlusOne) {
test<ElementType::kBF16>(LogPlusOne, {4}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
test<ElementType::kF16>(LogPlusOne, {4}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
test<ElementType::kF32>(LogPlusOne, {4}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
}
TEST(ElementwiseUnary, LogPlusOneQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
LogPlusOne, {4}, {.scale = 1e-1, .zero_point = 0}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI8, ElementType::kF16>(
LogPlusOne, {4}, {.scale = 1e-1, .zero_point = 0}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI8, ElementType::kF32>(
LogPlusOne, {4}, {.scale = 1e-1, .zero_point = 0}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI16, ElementType::kF32>(
LogPlusOne, {4}, {.scale = 1e-4, .zero_point = 0}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
}
TEST(ElementwiseUnary, Logistic) {
test<ElementType::kBF16>(Logistic, {4}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f,
0.5, 0.62245933120185456464f});
test<ElementType::kF16>(Logistic, {4}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f,
0.5, 0.62245933120185456464f});
test<ElementType::kF32>(Logistic, {4}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f,
0.5, 0.62245933120185456464f});
}
TEST(ElementwiseUnary, LogisticQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Logistic, {4}, {.scale = 1e-1, .zero_point = 0}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f, 0.5,
0.62245933120185456464f});
test<ElementType::kSI8, ElementType::kF16>(
Logistic, {4}, {.scale = 1e-1, .zero_point = 0}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f, 0.5,
0.62245933120185456464f});
test<ElementType::kSI8, ElementType::kF32>(
Logistic, {4}, {.scale = 1e-1, .zero_point = 0}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f, 0.5,
0.62245933120185456464f});
test<ElementType::kSI16, ElementType::kF32>(
Logistic, {4}, {.scale = 1e-3, .zero_point = 0}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f, 0.5,
0.62245933120185456464f});
}
TEST(ElementwiseUnary, Negate) {
test<ElementType::kSI8>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kSI16>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kSI32>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kBF16>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kF16>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kF32>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
}
TEST(ElementwiseBinary, NegateQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Negate, {5}, {.scale = 1, .zero_point = 0}, {0, 1, -2, 3, -4},
{0, -1, 2, -3, 4});
test<ElementType::kSI8, ElementType::kF16>(
Negate, {5}, {.scale = 1e-1, .zero_point = 1}, {0, 1, -2, 3, -4},
{0, -1, 2, -3, 4});
test<ElementType::kSI8, ElementType::kF32>(
Negate, {5}, {.scale = 1e-1, .zero_point = -1}, {0, 1, -2, 3, -4},
{0, -1, 2, -3, 4});
test<ElementType::kSI16, ElementType::kF32>(
Negate, {5}, {.scale = 1e-3, .zero_point = -1}, {0, 1, -2, 3, -4},
{0, -1, 2, -3, 4});
}
TEST(ElementwiseUnary, Not) {
test<ElementType::kI1>(Not, {2}, {0, 1}, {1, 0});
test<ElementType::kSI8>(Not, {5}, {-2, -1, 0, 1, 2},
{1, 0, int8_t(0xFF), int8_t(0xFE), int8_t(0xFD)});
test<ElementType::kSI16>(
Not, {5}, {-2, -1, 0, 1, 2},
{1, 0, int16_t(0xFFFF), int16_t(0xFFFE), int16_t(0xFFFD)});
test<ElementType::kSI32>(
Not, {5}, {-2, -1, 0, 1, 2},
{1, 0, int32_t(0xFFFFFFFFU), int32_t(0xFFFFFFFEU), int32_t(0xFFFFFFFDU)});
}
TEST(ElementwiseUnary, Popcnt) {
test<ElementType::kSI8>(Popcnt, {4}, {0, 1, 2, 127}, {0, 1, 1, 7});
test<ElementType::kSI16>(Popcnt, {4}, {0, 1, 2, 127}, {0, 1, 1, 7});
test<ElementType::kSI32>(Popcnt, {4}, {0, 1, 2, 127}, {0, 1, 1, 7});
}
TEST(ElementwiseUnary, RoundNearestAfz) {
test<ElementType::kBF16>(RoundNearestAfz, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kF16>(RoundNearestAfz, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kF32>(RoundNearestAfz, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-3.0, 0.0, 1.0, 1.0, 3.0});
}
TEST(ElementwiseBinary, RoundNearestAfzQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
RoundNearestAfz, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kSI8, ElementType::kF16>(
RoundNearestAfz, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kSI8, ElementType::kF32>(
RoundNearestAfz, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kSI16, ElementType::kF32>(
RoundNearestAfz, {5}, {.scale = 1e-2, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-3.0, 0.0, 1.0, 1.0, 3.0});
}
TEST(ElementwiseUnary, RoundNearestEven) {
test<ElementType::kBF16>(RoundNearestEven, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kF16>(RoundNearestEven, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kF32>(RoundNearestEven, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-2.0, 0.0, 0.0, 1.0, 2.0});
}
TEST(ElementwiseBinary, RoundNearestEvenQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
RoundNearestEven, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kSI8, ElementType::kF16>(
RoundNearestEven, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kSI8, ElementType::kF32>(
RoundNearestEven, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kSI16, ElementType::kF32>(
RoundNearestEven, {5}, {.scale = 1e-2, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-2.0, 0.0, 0.0, 1.0, 2.0});
}
TEST(ElementwiseUnary, Rsqrt) {
test<ElementType::kBF16>(Rsqrt, {4}, {1.0, 4.0, 9.0, 25.0},
{1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 5.0});
test<ElementType::kF16>(Rsqrt, {4}, {1.0, 4.0, 9.0, 25.0},
{1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 5.0});
test<ElementType::kF32>(Rsqrt, {4}, {1.0, 4.0, 9.0, 25.0},
{1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 5.0});
}
TEST(ElementwiseUnary, RsqrtQuantized) {
test<ElementType::kSI16, ElementType::kF32>(
Rsqrt, {4}, {.scale = 1e-3, .zero_point = 0}, {1.0, 4.0, 9.0, 25.0},
{1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 5.0});
}
TEST(ElementwiseUnary, Sign) {
test<ElementType::kSI8>(Sign, {3}, {-2, 0, 2}, {-1, 0, 1});
test<ElementType::kSI16>(Sign, {3}, {-2, 0, 2}, {-1, 0, 1});
test<ElementType::kSI32>(Sign, {3}, {-2, 0, 2}, {-1, 0, 1});
test<ElementType::kBF16>(
Sign, {8}, {+NAN, -NAN, +INFINITY, -INFINITY, -2.0, -0.0, +0.0, 2.0},
{NAN, NAN, 1, -1, -1, 0, 0, 1});
test<ElementType::kF16>(
Sign, {8}, {+NAN, -NAN, +INFINITY, -INFINITY, -2.0, -0.0, +0.0, 2.0},
{NAN, NAN, 1, -1, -1, 0, 0, 1});
test<ElementType::kF32>(
Sign, {8}, {+NAN, -NAN, +INFINITY, -INFINITY, -2.0, -0.0, +0.0, 2.0},
{NAN, NAN, 1, -1, -1, 0, 0, 1});
}
TEST(ElementwiseUnary, SignQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Sign, {4}, {.scale = 1e-1, .zero_point = 0}, {-2.0, -0.0, +0.0, 2.0},
{-1, 0, 0, 1});
test<ElementType::kSI8, ElementType::kF16>(
Sign, {4}, {.scale = 1e-1, .zero_point = 0}, {-2.0, -0.0, +0.0, 2.0},
{-1, 0, 0, 1});
test<ElementType::kSI8, ElementType::kF32>(
Sign, {4}, {.scale = 1e-1, .zero_point = 0}, {-2.0, -0.0, +0.0, 2.0},
{-1, 0, 0, 1});
test<ElementType::kSI16, ElementType::kF32>(
Sign, {4}, {.scale = 1e-2, .zero_point = 0}, {-2.0, -0.0, +0.0, 2.0},
{-1, 0, 0, 1});
}
TEST(ElementwiseUnary, Sine) {
test<ElementType::kBF16>(Sine, {5}, {0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI},
{0, 1, 0, -1, 0});
test<ElementType::kF16>(Sine, {5}, {0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI},
{0, 1, 0, -1, 0});
test<ElementType::kF32>(Sine, {5}, {0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI},
{0, 1, 0, -1, 0});
}
TEST(ElementwiseUnary, SineQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Sine, {5}, {.scale = 1e-1, .zero_point = 0},
{0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI}, {0, 1, 0, -1, 0});
test<ElementType::kSI8, ElementType::kF16>(
Sine, {5}, {.scale = 1e-1, .zero_point = 0},
{0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI}, {0, 1, 0, -1, 0});
test<ElementType::kSI8, ElementType::kF32>(
Sine, {5}, {.scale = 1e-1, .zero_point = 0},
{0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI}, {0, 1, 0, -1, 0});
test<ElementType::kSI16, ElementType::kF32>(
Sine, {5}, {.scale = 1e-2, .zero_point = 0},
{0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI}, {0, 1, 0, -1, 0});
}
TEST(ElementwiseUnary, Sqrt) {
test<ElementType::kBF16>(Sqrt, {4}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kF16>(Sqrt, {4}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kF32>(Sqrt, {4}, {0, 1, 4, 9}, {0, 1, 2, 3});
}
TEST(ElementwiseUnary, SqrtQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Sqrt, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kSI8, ElementType::kF16>(
Sqrt, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kSI8, ElementType::kF32>(
Sqrt, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kSI16, ElementType::kF32>(
Sqrt, {4}, {.scale = 1e-2, .zero_point = 0}, {0, 1, 4, 9}, {0, 1, 2, 3});
}
TEST(ElementwiseUnary, Tanh) {
test<ElementType::kBF16>(Tanh, {3}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kF16>(Tanh, {3}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kF32>(Tanh, {3}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
}
TEST(ElementwiseUnary, TanhQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Tanh, {3}, {.scale = 1e-1, .zero_point = 0}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kSI8, ElementType::kF16>(
Tanh, {3}, {.scale = 1e-1, .zero_point = 0}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kSI8, ElementType::kF32>(
Tanh, {3}, {.scale = 1e-1, .zero_point = 0}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kSI16, ElementType::kF32>(
Tanh, {3}, {.scale = 1e-2, .zero_point = 0}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/elementwise_unary.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/elementwise_unary_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b72f667a-24cd-4daf-9d39-213b23b9b095 | cpp | abseil/abseil-cpp | pool_urbg | absl/random/internal/pool_urbg.cc | absl/random/internal/pool_urbg_test.cc | #include "absl/random/internal/pool_urbg.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <iterator>
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/optimization.h"
#include "absl/random/internal/randen.h"
#include "absl/random/internal/seed_material.h"
#include "absl/random/seed_gen_exception.h"
using absl::base_internal::SpinLock;
using absl::base_internal::SpinLockHolder;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
namespace {
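// One slot of the shared pool: a spinlock-guarded block of Randen output
// that is handed out a few 32-bit words at a time and regenerated in place
// once exhausted.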
class RandenPoolEntry {
public:
static constexpr size_t kState = RandenTraits::kStateBytes / sizeof(uint32_t);
static constexpr size_t kCapacity =
RandenTraits::kCapacityBytes / sizeof(uint32_t);
void Init(absl::Span<const uint32_t> data) {
SpinLockHolder l(&mu_);
std::copy(data.begin(), data.end(), std::begin(state_));
next_ = kState;
}
void Fill(uint8_t* out, size_t bytes) ABSL_LOCKS_EXCLUDED(mu_);
template <typename T>
inline T Generate() ABSL_LOCKS_EXCLUDED(mu_);
inline void MaybeRefill() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (next_ >= kState) {
next_ = kCapacity;
impl_.Generate(state_);
}
}
private:
uint32_t state_[kState] ABSL_GUARDED_BY(mu_);
SpinLock mu_;
const Randen impl_;
size_t next_ ABSL_GUARDED_BY(mu_);
};
template <>
inline uint8_t RandenPoolEntry::Generate<uint8_t>() {
SpinLockHolder l(&mu_);
MaybeRefill();
return static_cast<uint8_t>(state_[next_++]);
}
template <>
inline uint16_t RandenPoolEntry::Generate<uint16_t>() {
SpinLockHolder l(&mu_);
MaybeRefill();
return static_cast<uint16_t>(state_[next_++]);
}
template <>
inline uint32_t RandenPoolEntry::Generate<uint32_t>() {
SpinLockHolder l(&mu_);
MaybeRefill();
return state_[next_++];
}
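// A 64-bit draw consumes two consecutive 32-bit words, so refill early when
// fewer than two words remain.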
template <>
inline uint64_t RandenPoolEntry::Generate<uint64_t>() {
SpinLockHolder l(&mu_);
if (next_ >= kState - 1) {
next_ = kCapacity;
impl_.Generate(state_);
}
auto p = state_ + next_;
next_ += 2;
uint64_t result;
std::memcpy(&result, p, sizeof(result));
return result;
}
void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) {
SpinLockHolder l(&mu_);
while (bytes > 0) {
MaybeRefill();
size_t remaining = (kState - next_) * sizeof(state_[0]);
size_t to_copy = std::min(bytes, remaining);
std::memcpy(out, &state_[next_], to_copy);
out += to_copy;
bytes -= to_copy;
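    // Round up: a partially consumed word is discarded rather than split
    // across calls.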
next_ += (to_copy + sizeof(state_[0]) - 1) / sizeof(state_[0]);
}
}
static constexpr size_t kPoolSize = 8;
static absl::once_flag pool_once;
ABSL_CACHELINE_ALIGNED static RandenPoolEntry* shared_pools[kPoolSize];
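// Assigns each thread round-robin to one of the kPoolSize entries, cached in
// a thread_local where available; the pthread-TLS fallback stores ids offset
// by one so that 0 can mean "unassigned".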
size_t GetPoolID() {
static_assert(kPoolSize >= 1,
"At least one urbg instance is required for PoolURBG");
ABSL_CONST_INIT static std::atomic<uint64_t> sequence{0};
#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local size_t my_pool_id = kPoolSize;
if (ABSL_PREDICT_FALSE(my_pool_id == kPoolSize)) {
my_pool_id = (sequence++ % kPoolSize);
}
return my_pool_id;
#else
static pthread_key_t tid_key = [] {
pthread_key_t tmp_key;
int err = pthread_key_create(&tmp_key, nullptr);
if (err) {
ABSL_RAW_LOG(FATAL, "pthread_key_create failed with %d", err);
}
return tmp_key;
}();
uintptr_t my_pool_id =
reinterpret_cast<uintptr_t>(pthread_getspecific(tid_key));
if (ABSL_PREDICT_FALSE(my_pool_id == 0)) {
my_pool_id = (sequence++ % kPoolSize) + 1;
int err = pthread_setspecific(tid_key, reinterpret_cast<void*>(my_pool_id));
if (err) {
ABSL_RAW_LOG(FATAL, "pthread_setspecific failed with %d", err);
}
}
return my_pool_id - 1;
#endif
}
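// Over-allocates and placement-constructs a RandenPoolEntry at an address
// aligned to at least a cache line. The raw allocation is never freed: the
// pool lives for the remainder of the process.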
RandenPoolEntry* PoolAlignedAlloc() {
constexpr size_t kAlignment =
ABSL_CACHELINE_SIZE > 32 ? ABSL_CACHELINE_SIZE : 32;
uintptr_t x = reinterpret_cast<uintptr_t>(
new char[sizeof(RandenPoolEntry) + kAlignment]);
auto y = x % kAlignment;
void* aligned = reinterpret_cast<void*>(y == 0 ? x : (x + kAlignment - y));
return new (aligned) RandenPoolEntry();
}
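// Seeds every pool entry from a single block of OS entropy; if entropy is
// unavailable this fails via ThrowSeedGenException.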
void InitPoolURBG() {
static constexpr size_t kSeedSize =
RandenTraits::kStateBytes / sizeof(uint32_t);
uint32_t seed_material[kPoolSize * kSeedSize];
if (!random_internal::ReadSeedMaterialFromOSEntropy(
absl::MakeSpan(seed_material))) {
random_internal::ThrowSeedGenException();
}
for (size_t i = 0; i < kPoolSize; i++) {
shared_pools[i] = PoolAlignedAlloc();
shared_pools[i]->Init(
absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize));
}
}
RandenPoolEntry* GetPoolForCurrentThread() {
absl::call_once(pool_once, InitPoolURBG);
return shared_pools[GetPoolID()];
}
}
template <typename T>
typename RandenPool<T>::result_type RandenPool<T>::Generate() {
auto* pool = GetPoolForCurrentThread();
return pool->Generate<T>();
}
template <typename T>
void RandenPool<T>::Fill(absl::Span<result_type> data) {
auto* pool = GetPoolForCurrentThread();
pool->Fill(reinterpret_cast<uint8_t*>(data.data()),
data.size() * sizeof(result_type));
}
template class RandenPool<uint8_t>;
template class RandenPool<uint16_t>;
template class RandenPool<uint32_t>;
template class RandenPool<uint64_t>;
}
ABSL_NAMESPACE_END
} | #include "absl/random/internal/pool_urbg.h"
#include <algorithm>
#include <bitset>
#include <cmath>
#include <cstdint>
#include <iterator>
#include "gtest/gtest.h"
#include "absl/meta/type_traits.h"
#include "absl/types/span.h"
using absl::random_internal::PoolURBG;
using absl::random_internal::RandenPool;
namespace {
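// Dispatch helper for MyFill below: RandenPool exposes a bulk Fill(), while
// the PoolURBG adaptor only models a plain URBG, so the generic overload
// falls back to std::generate.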
template <typename T>
using is_randen_pool = typename absl::disjunction<
std::is_same<T, RandenPool<uint8_t>>,
std::is_same<T, RandenPool<uint16_t>>,
std::is_same<T, RandenPool<uint32_t>>,
std::is_same<T, RandenPool<uint64_t>>>;
template <typename T, typename V>
typename absl::enable_if_t<absl::negation<is_randen_pool<T>>::value, void>
MyFill(T& rng, absl::Span<V> data) {
std::generate(std::begin(data), std::end(data), rng);
}
template <typename T, typename V>
typename absl::enable_if_t<is_randen_pool<T>::value, void>
MyFill(T& rng, absl::Span<V> data) {
rng.Fill(data);
}
template <typename EngineType>
class PoolURBGTypedTest : public ::testing::Test {};
using EngineTypes = ::testing::Types<
RandenPool<uint8_t>,
RandenPool<uint16_t>,
RandenPool<uint32_t>,
RandenPool<uint64_t>,
PoolURBG<uint8_t, 2>,
PoolURBG<uint16_t, 2>,
PoolURBG<uint32_t, 2>,
PoolURBG<uint64_t, 2>,
PoolURBG<unsigned int, 8>,
PoolURBG<unsigned long, 8>,
PoolURBG<unsigned long int, 4>,
PoolURBG<unsigned long long, 4>>;
TYPED_TEST_SUITE(PoolURBGTypedTest, EngineTypes);
TYPED_TEST(PoolURBGTypedTest, URBGInterface) {
using E = TypeParam;
using T = typename E::result_type;
static_assert(std::is_copy_constructible<E>::value,
"engine must be copy constructible");
static_assert(absl::is_copy_assignable<E>::value,
"engine must be copy assignable");
E e;
const E x;
e();
static_assert(std::is_same<decltype(e()), T>::value,
"return type of operator() must be result_type");
E u0(x);
u0();
E u1 = e;
u1();
}
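// Copies of a pool-backed engine draw from shared state rather than
// replaying a private stream, so two copies should produce statistically
// independent output: roughly half the bits differ between the sequences,
// set bits sit near 50%, and element-wise collisions stay rare.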
TYPED_TEST(PoolURBGTypedTest, VerifySequences) {
using E = TypeParam;
using result_type = typename E::result_type;
E rng;
(void)rng();
constexpr int kNumOutputs = 64;
result_type a[kNumOutputs];
result_type b[kNumOutputs];
std::fill(std::begin(b), std::end(b), 0);
{
E x = rng;
MyFill(x, absl::MakeSpan(a));
}
{
E x = rng;
std::generate(std::begin(b), std::end(b), x);
}
size_t changed_bits = 0;
size_t unchanged_bits = 0;
size_t total_set = 0;
size_t total_bits = 0;
size_t equal_count = 0;
for (size_t i = 0; i < kNumOutputs; ++i) {
equal_count += (a[i] == b[i]) ? 1 : 0;
std::bitset<sizeof(result_type) * 8> bitset(a[i] ^ b[i]);
changed_bits += bitset.count();
unchanged_bits += bitset.size() - bitset.count();
std::bitset<sizeof(result_type) * 8> a_set(a[i]);
std::bitset<sizeof(result_type) * 8> b_set(b[i]);
total_set += a_set.count() + b_set.count();
total_bits += 2 * 8 * sizeof(result_type);
}
EXPECT_LE(changed_bits, 0.60 * (changed_bits + unchanged_bits));
EXPECT_GE(changed_bits, 0.40 * (changed_bits + unchanged_bits));
EXPECT_NEAR(total_set, total_bits * 0.5, 4 * std::sqrt(total_bits))
<< "@" << total_set / static_cast<double>(total_bits);
const double kExpected = kNumOutputs / (1.0 * sizeof(result_type) * 8);
EXPECT_LE(equal_count, 1.0 + kExpected);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/pool_urbg.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/pool_urbg_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
8a36c167-4407-4e5f-84ef-a0ee90099328 | cpp | tensorflow/tensorflow | pjrt_c_api_client | third_party/xla/xla/pjrt/pjrt_c_api_client.cc | third_party/xla/xla/pjrt/pjrt_c_api_client_test.cc | #include "xla/pjrt/pjrt_c_api_client.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/ErrorHandling.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include "xla/pjrt/c/pjrt_c_api_layouts_extension.h"
#include "xla/pjrt/c/pjrt_c_api_profiler_extension.h"
#include "xla/pjrt/c/pjrt_c_api_stream_extension.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/pjrt_api.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
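// Like RETURN_STATUS_IF_PJRT_ERROR, but for methods returning PjRtFuture<>:
// a failing PJRT_Error is converted into an immediately-ready failed future
// and returned from the enclosing function.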
#define RETURN_FUTURE_IF_ERROR(expr, c_api) \
do { \
PJRT_Error* error = (expr); \
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> _error( \
error, pjrt::MakeErrorDeleter(c_api)); \
absl::Status _status = pjrt::PjrtErrorToStatus(_error.get(), c_api); \
if (!_status.ok()) { \
return PjRtFuture<>(_status); \
} \
} while (false)
static absl::StatusOr<const PjRtCApiTopologyDescription> InitClientTopoDesc(
const PJRT_Api* c_api, PJRT_Client* c_client) {
absl::StatusOr<PJRT_TopologyDescription*> c_topo =
pjrt::GetTopologyDescription(c_client, c_api);
TF_RETURN_IF_ERROR(c_topo.status());
return PjRtCApiTopologyDescription(c_api, *c_topo, false);
}
PjRtCApiClient::PjRtCApiClient(
const PJRT_Api* c_api, PJRT_Client* c_client,
std::unique_ptr<pjrt::PJRT_KeyValueCallbackData> kv_callback_data)
: c_api_(c_api),
c_client_(std::unique_ptr<PJRT_Client, ::pjrt::PJRT_ClientDeleter>(
c_client, ::pjrt::MakeClientDeleter(c_api))),
kv_callback_data_(std::move(kv_callback_data)),
topo_desc_(InitClientTopoDesc(c_api, c_client)),
platform_version_(absl::StrCat(
"PJRT C API\n", ::pjrt::GetPlatformVersion(c_client, c_api))),
platform_name_(::pjrt::GetPlatformName(c_client, c_api)),
platform_id_(tsl::Fingerprint64(platform_name_)) {
InitDevicesAndMemorySpaces();
InitAttributes();
LOG(INFO) << "PjRtCApiClient created.";
}
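// Enumerates devices, addressable devices, and addressable memory spaces
// from the C API, building the C++ wrapper objects and the links between
// devices and memories. Memory enumeration tolerates UNIMPLEMENTED errors
// so that plugins that do not implement the memory-space calls still load.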
void PjRtCApiClient::InitDevicesAndMemorySpaces() {
PJRT_Client_Devices_Args devices_args;
devices_args.struct_size = PJRT_Client_Devices_Args_STRUCT_SIZE;
devices_args.extension_start = nullptr;
devices_args.client = c_client_.get();
pjrt::LogFatalIfPjrtError(c_api_->PJRT_Client_Devices(&devices_args), c_api_);
const size_t num_devices = devices_args.num_devices;
c_to_cpp_device_map_.reserve(num_devices);
owned_devices_.reserve(num_devices);
devices_.reserve(num_devices);
for (int i = 0; i < num_devices; ++i) {
PJRT_Device* device = devices_args.devices[i];
std::unique_ptr<PjRtCApiDevice>& cpp_device = owned_devices_.emplace_back(
std::make_unique<PjRtCApiDevice>(device, this));
devices_.push_back(cpp_device.get());
c_to_cpp_device_map_[device] = cpp_device.get();
}
PJRT_Client_AddressableDevices_Args address_args;
address_args.struct_size = PJRT_Client_AddressableDevices_Args_STRUCT_SIZE;
address_args.extension_start = nullptr;
address_args.client = c_client_.get();
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_Client_AddressableDevices(&address_args), c_api_);
const size_t num_addressable_devices = address_args.num_addressable_devices;
addressable_devices_.reserve(num_addressable_devices);
for (int i = 0; i < num_addressable_devices; ++i) {
PJRT_Device* c_device = address_args.addressable_devices[i];
addressable_devices_.push_back(GetCppDevice(c_device));
}
PJRT_Client_AddressableMemories_Args memory_args;
memory_args.struct_size = PJRT_Client_AddressableMemories_Args_STRUCT_SIZE;
memory_args.extension_start = nullptr;
memory_args.client = c_client_.get();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> client_error(
c_api_->PJRT_Client_AddressableMemories(&memory_args),
pjrt::MakeErrorDeleter(c_api_));
if (client_error == nullptr) {
const size_t num_memories = memory_args.num_addressable_memories;
c_to_cpp_memory_map_.reserve(num_memories);
owned_memory_spaces_.reserve(num_memories);
addressable_memory_spaces_.reserve(num_memories);
for (int i = 0; i < num_memories; ++i) {
PJRT_Memory* memory = memory_args.addressable_memories[i];
std::unique_ptr<PjRtCApiMemorySpace>& cpp_memory =
owned_memory_spaces_.emplace_back(
std::make_unique<PjRtCApiMemorySpace>(memory, this));
addressable_memory_spaces_.push_back(cpp_memory.get());
c_to_cpp_memory_map_[memory] = cpp_memory.get();
}
} else if (pjrt::GetErrorCode(client_error.get(), c_api_) !=
PJRT_Error_Code_UNIMPLEMENTED) {
pjrt::LogFatalIfPjrtError(client_error.get(), c_api_);
}
for (const auto& device : addressable_devices_) {
PjRtCApiDevice* cpp_device = tensorflow::down_cast<PjRtCApiDevice*>(device);
PJRT_Device* c_device = cpp_device->c_device();
PJRT_Device_AddressableMemories_Args args;
args.struct_size = PJRT_Device_AddressableMemories_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = c_device;
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> device_error(
c_api_->PJRT_Device_AddressableMemories(&args),
pjrt::MakeErrorDeleter(c_api_));
if (device_error != nullptr) {
if (pjrt::GetErrorCode(device_error.get(), c_api_) !=
PJRT_Error_Code_UNIMPLEMENTED) {
pjrt::LogFatalIfPjrtError(device_error.get(), c_api_);
}
break;
}
const size_t num_memories = args.num_memories;
cpp_device->memory_spaces_.reserve(num_memories);
for (int i = 0; i < num_memories; ++i) {
cpp_device->memory_spaces_.push_back(GetCppMemory(args.memories[i]));
}
}
for (const auto& memory : addressable_memory_spaces_) {
PjRtCApiMemorySpace* cpp_memory =
tensorflow::down_cast<PjRtCApiMemorySpace*>(memory);
PJRT_Memory* c_memory = cpp_memory->c_memory();
PJRT_Memory_AddressableByDevices_Args args;
args.struct_size = PJRT_Memory_AddressableByDevices_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_Memory_AddressableByDevices(&args),
c_api_);
const size_t num_attached_devices = args.num_devices;
cpp_memory->devices_.reserve(num_attached_devices);
for (int i = 0; i < num_attached_devices; ++i) {
cpp_memory->devices_.push_back(GetCppDevice(args.devices[i]));
}
}
}
void PjRtCApiClient::InitAttributes() {
PJRT_Plugin_Attributes_Args args;
args.struct_size = PJRT_Plugin_Attributes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_Plugin_Attributes(&args), c_api_);
attributes_ =
pjrt::ConvertFromPjRtNamedValueList(args.attributes, args.num_attributes);
}
int PjRtCApiClient::device_count() const { return devices_.size(); }
int PjRtCApiClient::addressable_device_count() const {
return addressable_devices_.size();
}
absl::Span<PjRtDevice* const> PjRtCApiClient::devices() const {
return devices_;
}
absl::Span<PjRtDevice* const> PjRtCApiClient::addressable_devices() const {
return addressable_devices_;
}
int PjRtCApiClient::process_index() const {
PJRT_Client_ProcessIndex_Args process_index_args;
process_index_args.struct_size = PJRT_Client_ProcessIndex_Args_STRUCT_SIZE;
process_index_args.extension_start = nullptr;
process_index_args.client = c_client_.get();
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_Client_ProcessIndex(&process_index_args), c_api_);
return process_index_args.process_index;
}
absl::string_view PjRtCApiClient::platform_version() const {
return platform_version_;
}
std::optional<PjRtPluginAttributes> PjRtCApiClient::plugin_attributes() const {
return PjRtPluginAttributes{c_api_->pjrt_api_version.major_version,
c_api_->pjrt_api_version.minor_version,
attributes_};
}
static DeviceAssignment CalculateDefaultAssignment(
int num_replicas, int num_partitions,
absl::Span<const int> device_assignment) {
DeviceAssignment cpp_device_assignment(num_replicas, num_partitions);
const int* iterator = device_assignment.begin();
for (int replica = 0; replica < num_replicas; ++replica) {
for (int partition = 0; partition < num_partitions; ++partition) {
cpp_device_assignment(replica, partition) = *(iterator++);
}
}
return cpp_device_assignment;
}
absl::StatusOr<DeviceAssignment> PjRtCApiClient::GetDefaultDeviceAssignment(
int num_replicas, int num_partitions) const {
PJRT_Client_DefaultDeviceAssignment_Args args;
args.struct_size = PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.num_replicas = num_replicas;
args.num_partitions = num_partitions;
std::vector<int> assignment_buffer(num_replicas * num_partitions);
args.default_assignment_size = assignment_buffer.size();
args.default_assignment = assignment_buffer.data();
RETURN_STATUS_IF_PJRT_ERROR(
c_api_->PJRT_Client_DefaultDeviceAssignment(&args), c_api_);
absl::Span<const int> param{args.default_assignment,
args.default_assignment_size};
return CalculateDefaultAssignment(args.num_replicas, args.num_partitions,
param);
}
absl::StatusOr<PjRtDevice*> PjRtCApiClient::LookupDevice(
PjRtGlobalDeviceId global_device_id) const {
PJRT_Client_LookupDevice_Args args;
args.struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.id = global_device_id.value();
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Client_LookupDevice(&args), c_api_);
return GetCppDevice(args.device);
}
absl::StatusOr<PjRtDevice*> PjRtCApiClient::LookupAddressableDevice(
PjRtLocalDeviceId local_device_id) const {
PJRT_Client_LookupAddressableDevice_Args args;
args.struct_size = PJRT_Client_LookupAddressableDevice_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.local_hardware_id = local_device_id.value();
RETURN_STATUS_IF_PJRT_ERROR(
c_api_->PJRT_Client_LookupAddressableDevice(&args), c_api_);
return GetCppDevice(args.addressable_device);
}
absl::Span<PjRtMemorySpace* const> PjRtCApiClient::memory_spaces() const {
return addressable_memory_spaces_;
}
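// Shared by both Compile overloads: serializes CompileOptions to its proto
// form and passes the program bytes (HLO proto or StableHLO MLIR, as named
// by `format`) through PJRT_Client_Compile.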
static absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
InitializeArgsAndCompile(PjRtCApiClient* api_client, const PJRT_Api* c_api,
PJRT_Client* client, const CompileOptions& options,
const std::string& code, const std::string& format) {
PJRT_Client_Compile_Args args;
args.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE;
PJRT_Profiler_Extension profiler_extension =
pjrt::CreatePjrtProfilerExtension("PJRT_Client_Compile linkage");
args.extension_start =
reinterpret_cast<PJRT_Extension_Base*>(&profiler_extension);
args.client = client;
TF_ASSIGN_OR_RETURN(const CompileOptionsProto options_proto,
options.ToProto());
std::string options_str = options_proto.SerializeAsString();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
PJRT_Program program;
program.struct_size = PJRT_Program_STRUCT_SIZE;
program.extension_start = nullptr;
program.code = const_cast<char*>(code.c_str());
program.code_size = code.size();
program.format = format.c_str();
program.format_size = format.size();
args.program = &program;
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Client_Compile(&args), c_api);
std::unique_ptr<PjRtLoadedExecutable> ret =
std::make_unique<PjRtCApiLoadedExecutable>(api_client, args.executable);
return ret;
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> PjRtCApiClient::Compile(
const XlaComputation& computation, CompileOptions options) {
std::string module_str = computation.proto().SerializeAsString();
std::string format(pjrt::kHloFormat);
return InitializeArgsAndCompile(this, c_api_, c_client_.get(), options,
module_str, format);
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> PjRtCApiClient::Compile(
mlir::ModuleOp module, CompileOptions options) {
if (!pjrt_c_api()) llvm::report_fatal_error("pjrt_c_api is null");
TF_ASSIGN_OR_RETURN(
std::string serialized,
xla::Serialize(module,
xla::GetDefaultStablehloVersion(
plugin_attributes()->pjrt_c_api_minor_version)));
std::string format(pjrt::kMlirFormat);
return InitializeArgsAndCompile(this, c_api_, c_client_.get(), options,
serialized, format);
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
PjRtCApiClient::DeserializeExecutable(absl::string_view serialized,
std::optional<CompileOptions> options) {
PJRT_Executable_DeserializeAndLoad_Args des_args;
des_args.struct_size = PJRT_Executable_DeserializeAndLoad_Args_STRUCT_SIZE;
des_args.extension_start = nullptr;
des_args.client = c_client_.get();
des_args.serialized_executable = serialized.data();
des_args.serialized_executable_size = serialized.length();
const PJRT_Api* api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(
api->PJRT_Executable_DeserializeAndLoad(&des_args), api);
PJRT_LoadedExecutable* c_exec = des_args.loaded_executable;
CHECK(c_exec != nullptr);
return std::unique_ptr<PjRtLoadedExecutable>(
std::make_unique<PjRtCApiLoadedExecutable>(this, c_exec));
}
absl::StatusOr<const PjRtTopologyDescription*>
PjRtCApiClient::GetTopologyDescription() const {
if (!topo_desc_.ok()) {
return topo_desc_.status();
}
return &(*topo_desc_);
}
absl::StatusOr<std::uintptr_t> PjRtCApiClient::UnsafeBufferPointer(
PjRtBuffer* buffer) {
if (buffer->client() != this) {
return InvalidArgument(
"buffer passed to PjRtCApiClient::UnsafeBufferPointer() is from a "
"different client than that of the function call. Buffer's client "
"platform: '%s', function call's client platform: '%s'.",
buffer->client()->platform_name(), this->platform_name());
}
PJRT_Buffer_UnsafePointer_Args args;
args.struct_size = PJRT_Buffer_UnsafePointer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer =
tensorflow::down_cast<const PjRtCApiBuffer*>(buffer)->c_buffer();
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Buffer_UnsafePointer(&args), c_api_);
return args.buffer_pointer;
}
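// Common implementation behind the BufferFromHostBuffer overloads. Only the
// three immutable host-buffer semantics can be expressed through the C API;
// a provided on_done_with_host_buffer is attached to the
// done_with_host_buffer event via a heap-allocated trampoline that deletes
// itself after firing.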
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::BufferFromHostBufferInternalImpl(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
std::variant<PjRtDevice*, PjRtMemorySpace*> device_or_memory,
const Layout* device_layout) {
if (host_buffer_semantics != HostBufferSemantics::kImmutableOnlyDuringCall &&
host_buffer_semantics != HostBufferSemantics::kImmutableZeroCopy &&
host_buffer_semantics !=
HostBufferSemantics::kImmutableUntilTransferCompletes) {
return Unimplemented(
"PJRT C API does not support HostBufferSemantics other than "
"HostBufferSemantics::kImmutableOnlyDuringCall, "
"HostBufferSemantics::kImmutableZeroCopy and "
"HostBufferSemantics::kImmutableUntilTransferCompletes.");
}
PJRT_Client_BufferFromHostBuffer_Args args;
args.struct_size = PJRT_Client_BufferFromHostBuffer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.data = data;
args.type = ::pjrt::ConvertToPjRtBufferType(type);
args.dims = dims.data();
args.num_dims = dims.size();
if (byte_strides.has_value()) {
args.byte_strides = byte_strides.value().data();
args.num_byte_strides = byte_strides.value().size();
} else {
args.byte_strides = nullptr;
args.num_byte_strides = 0;
}
pjrt::BufferMemoryLayoutData c_layout_data;
if (device_layout != nullptr) {
TF_ASSIGN_OR_RETURN(c_layout_data,
pjrt::ConvertToBufferMemoryLayoutData(*device_layout));
args.device_layout = &c_layout_data.c_layout;
} else {
args.device_layout = nullptr;
}
args.host_buffer_semantics =
::pjrt::ConvertToPjRtHostBufferSemantics(host_buffer_semantics);
if (std::holds_alternative<PjRtDevice*>(device_or_memory)) {
args.device = tensorflow::down_cast<PjRtCApiDevice*>(
std::get<PjRtDevice*>(device_or_memory))
->c_device();
args.memory = nullptr;
} else {
CHECK(std::holds_alternative<PjRtMemorySpace*>(device_or_memory));
args.device = nullptr;
args.memory = tensorflow::down_cast<PjRtCApiMemorySpace*>(
std::get<PjRtMemorySpace*>(device_or_memory))
->c_memory();
}
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Client_BufferFromHostBuffer(&args),
c_api_);
auto buffer = std::unique_ptr<PjRtBuffer>(
std::make_unique<PjRtCApiBuffer>(this, args.buffer));
std::unique_ptr<PJRT_Event, ::pjrt::PJRT_EventDeleter> event(
args.done_with_host_buffer, ::pjrt::MakeEventDeleter(c_api_));
if (on_done_with_host_buffer) {
PJRT_Event_OnReady_Args event_args;
event_args.struct_size = PJRT_Event_OnReady_Args_STRUCT_SIZE;
event_args.extension_start = nullptr;
event_args.event = event.get();
event_args.user_arg = new absl::AnyInvocable<void(PJRT_Error*)>(
[on_done_with_host_buffer = std::move(on_done_with_host_buffer),
c_api = c_api_](PJRT_Error* error) mutable {
if (error) {
::pjrt::MakeErrorDeleter(c_api)(error);
}
std::move(on_done_with_host_buffer)();
});
event_args.callback = [](PJRT_Error* error, void* args) {
auto* on_done_with_host_buffer =
reinterpret_cast<absl::AnyInvocable<void(PJRT_Error*)>*>(args);
(*on_done_with_host_buffer)(error);
delete on_done_with_host_buffer;
};
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Event_OnReady(&event_args),
c_api_);
}
return buffer;
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
PjRtMemorySpace* memory_space, const Layout* device_layout) {
return BufferFromHostBufferInternalImpl(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), memory_space, device_layout);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer, PjRtDevice* device,
const Layout* device_layout) {
return BufferFromHostBufferInternalImpl(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), device, device_layout);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
PjRtDevice* device) {
return BufferFromHostBufferInternalImpl(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), device, nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::CreateViewOfDeviceBuffer(
void* device_ptr, const Shape& shape, PjRtDevice* device,
std::function<void()> on_delete_callback,
std::optional<std::intptr_t> stream) {
PJRT_Client_CreateViewOfDeviceBuffer_Args args;
args.struct_size = PJRT_Client_CreateViewOfDeviceBuffer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.device_buffer_ptr = device_ptr;
args.dims = shape.dimensions().data();
args.num_dims = shape.dimensions().size();
args.element_type = pjrt::ConvertToPjRtBufferType(shape.element_type());
pjrt::BufferMemoryLayoutData c_layout_data;
if (shape.has_layout()) {
TF_ASSIGN_OR_RETURN(c_layout_data,
pjrt::ConvertToBufferMemoryLayoutData(shape.layout()));
args.layout = &(c_layout_data.c_layout);
} else {
args.layout = nullptr;
}
if (on_delete_callback != nullptr) {
args.on_delete_callback_arg =
new std::function(std::move(on_delete_callback));
args.on_delete_callback = [](void* device_buffer_ptr, void* user_arg) {
auto* c_func = reinterpret_cast<std::function<void()>*>(user_arg);
(*c_func)();
delete c_func;
};
} else {
args.on_delete_callback = nullptr;
args.on_delete_callback_arg = nullptr;
}
args.device = tensorflow::down_cast<PjRtCApiDevice*>(device)->c_device();
if (stream.has_value()) {
args.stream = *stream;
} else {
args.stream = reinterpret_cast<intptr_t>(nullptr);
}
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(
c_api->PJRT_Client_CreateViewOfDeviceBuffer(&args), c_api);
return std::unique_ptr<PjRtBuffer>(
std::make_unique<PjRtCApiBuffer>(this, args.buffer));
}
absl::StatusOr<Layout> PjRtCApiClient::GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) {
const PJRT_Api* c_api = pjrt_c_api();
PJRT_Layouts_Extension* extension =
pjrt::FindExtension<PJRT_Layouts_Extension>(
c_api, PJRT_Extension_Type::PJRT_Extension_Type_Layouts);
if (extension == nullptr) {
return LayoutUtil::MakeDescendingLayout(dims.size());
}
PJRT_Layouts_PJRT_Client_GetDefaultLayout_Args args;
args.struct_size = PJRT_Layouts_PJRT_Client_GetDefaultLayout_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.type = pjrt::ConvertToPjRtBufferType(element_type);
args.dims = dims.data();
args.num_dims = dims.size();
RETURN_STATUS_IF_PJRT_ERROR(
extension->PJRT_Layouts_PJRT_Client_GetDefaultLayout(&args), c_api);
std::unique_ptr<PJRT_Layouts_MemoryLayout,
pjrt::PJRT_Layouts_MemoryLayoutDeleter>
layout_destroyer(args.layout, pjrt::MakeMemoryLayoutDeleter(c_api));
PJRT_Layouts_MemoryLayout_Serialize_Args serialize_args;
serialize_args.struct_size =
PJRT_Layouts_MemoryLayout_Serialize_Args_STRUCT_SIZE;
serialize_args.extension_start = nullptr;
serialize_args.layout = args.layout;
RETURN_STATUS_IF_PJRT_ERROR(
extension->PJRT_Layouts_MemoryLayout_Serialize(&serialize_args), c_api);
absl::Cleanup cleanup = [&serialize_args] {
serialize_args.serialized_layout_deleter(serialize_args.serialized_layout);
};
std::string serialized_layout(serialize_args.serialized_bytes,
serialize_args.serialized_bytes_size);
TF_ASSIGN_OR_RETURN(PjRtXlaLayout pjrt_xla_layout,
PjRtXlaLayout::Deserialize(serialized_layout));
return pjrt_xla_layout.xla_layout();
}
const PJRT_Api* PjRtCApiClient::pjrt_c_api() const { return c_api_; }
PjRtCApiDeviceDescription::PjRtCApiDeviceDescription(
const PJRT_Api* c_api, PJRT_DeviceDescription* device_description)
: c_api_(c_api), device_description_(device_description) {
InitAttributes();
}
int PjRtCApiDeviceDescription::id() const {
PJRT_DeviceDescription_Id_Args args;
args.struct_size = PJRT_DeviceDescription_Id_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_Id(&args), c_api_);
return args.id;
}
int PjRtCApiDeviceDescription::process_index() const {
PJRT_DeviceDescription_ProcessIndex_Args args;
args.struct_size = PJRT_DeviceDescription_ProcessIndex_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_ProcessIndex(&args),
c_api_);
return args.process_index;
}
void PjRtCApiDeviceDescription::InitAttributes() {
attributes_ = {};
PJRT_DeviceDescription_Attributes_Args args;
args.struct_size = PJRT_DeviceDescription_Attributes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_Attributes(&args),
c_api_);
for (int i = 0; i < args.num_attributes; ++i) {
const auto& attribute = args.attributes[i];
std::string attribute_name(attribute.name, attribute.name_size);
switch (attribute.type) {
case PJRT_NamedValue_Type::PJRT_NamedValue_kString: {
std::string string_value(attribute.string_value, attribute.value_size);
attributes_[attribute_name] = PjRtDeviceAttribute(string_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kInt64: {
attributes_[attribute_name] =
PjRtDeviceAttribute(attribute.int64_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kInt64List: {
const int64_t* array_ptr(attribute.int64_array_value);
std::vector<int64_t> int64_array(array_ptr,
array_ptr + attribute.value_size);
attributes_[attribute_name] = PjRtDeviceAttribute(int64_array);
break;
}
default: {
LOG(FATAL) << "PJRT_DeviceDescription_Attributes() returned attribute '"
<< attribute_name << "' with unsupported type "
<< attribute.type
<< " to PjRtCApiDeviceDescription::InitAttributes()";
break;
}
}
}
}
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>&
PjRtCApiDeviceDescription::Attributes() const {
return attributes_;
}
absl::string_view PjRtCApiDeviceDescription::device_kind() const {
PJRT_DeviceDescription_Kind_Args args;
args.struct_size = PJRT_DeviceDescription_Kind_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_Kind(&args), c_api_);
absl::string_view device_kind(args.device_kind, args.device_kind_size);
return device_kind;
}
absl::string_view PjRtCApiDeviceDescription::DebugString() const {
PJRT_DeviceDescription_DebugString_Args args;
args.struct_size = PJRT_DeviceDescription_DebugString_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_DebugString(&args),
c_api_);
absl::string_view debug_string(args.debug_string, args.debug_string_size);
return debug_string;
}
absl::string_view PjRtCApiDeviceDescription::ToString() const {
PJRT_DeviceDescription_ToString_Args args;
args.struct_size = PJRT_DeviceDescription_ToString_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_ToString(&args),
c_api_);
absl::string_view to_string(args.to_string, args.to_string_size);
return to_string;
}
PjRtCApiDevice::PjRtCApiDevice(PJRT_Device* device, PjRtCApiClient* client)
: client_(client),
device_(device),
description_(client->pjrt_c_api(),
pjrt::GetDeviceDescription(client->pjrt_c_api(), device)) {}
PjRtClient* PjRtCApiDevice::client() const { return client_; }
bool PjRtCApiDevice::IsAddressable() const {
PJRT_Device_IsAddressable_Args args;
args.struct_size = PJRT_Device_IsAddressable_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device_;
const PJRT_Api* api = client_->pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Device_IsAddressable(&args), api);
return args.is_addressable;
}
PjRtLocalHardwareId PjRtCApiDevice::local_hardware_id() const {
PJRT_Device_LocalHardwareId_Args args;
args.struct_size = PJRT_Device_LocalHardwareId_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device_;
const PJRT_Api* api = client_->pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Device_LocalHardwareId(&args), api);
return PjRtLocalHardwareId(args.local_hardware_id);
}
absl::StatusOr<PjRtMemorySpace*> PjRtCApiDevice::default_memory_space() const {
PJRT_Device_DefaultMemory_Args args;
args.struct_size = PJRT_Device_DefaultMemory_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device_;
const PJRT_Api* api = client_->pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Device_DefaultMemory(&args), api);
return client_->GetCppMemory(args.memory);
}
absl::StatusOr<tsl::AllocatorStats> PjRtCApiDevice::GetAllocatorStats() const {
PJRT_Device_MemoryStats_Args args;
args.struct_size = PJRT_Device_MemoryStats_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device_;
const PJRT_Api* api = client_->pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Device_MemoryStats(&args), api);
tsl::AllocatorStats result;
result.bytes_in_use = args.bytes_in_use;
if (args.peak_bytes_in_use_is_set) {
result.peak_bytes_in_use = args.peak_bytes_in_use;
} else {
result.peak_bytes_in_use = -1;
}
if (args.num_allocs_is_set) {
result.num_allocs = args.num_allocs;
} else {
result.num_allocs = -1;
}
if (args.largest_alloc_size_is_set) {
result.largest_alloc_size = args.largest_alloc_size;
} else {
result.largest_alloc_size = -1;
}
if (args.bytes_limit_is_set) {
result.bytes_limit = args.bytes_limit;
}
if (args.bytes_reserved_is_set) {
result.bytes_reserved = args.bytes_reserved;
} else {
result.bytes_reserved = -1;
}
if (args.peak_bytes_reserved_is_set) {
result.peak_bytes_reserved = args.peak_bytes_reserved;
} else {
result.peak_bytes_reserved = -1;
}
if (args.bytes_reservable_limit_is_set) {
result.bytes_reservable_limit = args.bytes_reservable_limit;
}
if (args.largest_free_block_bytes_is_set) {
result.largest_free_block_bytes = args.largest_free_block_bytes;
} else {
result.largest_free_block_bytes = -1;
}
if (args.pool_bytes_is_set) {
result.pool_bytes = args.pool_bytes;
}
if (args.peak_pool_bytes_is_set) {
result.peak_pool_bytes = args.peak_pool_bytes;
}
return result;
}
absl::StatusOr<std::intptr_t> PjRtCApiDevice::GetStreamForExternalReadyEvents()
const {
const PJRT_Api* c_api = client_->pjrt_c_api();
PJRT_Stream_Extension* extension = pjrt::FindExtension<PJRT_Stream_Extension>(
c_api, PJRT_Extension_Type::PJRT_Extension_Type_Stream);
if (extension == nullptr) {
return absl::UnimplementedError(
"Stream extension not implemented in this PJRT plugin.");
}
PJRT_Get_Stream_For_External_Ready_Events_Args args;
args.struct_size = PJRT_Get_Stream_For_External_Ready_Events_Args_STRUCT_SIZE;
args.device = device_;
RETURN_STATUS_IF_PJRT_ERROR(extension->get_stream(&args), c_api);
return args.stream;
}
const PJRT_Api* PjRtCApiMemorySpace::pjrt_c_api() const {
return client_->pjrt_c_api();
}
PjRtClient* PjRtCApiMemorySpace::client() const { return client_; }
int PjRtCApiMemorySpace::id() const {
PJRT_Memory_Id_Args args;
args.struct_size = PJRT_Memory_Id_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_Id(&args), pjrt_c_api());
return args.id;
}
absl::string_view PjRtCApiMemorySpace::kind() const {
PJRT_Memory_Kind_Args args;
args.struct_size = PJRT_Memory_Kind_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_Kind(&args),
pjrt_c_api());
return absl::string_view(args.kind, args.kind_size);
}
int PjRtCApiMemorySpace::kind_id() const {
PJRT_Memory_Kind_Id_Args args;
args.struct_size = PJRT_Memory_Kind_Id_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
if (pjrt_c_api()->pjrt_api_version.major_version > 0 ||
pjrt_c_api()->pjrt_api_version.minor_version >= 48) {
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_Kind_Id(&args),
pjrt_c_api());
return args.kind_id;
}
return tsl::Fingerprint32(kind());
}
absl::string_view PjRtCApiMemorySpace::DebugString() const {
PJRT_Memory_DebugString_Args args;
args.struct_size = PJRT_Memory_DebugString_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_DebugString(&args),
pjrt_c_api());
return absl::string_view(args.debug_string, args.debug_string_size);
}
absl::string_view PjRtCApiMemorySpace::ToString() const {
PJRT_Memory_ToString_Args args;
args.struct_size = PJRT_Memory_ToString_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_ToString(&args),
pjrt_c_api());
return absl::string_view(args.to_string, args.to_string_size);
}
PjRtCApiExecutable::PjRtCApiExecutable(const PJRT_Api* c_api,
PJRT_Executable* executable)
: c_api_(c_api),
executable_(executable, ::pjrt::MakeExecutableDeleter(c_api)) {}
absl::string_view PjRtCApiExecutable::name() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_Name_Args args;
args.executable = executable;
args.struct_size = PJRT_Executable_Name_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(c_api->PJRT_Executable_Name(&args), c_api);
return absl::string_view(args.executable_name, args.executable_name_size);
}
int PjRtCApiExecutable::num_replicas() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_NumReplicas_Args args;
args.executable = executable;
args.struct_size = PJRT_Executable_NumReplicas_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(c_api->PJRT_Executable_NumReplicas(&args), c_api);
return args.num_replicas;
}
int PjRtCApiExecutable::num_partitions() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_NumPartitions_Args args;
args.executable = executable;
args.struct_size = PJRT_Executable_NumPartitions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(c_api->PJRT_Executable_NumPartitions(&args), c_api);
return args.num_partitions;
}
int64_t PjRtCApiExecutable::SizeOfGeneratedCodeInBytes() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_SizeOfGeneratedCodeInBytes_Args args;
args.struct_size =
PJRT_Executable_SizeOfGeneratedCodeInBytes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
pjrt::LogFatalIfPjrtError(
c_api->PJRT_Executable_SizeOfGeneratedCodeInBytes(&args), c_api);
return args.size_in_bytes;
}
absl::StatusOr<absl::flat_hash_map<std::string, PjRtValueType>>
PjRtCApiExecutable::GetCostAnalysis() const {
PJRT_Executable_GetCostAnalysis_Args args;
args.struct_size = PJRT_Executable_GetCostAnalysis_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_GetCostAnalysis(&args),
c_api);
return pjrt::ConvertFromPjRtNamedValueList(args.properties,
args.num_properties);
}
absl::StatusOr<std::vector<std::vector<PrimitiveType>>>
PjRtCApiExecutable::GetOutputElementTypes() const {
PJRT_Executable_OutputElementTypes_Args args;
args.struct_size = PJRT_Executable_OutputElementTypes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OutputElementTypes(&args),
c_api);
std::vector<PrimitiveType> out;
out.reserve(args.num_output_types);
for (int i = 0; i < args.num_output_types; ++i) {
out.push_back(pjrt::ConvertFromPjRtBufferType(args.output_types[i]));
}
return std::vector<std::vector<PrimitiveType>>{std::move(out)};
}
absl::StatusOr<std::vector<std::vector<DimensionVector>>>
PjRtCApiExecutable::GetOutputDimensions() const {
PJRT_Executable_OutputDimensions_Args args;
args.struct_size = PJRT_Executable_OutputDimensions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OutputDimensions(&args),
c_api);
std::vector<DimensionVector> out;
out.reserve(args.num_outputs);
int index = 0;
for (int i = 0; i < args.num_outputs; ++i) {
DimensionVector dimensions;
dimensions.reserve(args.dim_sizes[i]);
for (int j = 0; j < args.dim_sizes[i]; ++j) {
dimensions.push_back(args.dims[index++]);
}
out.push_back(std::move(dimensions));
}
return std::vector<std::vector<DimensionVector>>{std::move(out)};
}
absl::StatusOr<std::vector<std::vector<absl::string_view>>>
PjRtCApiExecutable::GetOutputMemoryKinds() const {
PJRT_Executable_OutputMemoryKinds_Args args;
args.struct_size = PJRT_Executable_OutputMemoryKinds_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OutputMemoryKinds(&args),
c_api);
std::vector<absl::string_view> out;
out.reserve(args.num_outputs);
for (int i = 0; i < args.num_outputs; ++i) {
out.push_back(
absl::string_view(args.memory_kinds[i], args.memory_kind_sizes[i]));
}
return std::vector<std::vector<absl::string_view>>{std::move(out)};
}
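// Retrieves the optimized program in two calls: the first, with code set to
// nullptr, reports only the size; the second fills a preallocated buffer.
// The result is parsed either as an HloModuleProtoWithConfig or as StableHLO
// MLIR that is legalized back to HLO via MHLO.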
absl::StatusOr<std::vector<std::shared_ptr<HloModule>>>
PjRtCApiExecutable::GetHloModules() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_OptimizedProgram_Args args;
args.struct_size = PJRT_Executable_OptimizedProgram_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
PJRT_Program program;
program.struct_size = PJRT_Program_STRUCT_SIZE;
program.extension_start = nullptr;
program.code = nullptr;
args.program = &program;
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OptimizedProgram(&args),
c_api);
constexpr size_t TWO_GIBIBYTES = 2ull * 1024 * 1024 * 1024;
const size_t code_size = args.program->code_size;
CHECK(code_size < TWO_GIBIBYTES);
std::string code(code_size, ' ');
args.program->code = code.data();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OptimizedProgram(&args),
c_api);
absl::string_view program_format(program.format, program.format_size);
if (program_format != ::pjrt::kHloWithConfigFormat &&
program_format != ::pjrt::kMlirFormat) {
return xla::Internal(
"expected program format `hlo_with_config` or `mlir` but got %s",
program_format);
}
if (program_format == ::pjrt::kMlirFormat) {
mlir::MLIRContext ctx;
TF_ASSIGN_OR_RETURN(
mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(code, ctx));
mlir::PassManager pm(&ctx);
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
if (mlir::failed(pm.run(module.get())))
return xla::Internal("failed to convert to MHLO");
mlir::MlirToHloConversionOptions options;
options.return_tuple = false;
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::HloModule> hlo_module,
mlir::ConvertMlirHloToHloModule(module.get(), options));
std::vector<std::shared_ptr<HloModule>> out;
out.push_back(std::move(hlo_module));
return out;
}
HloModuleProtoWithConfig proto;
proto.ParseFromString(code);
std::vector<std::shared_ptr<HloModule>> out;
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
HloModule::CreateFromProtoWithConfig(proto));
out.push_back(std::move(module));
return out;
}
absl::StatusOr<std::string> PjRtCApiExecutable::SerializeExecutable() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_Serialize_Args ser_args;
ser_args.struct_size = PJRT_Executable_Serialize_Args_STRUCT_SIZE;
ser_args.extension_start = nullptr;
ser_args.executable = executable;
ser_args.serialized_executable = nullptr;
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_Serialize(&ser_args),
c_api);
absl::Cleanup cleanup = [&ser_args] {
ser_args.serialized_executable_deleter(ser_args.serialized_executable);
};
return std::string(ser_args.serialized_bytes, ser_args.serialized_bytes_size);
}
absl::StatusOr<std::string> PjRtCApiExecutable::FingerprintExecutable() const {
const PJRT_Api* c_api_ = pjrt_c_api();
PJRT_Executable_Fingerprint_Args args;
args.struct_size = PJRT_Executable_Fingerprint_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Executable_Fingerprint(&args),
c_api_);
return std::string(args.executable_fingerprint,
args.executable_fingerprint_size);
}
PjRtCApiLoadedExecutable::PjRtCApiLoadedExecutable(
PjRtCApiClient* client, PJRT_LoadedExecutable* executable)
: client_(client),
loaded_executable_(executable, ::pjrt::MakeLoadedExecutableDeleter(
client->pjrt_c_api())) {
PJRT_LoadedExecutable_GetExecutable_Args args;
args.struct_size = PJRT_LoadedExecutable_GetExecutable_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.loaded_executable = c_loaded_executable();
args.executable = nullptr;
pjrt::LogFatalIfPjrtError(
pjrt_c_api()->PJRT_LoadedExecutable_GetExecutable(&args), pjrt_c_api());
executable_ =
std::make_unique<PjRtCApiExecutable>(pjrt_c_api(), args.executable);
InitDevices();
}
void PjRtCApiLoadedExecutable::InitDevices() {
PJRT_LoadedExecutable_AddressableDevices_Args args;
args.struct_size = PJRT_LoadedExecutable_AddressableDevices_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_loaded_executable();
args.addressable_devices = nullptr;
args.num_addressable_devices = 0;
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(
api->PJRT_LoadedExecutable_AddressableDevices(&args), api);
const size_t num_addressable_devices = args.num_addressable_devices;
addressable_devices_.reserve(num_addressable_devices);
for (size_t i = 0; i < num_addressable_devices; ++i) {
PJRT_Device* device = args.addressable_devices[i];
PjRtCApiDevice* c_api_device = client_->GetCppDevice(device);
addressable_devices_.push_back(c_api_device);
}
}
static std::vector<std::vector<PJRT_Buffer*>> Convert2DCppBuffersToCBuffers(
absl::Span<const std::vector<PjRtBuffer*>> cpp_lists) {
std::vector<std::vector<PJRT_Buffer*>> c_lists;
c_lists.reserve(cpp_lists.size());
for (const auto& cpp_list : cpp_lists) {
auto& c_list = c_lists.emplace_back();
c_list.reserve(cpp_list.size());
for (PjRtBuffer* buffer : cpp_list) {
auto* c_api_argument = tensorflow::down_cast<PjRtCApiBuffer*>(buffer);
c_list.push_back(c_api_argument->c_buffer());
}
}
return c_lists;
}
static std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>
Convert2DCBuffersToCppBuffers(PJRT_Buffer** const* c_lists, size_t outer_size,
int inner_size, xla::PjRtCApiClient* client) {
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> ret;
for (size_t i = 0; i < outer_size; ++i) {
auto& output_list = ret.emplace_back();
output_list.reserve(inner_size);
for (size_t j = 0; j < inner_size; ++j) {
output_list.push_back(
std::make_unique<PjRtCApiBuffer>(client, c_lists[i][j]));
}
}
return ret;
}
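// Bridges a C++ SendCallback onto the C ABI. The capturing std::function is
// stored out-of-line in *send_callback_function and its address travels as
// user_arg, because a capturing lambda cannot decay to the plain function
// pointer PJRT_SendCallbackInfo expects.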
PJRT_SendCallbackInfo CppSendCallbackToC(
const xla::SendCallback& cpp_send_callback,
PjRtCApiLoadedExecutable::SendCallbackFunction* send_callback_function) {
*send_callback_function =
[&send_callback = cpp_send_callback.callback](
PJRT_Chunk* chunk, PJRT_CallbackError* callback_error,
size_t total_size_in_bytes, bool done) -> PJRT_Error* {
xla::Shape dummy_shape;
absl::Status status = send_callback(xla::PjRtTransferMetadata{dummy_shape},
::pjrt::ConvertToCppChunk(*chunk),
total_size_in_bytes, done);
if (!status.ok()) {
absl::string_view message = status.message();
return (*callback_error)(pjrt::StatusCodeToPjrtErrorCode(status.code()),
message.data(), message.size());
}
return nullptr;
};
return PJRT_SendCallbackInfo{
cpp_send_callback.channel_id,
send_callback_function,
[](PJRT_Chunk* chunk, PJRT_CallbackError* callback_error,
size_t total_size_in_bytes, bool done, void* user_arg) -> PJRT_Error* {
PjRtCApiLoadedExecutable::SendCallbackFunction* send_callback =
reinterpret_cast<PjRtCApiLoadedExecutable::SendCallbackFunction*>(
user_arg);
return (*send_callback)(chunk, callback_error, total_size_in_bytes,
done);
}};
}
CApiCopyToDeviceStream::CApiCopyToDeviceStream(
PJRT_CopyToDeviceStream* c_stream, const PJRT_Api* c_api)
: CopyToDeviceStream(0, 0),
c_stream_(c_stream),
c_api_(c_api) {
PJRT_CopyToDeviceStream_TotalBytes_Args total_bytes_args;
total_bytes_args.struct_size =
PJRT_CopyToDeviceStream_TotalBytes_Args_STRUCT_SIZE;
total_bytes_args.extension_start = nullptr;
total_bytes_args.stream = c_stream_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_CopyToDeviceStream_TotalBytes(&total_bytes_args), c_api_);
total_bytes_ = total_bytes_args.total_bytes;
PJRT_CopyToDeviceStream_GranuleSize_Args granule_size_args;
granule_size_args.struct_size =
PJRT_CopyToDeviceStream_GranuleSize_Args_STRUCT_SIZE;
granule_size_args.extension_start = nullptr;
granule_size_args.stream = c_stream_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_CopyToDeviceStream_GranuleSize(&granule_size_args), c_api_);
granule_bytes_ = granule_size_args.granule_size_in_bytes;
}
CApiCopyToDeviceStream::~CApiCopyToDeviceStream() {
PJRT_CopyToDeviceStream_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_CopyToDeviceStream_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.stream = c_stream_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_CopyToDeviceStream_Destroy(&destroy_args), c_api_);
}
PjRtFuture<> CApiCopyToDeviceStream::AddChunk(PjRtChunk chunk) {
PJRT_Chunk c_chunk = ::pjrt::ConvertFromCppChunk(std::move(chunk));
PJRT_CopyToDeviceStream_AddChunk_Args add_chunk_args;
add_chunk_args.struct_size =
PJRT_CopyToDeviceStream_AddChunk_Args_STRUCT_SIZE;
add_chunk_args.extension_start = nullptr;
add_chunk_args.stream = c_stream_;
add_chunk_args.chunk = &c_chunk;
PJRT_CopyToDeviceStream_CurrentBytes_Args current_bytes_args;
current_bytes_args.struct_size =
PJRT_CopyToDeviceStream_CurrentBytes_Args_STRUCT_SIZE;
current_bytes_args.extension_start = nullptr;
current_bytes_args.stream = c_stream_;
{
absl::MutexLock lock(&mu_);
RETURN_FUTURE_IF_ERROR(
c_api_->PJRT_CopyToDeviceStream_AddChunk(&add_chunk_args), c_api_);
RETURN_FUTURE_IF_ERROR(
c_api_->PJRT_CopyToDeviceStream_CurrentBytes(&current_bytes_args),
c_api_);
current_bytes_ = current_bytes_args.current_bytes;
}
CHECK(add_chunk_args.transfer_complete != nullptr);
return ::pjrt::ConvertCEventToCppFuture(add_chunk_args.transfer_complete,
c_api_);
}
PJRT_RecvCallbackInfo CppRecvCallbackToC(
const xla::RecvCallback& cpp_recv_callback, const PJRT_Api* c_api,
PjRtCApiLoadedExecutable::RecvCallbackFunction* recv_callback_function) {
*recv_callback_function = [&recv_callback = cpp_recv_callback.callback,
c_api](PJRT_CopyToDeviceStream* stream) {
xla::Shape dummy_shape;
recv_callback(xla::PjRtTransferMetadata{dummy_shape},
std::make_unique<CApiCopyToDeviceStream>(stream, c_api));
};
return PJRT_RecvCallbackInfo{
cpp_recv_callback.channel_id,
recv_callback_function,
[](PJRT_CopyToDeviceStream* stream, void* user_arg) {
PjRtCApiLoadedExecutable::RecvCallbackFunction* recv_callback =
reinterpret_cast<PjRtCApiLoadedExecutable::RecvCallbackFunction*>(
user_arg);
(*recv_callback)(stream);
}};
}
static void CppSendCallbackListsToC(
absl::Span<const std::vector<xla::SendCallback>> cpp_lists,
std::vector<PjRtCApiLoadedExecutable::SendCallbackFunction>&
send_callback_functions,
std::vector<std::vector<PJRT_SendCallbackInfo>>& c_lists) {
if (cpp_lists.empty()) return;
send_callback_functions.resize(cpp_lists.size() * cpp_lists[0].size());
c_lists.reserve(cpp_lists.size());
int func_count = 0;
for (const std::vector<xla::SendCallback>& cpp_list : cpp_lists) {
std::vector<PJRT_SendCallbackInfo>& c_list = c_lists.emplace_back();
c_list.reserve(cpp_list.size());
for (const xla::SendCallback& cpp_callback : cpp_list) {
c_list.emplace_back(CppSendCallbackToC(
cpp_callback, &send_callback_functions[func_count++]));
}
}
}
static void CppRecvCallbackListsToC(
absl::Span<const std::vector<xla::RecvCallback>> cpp_lists,
const PJRT_Api* c_api,
std::vector<PjRtCApiLoadedExecutable::RecvCallbackFunction>&
recv_callback_functions,
std::vector<std::vector<PJRT_RecvCallbackInfo>>& c_lists) {
if (cpp_lists.empty()) return;
recv_callback_functions.resize(cpp_lists.size() * cpp_lists[0].size());
c_lists.reserve(cpp_lists.size());
int func_count = 0;
for (const auto& cpp_list : cpp_lists) {
std::vector<PJRT_RecvCallbackInfo>& c_list = c_lists.emplace_back();
c_list.reserve(cpp_list.size());
for (const auto& cpp_callback : cpp_list) {
c_list.emplace_back(CppRecvCallbackToC(
cpp_callback, c_api, &recv_callback_functions[func_count++]));
}
}
}
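// Builds the PJRT_LoadedExecutable_Execute_Args shared by Execute and
// ExecuteWithSingleDevice. The *_storage reference parameters exist to keep
// the C-visible arrays alive in the caller's frame for the duration of the
// execute call.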
absl::StatusOr<PJRT_LoadedExecutable_Execute_Args>
PjRtCApiLoadedExecutable::GetCommonExecuteArgs(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options, PJRT_ExecuteOptions& c_options,
std::vector<std::vector<PJRT_Buffer*>>& c_argument_lists_storage,
std::vector<PJRT_Buffer**>& c_arguments,
std::vector<std::vector<PJRT_Buffer*>>& c_output_lists_storage,
std::vector<PJRT_Buffer**>& c_output_lists,
std::optional<std::vector<PJRT_Event*>>& device_complete_events,
SendRecvCallbackData& callback_data,
std::vector<int64_t>& non_donatable_input_indices_storage) {
bool using_host_callbacks =
!options.send_callbacks.empty() || !options.recv_callbacks.empty();
if (using_host_callbacks &&
!options.use_major_to_minor_data_layout_for_callbacks) {
return Unimplemented(
"PJRT C API doesn't support "
"ExecuteOptions::use_major_to_minor_data_layout_for_callbacks = false");
}
PJRT_LoadedExecutable_Execute_Args args;
args.struct_size = PJRT_LoadedExecutable_Execute_Args_STRUCT_SIZE;
args.executable = c_loaded_executable();
args.options = &c_options;
args.options->struct_size = PJRT_ExecuteOptions_STRUCT_SIZE;
args.options->launch_id = options.launch_id;
for (auto i : options.non_donatable_input_indices) {
non_donatable_input_indices_storage.push_back(i);
}
args.options->num_non_donatable_input_indices =
options.non_donatable_input_indices.size();
args.options->non_donatable_input_indices =
non_donatable_input_indices_storage.data();
args.num_devices = argument_handles.size();
CHECK_GT(args.num_devices, 0);
args.num_args = argument_handles[0].size();
if (device_complete_events.has_value() || using_host_callbacks) {
device_complete_events->resize(args.num_devices);
args.device_complete_events = device_complete_events->data();
} else {
args.device_complete_events = nullptr;
}
c_argument_lists_storage = Convert2DCppBuffersToCBuffers(argument_handles);
c_arguments.reserve(c_argument_lists_storage.size());
for (auto& argument_list : c_argument_lists_storage) {
c_arguments.push_back(argument_list.data());
}
args.argument_lists = c_arguments.data();
PJRT_Executable_NumOutputs_Args numoutputs_args;
numoutputs_args.struct_size = PJRT_Executable_NumOutputs_Args_STRUCT_SIZE;
numoutputs_args.extension_start = nullptr;
numoutputs_args.executable = c_executable();
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_Executable_NumOutputs(&numoutputs_args), pjrt_c_api());
size_t outer_size = args.num_devices;
size_t inner_size = numoutputs_args.num_outputs;
c_output_lists_storage.resize(outer_size);
c_output_lists.resize(outer_size);
for (int i = 0; i < outer_size; ++i) {
c_output_lists_storage[i].resize(inner_size);
c_output_lists[i] = c_output_lists_storage[i].data();
}
args.output_lists = c_output_lists.data();
if (!options.send_callbacks.empty()) {
CppSendCallbackListsToC(options.send_callbacks,
callback_data.send_callback_functions,
callback_data.c_send_callbacks);
for (auto& c_send_callback_list : callback_data.c_send_callbacks) {
callback_data.c_send_callback_lists.push_back(
c_send_callback_list.data());
}
args.options->send_callbacks = callback_data.c_send_callback_lists.data();
args.options->num_send_ops = options.send_callbacks[0].size();
}
if (!options.recv_callbacks.empty()) {
CppRecvCallbackListsToC(options.recv_callbacks, pjrt_c_api(),
callback_data.recv_callback_functions,
callback_data.c_recv_callbacks);
for (auto& c_recv_callback_list : callback_data.c_recv_callbacks) {
callback_data.c_recv_callback_lists.push_back(
c_recv_callback_list.data());
}
args.options->recv_callbacks = callback_data.c_recv_callback_lists.data();
args.options->num_recv_ops = options.recv_callbacks[0].size();
}
return args;
}
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
PjRtCApiLoadedExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
std::optional<std::vector<PjRtFuture<>>>& returned_futures) {
std::vector<std::vector<PJRT_Buffer*>> c_argument_lists_storage;
std::vector<std::vector<PJRT_Buffer*>> c_output_lists_storage;
std::vector<PJRT_Buffer**> c_output_lists;
std::vector<int64_t> non_donatable_input_indices_storage;
PJRT_ExecuteOptions c_options;
c_options.num_send_ops = 0;
c_options.num_recv_ops = 0;
std::vector<PJRT_Buffer**> c_arguments;
std::optional<std::vector<PJRT_Event*>> device_complete_events;
if (returned_futures.has_value()) {
device_complete_events.emplace();
}
auto callback_data = std::make_shared<SendRecvCallbackData>();
TF_ASSIGN_OR_RETURN(
PJRT_LoadedExecutable_Execute_Args args,
GetCommonExecuteArgs(argument_handles, options, c_options,
c_argument_lists_storage, c_arguments,
c_output_lists_storage, c_output_lists,
device_complete_events, *callback_data,
non_donatable_input_indices_storage));
args.execute_device = nullptr;
PJRT_Profiler_Extension profiler_extension =
pjrt::CreatePjrtProfilerExtension(
"PJRT_LoadedExecutable_Execute linkage");
args.extension_start =
reinterpret_cast<PJRT_Extension_Base*>(&profiler_extension);
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_LoadedExecutable_Execute(&args), pjrt_c_api());
if (device_complete_events.has_value()) {
std::vector<PjRtFuture<>> device_complete_futures;
device_complete_futures.reserve(args.num_devices);
for (int i = 0; i < args.num_devices; ++i) {
device_complete_futures.push_back(pjrt::ConvertCEventToCppFuture(
args.device_complete_events[i], pjrt_c_api()));
      if (!callback_data->c_send_callbacks.empty() ||
          !callback_data->c_recv_callbacks.empty()) {
        // The empty callback exists only so the capture keeps `callback_data`
        // (and the C callback lists inside it) alive until this device's
        // execution completes.
        device_complete_futures.back().OnReady(
            [callback_data](absl::Status status) {});
      }
}
if (returned_futures.has_value()) {
*returned_futures = std::move(device_complete_futures);
}
}
return Convert2DCBuffersToCppBuffers(args.output_lists, args.num_devices,
c_output_lists_storage[0].size(),
client_);
}
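// Usage sketch (editor addition, not from the original file): a caller that
// wants completion futures passes an engaged optional to Execute(), then
// blocks on one future per device. `executable` and `args` are assumed to
// come from an earlier Compile()/BufferFromHostBuffer() flow.
//
//   std::optional<std::vector<PjRtFuture<>>> futures;
//   futures.emplace();  // Ask for one completion future per device.
//   TF_ASSIGN_OR_RETURN(auto outputs,
//                       executable->Execute(args, ExecuteOptions(), futures));
//   for (PjRtFuture<>& f : *futures) {
//     TF_RETURN_IF_ERROR(f.Await());  // Wait for that device to finish.
//   }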
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtCApiLoadedExecutable::ExecuteWithSingleDevice(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
if (!options.send_callbacks.empty() || !options.recv_callbacks.empty()) {
return absl::Status(absl::StatusCode::kUnimplemented,
"Send/recv callbacks not implemented for "
"PjRtCApiLoadedExecutable::ExecuteWithSingleDevice.");
}
std::vector<std::vector<PjRtBuffer*>> argument_handles_vec = {
{argument_handles.begin(), argument_handles.end()}};
std::vector<std::vector<PJRT_Buffer*>> c_argument_lists_storage;
std::vector<std::vector<PJRT_Buffer*>> c_output_lists_storage;
std::vector<PJRT_Buffer**> c_output_lists;
std::vector<int64_t> non_donatable_input_indices_storage;
PJRT_ExecuteOptions c_options;
c_options.num_send_ops = 0;
c_options.num_recv_ops = 0;
std::vector<PJRT_Buffer**> c_arguments;
std::optional<std::vector<PJRT_Event*>> device_complete_events;
if (fill_future) {
device_complete_events.emplace();
}
auto callback_data = std::make_shared<SendRecvCallbackData>();
TF_ASSIGN_OR_RETURN(
PJRT_LoadedExecutable_Execute_Args args,
GetCommonExecuteArgs(argument_handles_vec, options, c_options,
c_argument_lists_storage, c_arguments,
c_output_lists_storage, c_output_lists,
device_complete_events, *callback_data,
non_donatable_input_indices_storage));
args.execute_device =
tensorflow::down_cast<PjRtCApiDevice*>(device)->c_device();
PJRT_Profiler_Extension profiler_extension =
pjrt::CreatePjrtProfilerExtension(
"PJRT_LoadedExecutable_Execute linkage");
args.extension_start =
reinterpret_cast<PJRT_Extension_Base*>(&profiler_extension);
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_LoadedExecutable_Execute(&args), pjrt_c_api());
if (fill_future) {
returned_future = pjrt::ConvertCEventToCppFuture(
args.device_complete_events[0], pjrt_c_api());
}
return std::move(Convert2DCBuffersToCppBuffers(
args.output_lists, args.num_devices, c_output_lists_storage[0].size(),
client_)[0]);
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtCApiLoadedExecutable::ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
return ExecuteWithSingleDevice(argument_handles, device, options,
returned_future, fill_future);
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtCApiLoadedExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
return ExecuteWithSingleDevice(argument_handles, device, options,
returned_future, fill_future);
}
void PjRtCApiLoadedExecutable::Delete() {
PJRT_LoadedExecutable_Delete_Args args;
args.struct_size = PJRT_LoadedExecutable_Delete_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_loaded_executable();
const PJRT_Api* c_api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(c_api->PJRT_LoadedExecutable_Delete(&args), c_api);
}
bool PjRtCApiLoadedExecutable::IsDeleted() {
PJRT_LoadedExecutable_IsDeleted_Args args;
args.struct_size = PJRT_LoadedExecutable_IsDeleted_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_loaded_executable();
const PJRT_Api* c_api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(c_api->PJRT_LoadedExecutable_IsDeleted(&args),
c_api);
return args.is_deleted;
}
absl::StatusOr<std::string> PjRtCApiLoadedExecutable::FingerprintExecutable()
const {
absl::StatusOr<std::string> fingerprint =
executable_->FingerprintExecutable();
if (fingerprint.ok()) {
return *fingerprint;
}
if (fingerprint.status().code() != absl::StatusCode::kUnimplemented) {
return fingerprint.status();
}
PJRT_LoadedExecutable_Fingerprint_Args args;
args.struct_size = PJRT_LoadedExecutable_Fingerprint_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_loaded_executable();
const PJRT_Api* c_api = pjrt_c_api();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> error(
c_api->PJRT_LoadedExecutable_Fingerprint(&args),
pjrt::MakeErrorDeleter(c_api));
if (error) {
return ::pjrt::PjrtErrorToStatus(error.get(), c_api);
}
return std::string(args.executable_fingerprint,
args.executable_fingerprint_size);
}
PjRtCApiBuffer::PjRtCApiBuffer(PjRtCApiClient* client, PJRT_Buffer* buffer)
: client_(client),
buffer_(buffer, ::pjrt::MakeBufferDeleter(client->pjrt_c_api())),
readiness_event_(nullptr,
::pjrt::MakeEventDeleter(client->pjrt_c_api())) {}
PrimitiveType PjRtCApiBuffer::element_type() const {
PJRT_Buffer_ElementType_Args args;
args.struct_size = PJRT_Buffer_ElementType_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Buffer_ElementType(&args),
pjrt_c_api());
return pjrt::ConvertFromPjRtBufferType(args.type);
}
absl::Span<const int64_t> PjRtCApiBuffer::dimensions() const {
PJRT_Buffer_Dimensions_Args args;
args.struct_size = PJRT_Buffer_Dimensions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Buffer_Dimensions(&args),
pjrt_c_api());
return absl::Span<const int64_t>(args.dims, args.num_dims);
}
std::unique_ptr<PjRtLayout> PjRtCApiBuffer::layout() const {
{
absl::MutexLock lock(&mu_);
if (!layout_.has_value()) {
const PJRT_Api* c_api = pjrt_c_api();
PJRT_Layouts_Extension* extension =
pjrt::FindExtension<PJRT_Layouts_Extension>(
c_api, PJRT_Extension_Type::PJRT_Extension_Type_Layouts);
if (extension == nullptr) {
layout_.emplace(LayoutUtil::MakeDescendingLayout(dimensions().size()));
} else {
std::unique_ptr<PJRT_Layouts_MemoryLayout,
pjrt::PJRT_Layouts_MemoryLayoutDeleter>
layout = pjrt::GetMemoryLayout(c_api, buffer_.get());
PJRT_Layouts_MemoryLayout_Serialize_Args serialize_args;
serialize_args.struct_size =
PJRT_Layouts_MemoryLayout_Serialize_Args_STRUCT_SIZE;
serialize_args.extension_start = nullptr;
serialize_args.layout = layout.get();
pjrt::LogFatalIfPjrtError(
extension->PJRT_Layouts_MemoryLayout_Serialize(&serialize_args),
c_api);
absl::Cleanup cleanup = [&serialize_args] {
serialize_args.serialized_layout_deleter(
serialize_args.serialized_layout);
};
std::string serialized_layout(serialize_args.serialized_bytes,
serialize_args.serialized_bytes_size);
absl::StatusOr<PjRtXlaLayout> pjrt_xla_layout =
PjRtXlaLayout::Deserialize(serialized_layout);
TF_CHECK_OK(pjrt_xla_layout.status());
layout_.emplace(*pjrt_xla_layout);
}
}
}
return std::make_unique<PjRtXlaLayout>(*layout_);
}
bool PjRtCApiBuffer::has_dynamic_dimensions() const {
PJRT_Buffer_DynamicDimensionIndices_Args args;
args.struct_size = PJRT_Buffer_DynamicDimensionIndices_Args_STRUCT_SIZE;
args.extension_start = nullptr;
  args.buffer = buffer_.get();
  args.num_dynamic_dims = 0;  // Remains 0 if the call below fails.
const PJRT_Api* api = pjrt_c_api();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> error(
api->PJRT_Buffer_DynamicDimensionIndices(&args),
pjrt::MakeErrorDeleter(api));
if (error &&
pjrt::GetErrorCode(error.get(), api) == PJRT_Error_Code_UNIMPLEMENTED) {
return false;
}
return args.num_dynamic_dims > 0;
}
absl::Span<const bool> PjRtCApiBuffer::is_dynamic_dimension() const {
{
absl::MutexLock lock(&mu_);
if (!is_dynamic_dimension_.has_value()) {
absl::InlinedVector<bool, InlineRank()>& is_dynamic_dimension_value =
is_dynamic_dimension_.emplace();
is_dynamic_dimension_value.assign(dimensions().size(), false);
PJRT_Buffer_DynamicDimensionIndices_Args args;
args.struct_size = PJRT_Buffer_DynamicDimensionIndices_Args_STRUCT_SIZE;
args.extension_start = nullptr;
      args.buffer = buffer_.get();
      args.num_dynamic_dims = 0;  // Remains 0 if the call below fails.
const PJRT_Api* api = pjrt_c_api();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> error(
api->PJRT_Buffer_DynamicDimensionIndices(&args),
pjrt::MakeErrorDeleter(api));
if (error && pjrt::GetErrorCode(error.get(), api) ==
PJRT_Error_Code_UNIMPLEMENTED) {
return *is_dynamic_dimension_;
}
for (int i = 0; i < args.num_dynamic_dims; ++i) {
is_dynamic_dimension_value[args.dynamic_dim_indices[i]] = true;
}
}
}
return *is_dynamic_dimension_;
}
absl::StatusOr<std::vector<int64_t>> PjRtCApiBuffer::logical_dimensions() {
PJRT_Buffer_UnpaddedDimensions_Args args;
args.struct_size = PJRT_Buffer_UnpaddedDimensions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_Buffer_UnpaddedDimensions(&args), pjrt_c_api());
return std::vector<int64_t>(args.unpadded_dims,
args.unpadded_dims + args.num_dims);
}
PjRtFuture<> PjRtCApiBuffer::LazyToLiteral(
absl::AnyInvocable<absl::StatusOr<MutableLiteralBase*>() &&> generator) {
auto buffer = std::move(generator)();
if (!buffer.ok()) {
return PjRtFuture<>(buffer.status());
}
return ToLiteral(buffer.value());
}
PjRtFuture<> PjRtCApiBuffer::ToLiteral(MutableLiteralBase* literal) {
PJRT_Buffer_ToHostBuffer_Args args;
args.struct_size = PJRT_Buffer_ToHostBuffer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.src = buffer_.get();
const xla::Shape& shape = literal->shape();
if (!shape.IsArray()) {
return PjRtFuture<>(
Unimplemented("PjRtCApiBuffer::ToLiteral: Shapes other than array are"
"not supported."));
}
args.dst_size = ShapeUtil::ByteSizeOfElements(shape);
args.dst = literal->untyped_data();
absl::StatusOr<pjrt::BufferMemoryLayoutData> c_layout_data;
if (literal->shape().has_layout()) {
c_layout_data =
pjrt::ConvertToBufferMemoryLayoutData(literal->shape().layout());
if (!c_layout_data.ok()) {
return PjRtFuture<>(c_layout_data.status());
}
args.host_layout = &(c_layout_data->c_layout);
} else {
args.host_layout = nullptr;
}
const PJRT_Api* api = pjrt_c_api();
std::unique_ptr<PJRT_Error, ::pjrt::PJRT_ErrorDeleter> error{
pjrt_c_api()->PJRT_Buffer_ToHostBuffer(&args),
::pjrt::MakeErrorDeleter(api)};
if (error != nullptr) {
return PjRtFuture<>(::pjrt::PjrtErrorToStatus(error.get(), api));
}
return pjrt::ConvertCEventToCppFuture(args.event, api);
}
absl::StatusOr<size_t> PjRtCApiBuffer::GetOnDeviceSizeInBytes() const {
PJRT_Buffer_OnDeviceSizeInBytes_Args args;
args.struct_size = PJRT_Buffer_OnDeviceSizeInBytes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
RETURN_STATUS_IF_PJRT_ERROR(
client_->pjrt_c_api()->PJRT_Buffer_OnDeviceSizeInBytes(&args),
client_->pjrt_c_api());
return args.on_device_size_in_bytes;
}
PjRtMemorySpace* PjRtCApiBuffer::memory_space() const {
PJRT_Buffer_Memory_Args args;
args.struct_size = PJRT_Buffer_Memory_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> error(
api->PJRT_Buffer_Memory(&args), pjrt::MakeErrorDeleter(api));
if (error == nullptr && args.memory != nullptr) {
return client_->GetCppMemory(args.memory);
} else if (error != nullptr && pjrt::GetErrorCode(error.get(), api) !=
PJRT_Error_Code_UNIMPLEMENTED) {
pjrt::LogFatalIfPjrtError(error.get(), api);
}
return nullptr;
}
PjRtDevice* PjRtCApiBuffer::device() const {
PJRT_Buffer_Device_Args args;
args.struct_size = PJRT_Buffer_Device_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_Device(&args), api);
return client_->GetCppDevice(args.device);
}
void PjRtCApiBuffer::Delete() {
PJRT_Buffer_Delete_Args args;
args.struct_size = PJRT_Buffer_Delete_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_Delete(&args), api);
}
bool PjRtCApiBuffer::IsDeleted() {
PJRT_Buffer_IsDeleted_Args args;
args.struct_size = PJRT_Buffer_IsDeleted_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_IsDeleted(&args), api);
return args.is_deleted;
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> PjRtCApiBuffer::CopyToDevice(
PjRtDevice* dst_device) {
if (dst_device->client() == client_) {
PJRT_Buffer_CopyToDevice_Args args;
args.struct_size = PJRT_Buffer_CopyToDevice_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
args.dst_device =
tensorflow::down_cast<PjRtCApiDevice*>(dst_device)->c_device();
const PJRT_Api* api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Buffer_CopyToDevice(&args), api);
return std::unique_ptr<PjRtBuffer>(
std::make_unique<PjRtCApiBuffer>(client_, args.dst_buffer));
} else {
TF_ASSIGN_OR_RETURN(std::shared_ptr<Literal> literal, ToLiteralSync());
absl::InlinedVector<int64_t, 4> byte_strides(
literal->shape().dimensions_size());
TF_RETURN_IF_ERROR(
ShapeUtil::ByteStrides(literal->shape(), absl::MakeSpan(byte_strides)));
Literal* literal_pointer = literal.get();
return dst_device->client()->BufferFromHostBuffer(
literal_pointer->untyped_data(),
literal_pointer->shape().element_type(),
literal_pointer->shape().dimensions(), byte_strides,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
        // The capture keeps `literal` alive until the destination client has
        // consumed the host data (on_done_with_host_buffer callback).
        [literal{std::move(literal)}]() {}, dst_device);
}
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> PjRtCApiBuffer::CopyToMemorySpace(
PjRtMemorySpace* dst_memory) {
const PJRT_Api* api = pjrt_c_api();
if (dst_memory->client() == client_) {
PJRT_Buffer_CopyToMemory_Args args;
args.struct_size = PJRT_Buffer_CopyToMemory_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
args.dst_memory =
tensorflow::down_cast<PjRtCApiMemorySpace*>(dst_memory)->c_memory();
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Buffer_CopyToMemory(&args), api);
return std::unique_ptr<PjRtBuffer>(
std::make_unique<PjRtCApiBuffer>(client_, args.dst_buffer));
} else {
TF_ASSIGN_OR_RETURN(std::shared_ptr<Literal> literal, ToLiteralSync());
absl::InlinedVector<int64_t, 4> byte_strides(
literal->shape().dimensions_size());
TF_RETURN_IF_ERROR(
ShapeUtil::ByteStrides(literal->shape(), absl::MakeSpan(byte_strides)));
Literal* literal_pointer = literal.get();
return dst_memory->client()->BufferFromHostBuffer(
literal_pointer->untyped_data(),
literal_pointer->shape().element_type(),
literal_pointer->shape().dimensions(), byte_strides,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
        // As above, the capture keeps `literal` alive until the host data has
        // been consumed.
        [literal{std::move(literal)}]() {}, dst_memory,
        /*device_layout=*/nullptr);
}
}
bool PjRtCApiBuffer::IsOnCpu() const {
PJRT_Buffer_IsOnCpu_Args args;
args.struct_size = PJRT_Buffer_IsOnCpu_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_IsOnCpu(&args), api);
return args.is_on_cpu;
}
PJRT_Event* PjRtCApiBuffer::GetReadyEvent() {
if (readiness_event_ == nullptr) {
const PJRT_Api* api = pjrt_c_api();
PJRT_Buffer_ReadyEvent_Args args;
args.struct_size = PJRT_Buffer_ReadyEvent_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_ReadyEvent(&args), api);
readiness_event_.reset(args.event);
}
return readiness_event_.get();
}
void PjRtCApiBuffer::MakePromiseTrackEvent() {
CHECK(readiness_promise_ != nullptr);
const PJRT_Api* api = pjrt_c_api();
PJRT_Event_OnReady_Args args;
args.struct_size = PJRT_Event_OnReady_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.event = GetReadyEvent();
  // Heap-allocate the C++ callback and pass it through the C API as
  // `user_arg`; the C-compatible trampoline assigned to `args.callback`
  // unwraps it, runs it exactly once, and deletes it.
  args.user_arg = new std::function<void(PJRT_Error*)>(
      [promise = readiness_promise_, api](PJRT_Error* error) -> void {
        promise->Set(::pjrt::PjrtErrorToStatus(error, api));
        ::pjrt::MakeErrorDeleter(api)(error);
      });
  args.callback = [](PJRT_Error* error, void* callback_ptr) {
    auto callback =
        static_cast<std::function<void(PJRT_Error*)>*>(callback_ptr);
    CHECK(callback != nullptr);
    (*callback)(error);
    delete callback;
  };
std::unique_ptr<PJRT_Error, ::pjrt::PJRT_ErrorDeleter> error{
api->PJRT_Event_OnReady(&args), ::pjrt::MakeErrorDeleter(api)};
if (error != nullptr) {
readiness_promise_->Set(::pjrt::PjrtErrorToStatus(error.get(), api));
}
}
PjRtFuture<> PjRtCApiBuffer::GetReadyFuture() {
if (readiness_promise_ == nullptr) {
readiness_promise_ =
std::make_shared<PjRtFuture<>::Promise>(PjRtFuture<>::CreatePromise());
MakePromiseTrackEvent();
}
return PjRtFuture<>{*readiness_promise_};
}
absl::StatusOr<std::unique_ptr<PjRtBuffer::ExternalReference>>
PjRtCApiBuffer::AcquireExternalReference() {
PJRT_Buffer_IncreaseExternalReferenceCount_Args increase_reference_count_args;
increase_reference_count_args.buffer = c_buffer();
increase_reference_count_args.struct_size =
PJRT_Buffer_IncreaseExternalReferenceCount_Args_STRUCT_SIZE;
increase_reference_count_args.extension_start = nullptr;
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_Buffer_IncreaseExternalReferenceCount(
&increase_reference_count_args),
pjrt_c_api());
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args
opaque_device_memory_data_pointer_args;
opaque_device_memory_data_pointer_args.struct_size =
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args_STRUCT_SIZE;
opaque_device_memory_data_pointer_args.extension_start = nullptr;
opaque_device_memory_data_pointer_args.buffer = c_buffer();
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_Buffer_OpaqueDeviceMemoryDataPointer(
&opaque_device_memory_data_pointer_args),
pjrt_c_api());
void* device_memory_ptr =
opaque_device_memory_data_pointer_args.device_memory_ptr;
return std::make_unique<PjRtCApiExternalReference>(client_, this,
device_memory_ptr);
}
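// Lifetime sketch (editor addition, not from the original file): the returned
// reference pins the underlying device memory until it is destroyed.
//
//   TF_ASSIGN_OR_RETURN(auto ref, buffer->AcquireExternalReference());
//   void* raw = ref->OpaqueDeviceMemoryDataPointer();
//   // ... hand `raw` to code that reads device memory directly ...
//   ref.reset();  // ~PjRtCApiExternalReference drops the count (see below).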
PjRtCApiExternalReference::~PjRtCApiExternalReference() {
PJRT_Buffer_DecreaseExternalReferenceCount_Args args;
args.struct_size =
PJRT_Buffer_DecreaseExternalReferenceCount_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_->c_buffer();
pjrt::LogFatalIfPjrtError(
client_->pjrt_c_api()->PJRT_Buffer_DecreaseExternalReferenceCount(&args),
client_->pjrt_c_api());
}
absl::Status PjRtCApiExternalReference::WaitUntilBufferReadyOnStream(
std::intptr_t stream) {
const PJRT_Api* c_api = buffer_->pjrt_c_api();
PJRT_Stream_Extension* extension = pjrt::FindExtension<PJRT_Stream_Extension>(
c_api, PJRT_Extension_Type::PJRT_Extension_Type_Stream);
if (extension == nullptr) {
return absl::UnimplementedError(
"Stream extension not implemented in this PJRT plugin.");
}
PJRT_Wait_Until_Buffer_Ready_On_Stream_Args args;
args.struct_size = PJRT_Wait_Until_Buffer_Ready_On_Stream_Args_STRUCT_SIZE;
args.stream = stream;
args.buffer = buffer_->c_buffer();
RETURN_STATUS_IF_PJRT_ERROR(extension->wait_stream(&args), c_api);
return absl::OkStatus();
}
PjRtCApiTopologyDescription::PjRtCApiTopologyDescription(
const PJRT_Api* c_api, PJRT_TopologyDescription* c_topology, bool owned)
: compiler_(std::make_unique<PjRtCApiCompiler>(c_api)),
c_api_(c_api),
c_topology_(c_topology) {
if (owned) {
owned_c_topology_ = std::unique_ptr<PJRT_TopologyDescription,
pjrt::PJRT_TopologyDescriptionDeleter>(
c_topology, pjrt::MakeTopologyDescriptionDeleter(c_api));
}
InitAttributes();
}
absl::string_view PjRtCApiTopologyDescription::platform_name() const {
PJRT_TopologyDescription_PlatformName_Args args;
args.topology = c_topology_;
args.struct_size = PJRT_TopologyDescription_PlatformName_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_TopologyDescription_PlatformName(&args), c_api_);
return absl::string_view(args.platform_name, args.platform_name_size);
}
absl::string_view PjRtCApiTopologyDescription::platform_version() const {
PJRT_TopologyDescription_PlatformVersion_Args args;
args.struct_size = PJRT_TopologyDescription_PlatformVersion_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = c_topology_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_TopologyDescription_PlatformVersion(&args), c_api_);
return absl::string_view(args.platform_version, args.platform_version_size);
}
std::vector<std::unique_ptr<const PjRtDeviceDescription>>
PjRtCApiTopologyDescription::DeviceDescriptions() const {
PJRT_TopologyDescription_GetDeviceDescriptions_Args args;
args.struct_size =
PJRT_TopologyDescription_GetDeviceDescriptions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = c_topology_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_TopologyDescription_GetDeviceDescriptions(&args), c_api_);
std::vector<std::unique_ptr<const PjRtDeviceDescription>> out;
out.reserve(args.num_descriptions);
for (PJRT_DeviceDescription* device_desc :
absl::Span<PJRT_DeviceDescription* const>(args.descriptions,
args.num_descriptions)) {
out.push_back(
std::make_unique<PjRtCApiDeviceDescription>(c_api_, device_desc));
}
return out;
}
absl::StatusOr<std::string> PjRtCApiTopologyDescription::Serialize() const {
PJRT_TopologyDescription_Serialize_Args args;
args.struct_size = PJRT_TopologyDescription_Serialize_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = c_topology_;
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_TopologyDescription_Serialize(&args),
c_api_);
auto out = std::string(args.serialized_bytes, args.serialized_bytes_size);
args.serialized_topology_deleter(args.serialized_topology);
return out;
}
void PjRtCApiTopologyDescription::InitAttributes() {
PJRT_TopologyDescription_Attributes_Args args;
args.struct_size = PJRT_TopologyDescription_Attributes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = c_topology_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_TopologyDescription_Attributes(&args),
c_api_);
attributes_ =
pjrt::ConvertFromPjRtNamedValueList(args.attributes, args.num_attributes);
}
static absl::StatusOr<std::unique_ptr<PjRtExecutable>>
InitializeArgsAndCompileAot(const PJRT_Api* c_api, PjRtClient* client,
const CompileOptions& options,
const PjRtTopologyDescription& topology,
const std::string& code,
const std::string& format) {
PJRT_Compile_Args args;
args.struct_size = PJRT_Compile_Args_STRUCT_SIZE;
args.extension_start = nullptr;
if (client == nullptr) {
args.client = nullptr;
} else {
args.client =
tensorflow::down_cast<PjRtCApiClient*>(client)->pjrt_c_client();
}
args.topology =
tensorflow::down_cast<const PjRtCApiTopologyDescription*>(&topology)
->c_topology();
TF_ASSIGN_OR_RETURN(const CompileOptionsProto options_proto,
options.ToProto());
std::string options_str = options_proto.SerializeAsString();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
PJRT_Program program;
program.struct_size = PJRT_Program_STRUCT_SIZE;
program.extension_start = nullptr;
program.code = const_cast<char*>(code.c_str());
program.code_size = code.size();
program.format = format.c_str();
program.format_size = format.size();
args.program = &program;
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Compile(&args), c_api);
std::unique_ptr<PjRtExecutable> ret =
std::make_unique<PjRtCApiExecutable>(c_api, args.executable);
return ret;
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> PjRtCApiCompiler::Compile(
CompileOptions options, const XlaComputation& computation,
const PjRtTopologyDescription& topology, PjRtClient* client) {
std::string module_str = computation.proto().SerializeAsString();
std::string format(pjrt::kHloFormat);
return InitializeArgsAndCompileAot(c_api_, client, options, topology,
module_str, format);
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> PjRtCApiCompiler::Compile(
CompileOptions options, mlir::ModuleOp module,
const PjRtTopologyDescription& topology, PjRtClient* client) {
std::optional<int64_t> plugin_version;
if (client) {
plugin_version = client->plugin_attributes()->pjrt_c_api_minor_version;
}
TF_ASSIGN_OR_RETURN(
std::string serialized,
xla::Serialize(module, xla::GetDefaultStablehloVersion(plugin_version)));
std::string format(pjrt::kMlirFormat);
return InitializeArgsAndCompileAot(c_api_, client, options, topology,
serialized, format);
}
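// AOT usage sketch (editor addition, not from the original file): compiling
// against a topology does not require a live client, so `client` may be
// nullptr; the plugin version then stays unset. `module_op` is a placeholder
// for an already-parsed MLIR module.
//
//   PjRtCApiCompiler compiler(c_api);
//   TF_ASSIGN_OR_RETURN(
//       std::unique_ptr<PjRtExecutable> exe,
//       compiler.Compile(CompileOptions(), module_op, topology,
//                        /*client=*/nullptr));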
absl::StatusOr<std::unique_ptr<PjRtClient>> GetCApiClient(
absl::string_view device_type,
const absl::flat_hash_map<std::string, PjRtValueType>& create_options,
std::shared_ptr<KeyValueStoreInterface> kv_store) {
TF_ASSIGN_OR_RETURN(const PJRT_Api* c_api, pjrt::PjrtApi(device_type));
if (c_api == nullptr) {
return Internal("PJRT C API is nullptr for %s", device_type);
}
PJRT_Client_Create_Args init_args;
init_args.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
init_args.extension_start = nullptr;
TF_ASSIGN_OR_RETURN(std::vector<PJRT_NamedValue> c_options,
pjrt::ConvertToPjRtNamedValueList(create_options));
init_args.create_options = c_options.data();
init_args.num_options = c_options.size();
std::unique_ptr<pjrt::PJRT_KeyValueCallbackData> kv_callback_data;
if (kv_store) {
kv_callback_data = pjrt::ConvertToCKeyValueCallbacks(kv_store);
init_args.kv_get_callback = kv_callback_data->c_kv_get;
init_args.kv_get_user_arg = &kv_callback_data->kv_get_c_func;
init_args.kv_put_callback = kv_callback_data->c_kv_put;
init_args.kv_put_user_arg = &kv_callback_data->kv_put_c_func;
}
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Client_Create(&init_args), c_api);
PJRT_Client* c_client = init_args.client;
return std::unique_ptr<PjRtClient>(std::make_unique<PjRtCApiClient>(
c_api, c_client, std::move(kv_callback_data)));
}
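// Usage sketch (editor addition, not from the original file): once a plugin
// has been registered for a device type (see pjrt::SetPjrtApi), a client is
// created with the default (empty) options and no key-value store:
//
//   TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtClient> client,
//                       GetCApiClient("cpu"));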
absl::StatusOr<std::unique_ptr<PjRtTopologyDescription>> GetCApiTopology(
absl::string_view device_type, absl::string_view topology_name,
const absl::flat_hash_map<std::string, PjRtValueType>& create_options) {
TF_ASSIGN_OR_RETURN(const PJRT_Api* c_api, pjrt::PjrtApi(device_type));
if (c_api == nullptr) {
return Internal("PJRT C API is nullptr for %s", device_type);
}
return GetCApiTopology(c_api, topology_name, create_options);
}
absl::StatusOr<std::unique_ptr<PjRtTopologyDescription>> GetCApiTopology(
const PJRT_Api* c_api, absl::string_view topology_name,
const absl::flat_hash_map<std::string, PjRtValueType>& create_options) {
PJRT_TopologyDescription_Create_Args init_args;
init_args.struct_size = PJRT_TopologyDescription_Create_Args_STRUCT_SIZE;
init_args.extension_start = nullptr;
TF_ASSIGN_OR_RETURN(std::vector<PJRT_NamedValue> c_options,
pjrt::ConvertToPjRtNamedValueList(create_options));
init_args.create_options = c_options.data();
init_args.num_options = c_options.size();
init_args.topology_name = topology_name.data();
init_args.topology_name_size = topology_name.size();
RETURN_STATUS_IF_PJRT_ERROR(
c_api->PJRT_TopologyDescription_Create(&init_args), c_api);
PJRT_TopologyDescription* c_topology = init_args.topology;
return std::unique_ptr<PjRtTopologyDescription>(
std::make_unique<PjRtCApiTopologyDescription>(c_api, c_topology,
true));
}
} | #include "xla/pjrt/pjrt_c_api_client.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/client/xla_builder.h"
#include "xla/literal_util.h"
#include "xla/pjrt/c/pjrt_c_api_cpu_internal.h"
#include "xla/pjrt/pjrt_api.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
static void SetUpCpuPjRtApi() {
std::string device_type = "cpu";
auto status = ::pjrt::PjrtApi(device_type);
if (!status.ok()) {
TF_ASSERT_OK(
pjrt::SetPjrtApi(device_type, ::pjrt::cpu_plugin::GetCpuPjrtApi()));
}
}
TEST(PjRtCApiClientTest, IsDynamicDimension) {
SetUpCpuPjRtApi();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetCApiClient("cpu"));
std::vector<int32_t> data0{1, 2, 3, 4, 5, 6};
Shape shape0 = ShapeUtil::MakeShape(S32, {2, 3});
TF_ASSERT_OK_AND_ASSIGN(
auto param0,
client->BufferFromHostBuffer(
data0.data(), shape0.element_type(), shape0.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
std::vector<int32_t> data1{2};
Shape shape1 = ShapeUtil::MakeShape(S32, {});
TF_ASSERT_OK_AND_ASSIGN(
auto param1,
client->BufferFromHostBuffer(
data1.data(), shape1.element_type(), shape1.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
XlaBuilder builder("DynamicReshape");
auto inp_0 = Parameter(&builder, 0, shape0, "input0");
auto inp_1 = Parameter(&builder, 1, shape1, "input1");
std::vector<bool> dims_are_dynamic = {false, true};
auto reshaped =
DynamicReshape(inp_0, {inp_1, inp_1}, {2, 3}, dims_are_dynamic);
auto computation = builder.Build(reshaped).value();
std::unique_ptr<PjRtLoadedExecutable> executable =
client->Compile(computation, CompileOptions()).value();
ExecuteOptions execute_options;
execute_options.non_donatable_input_indices = {0};
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> results =
executable->Execute({{param0.get(), param1.get()}}, execute_options)
.value();
ASSERT_EQ(results[0].size(), 1);
auto* result_buffer = results[0][0].get();
auto is_dynamic_dimension = result_buffer->is_dynamic_dimension();
EXPECT_THAT(is_dynamic_dimension,
::testing::ElementsAreArray(dims_are_dynamic));
}
TEST(PjRtCApiClientTest, PlatformId) {
SetUpCpuPjRtApi();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetCApiClient("cpu"));
EXPECT_EQ(client->platform_name(), xla::CpuName());
EXPECT_EQ(client->platform_id(), xla::CpuId());
}
TEST(PjRtCApiClientTest, EmptyExecutableFingerprint) {
SetUpCpuPjRtApi();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetCApiClient("cpu"));
Shape shape = ShapeUtil::MakeShapeWithType<float>({4});
XlaBuilder builder("sum");
auto inp_0 = Parameter(&builder, 0, shape, "input0");
auto inp_1 = Parameter(&builder, 1, shape, "input1");
auto sum = Add(inp_0, inp_1);
builder.SetUpAlias({}, 0, {});
auto computation = builder.Build(sum).value();
std::unique_ptr<PjRtLoadedExecutable> executable =
client->Compile(computation, CompileOptions()).value();
PjRtCApiClient* c_client = dynamic_cast<PjRtCApiClient*>(client.get());
ASSERT_NE(c_client, nullptr);
if (c_client->pjrt_c_api()->pjrt_api_version.minor_version >= 35) {
EXPECT_FALSE(executable->FingerprintExecutable().ok());
} else {
EXPECT_EQ(executable->FingerprintExecutable().status().code(),
absl::StatusCode::kUnimplemented);
}
}
TEST(PjRtClientTest, CreateViewAndCopyToDeviceAsyncExternalCpuOnly) {
SetUpCpuPjRtApi();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetCApiClient("cpu"));
ASSERT_GT(client->addressable_devices().size(), 1);
std::vector<int32_t> data(4, 0);
auto* data_ptr = data.data();
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->CreateViewOfDeviceBuffer(
data_ptr, shape, client->addressable_devices()[0],
[data = std::move(data)]() mutable {}));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> result,
buffer->CopyToDevice(client->addressable_devices()[1]));
buffer.reset();
ASSERT_TRUE(result);
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_c_api_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_c_api_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d58050f3-3eca-4d91-8bb5-3561892698dc | cpp | tensorflow/tensorflow | calibration_statistics_saver_op | tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc | tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op_test.cc | #include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_collector_average_min_max.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_collector_base.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_collector_histogram.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_collector_min_max.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tsl/platform/file_system.h"
namespace tensorflow {
namespace {
using ::stablehlo::quantization::CalibrationOptions;
using CalibrationMethod =
::stablehlo::quantization::CalibrationOptions_CalibrationMethod;
using ::tensorflow::calibrator::CalibrationStatistics;
using ::tensorflow::calibrator::CalibrationStatisticsCollectorAverageMinMax;
using ::tensorflow::calibrator::CalibrationStatisticsCollectorBase;
using ::tensorflow::calibrator::CalibrationStatisticsCollectorHistogram;
using ::tensorflow::calibrator::CalibrationStatisticsCollectorMinMax;
using ::tensorflow::calibrator::CalibrationStatisticsMap;
}  // namespace
REGISTER_OP("CalibrationStatisticsSaver")
.Input("args: Tin")
.Attr("Tin: list(type) >= 0")
.Attr("ids: list(string) >= 1")
.Attr("calibration_methods: list(int) >= 1")
.Attr("output_file_path: string")
.SetIsStateful()
.Doc(R"doc(
Aggregates and saves the calibration statistics data.
This op collects the outputs of multiple CustomAggregator ops, which include
`min`, `max`, and `histogram`. It then aggregates them according to the
calibration method and saves the result to the given file path as a binary
proto file.)doc");
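// Wiring sketch (editor addition, not from the original file): the op expects
// exactly three inputs per entry in `ids`, in (min, max, histogram) order,
// and one calibration method per id. A node saving statistics for ids
// {"0", "1"} would be built roughly like this; input names and the output
// path are placeholders.
//
//   std::vector<NodeDefBuilder::NodeOut> inputs = {
//       {"min0", 0, DT_FLOAT}, {"max0", 0, DT_FLOAT}, {"hist0", 0, DT_INT64},
//       {"min1", 0, DT_FLOAT}, {"max1", 0, DT_FLOAT}, {"hist1", 0, DT_INT64}};
//   NodeDefBuilder("saver", "CalibrationStatisticsSaver")
//       .Input(inputs)
//       .Attr("ids", std::vector<std::string>{"0", "1"})
//       .Attr("calibration_methods", std::vector<int32_t>{1, 1})
//       .Attr("output_file_path", "/tmp/statistics.pb")
//       .Finalize(&node_def);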
class CalibrationStatisticsSaverOp : public OpKernel {
public:
explicit CalibrationStatisticsSaverOp(
absl::Nonnull<OpKernelConstruction*> context)
: OpKernel(context) {
std::string output_file_path;
OP_REQUIRES_OK(context,
context->GetAttr("output_file_path", &output_file_path));
OP_REQUIRES_OK(context, context->env()->NewWritableFile(output_file_path,
&output_file_));
OP_REQUIRES_OK(context, context->GetAttr("ids", &ids_));
OP_REQUIRES_OK(context, context->GetAttr("calibration_methods",
&calibration_methods_));
OP_REQUIRES(
context, ids_.size() == calibration_methods_.size(),
absl::AbortedError(
"The `ids` and `calibration_methods` must have the same size."));
OP_REQUIRES(context, context->num_inputs() == ids_.size() * 3,
absl::AbortedError("The number of inputs must be three times "
"the size of the `ids` list."));
for (int i = 0; i < ids_.size(); ++i) {
OP_REQUIRES(context, context->input_type(i * 3) == DT_FLOAT,
absl::AbortedError("The input `min` must have float type."));
OP_REQUIRES(context, context->input_type(i * 3 + 1) == DT_FLOAT,
absl::AbortedError("The input `max` must have float type."));
OP_REQUIRES(
context, context->input_type(i * 3 + 2) == DT_INT64,
absl::AbortedError("The input `histogram` must have int64 type."));
}
}
~CalibrationStatisticsSaverOp() override {
CalibrationStatisticsMap statistics_map;
for (const auto& [id, collector] : id_to_collector_) {
std::optional<CalibrationStatistics> statistics =
collector->GetStatistics();
if (!statistics.has_value()) continue;
statistics_map.mutable_statistics()->emplace(id, std::move(*statistics));
}
if (auto status = output_file_->Append(statistics_map.SerializeAsString());
!status.ok()) {
LOG(ERROR) << "Failed to write calibration statistics: "
<< status.message();
}
if (auto status = output_file_->Close(); !status.ok()) {
LOG(ERROR) << "Failed to close calibration statistics file: "
<< status.message();
}
}
void Compute(absl::Nonnull<OpKernelContext*> context) override {
for (int idx = 0; idx < ids_.size(); ++idx) {
AssignIfNotExists(
ids_[idx], static_cast<CalibrationMethod>(calibration_methods_[idx]));
const Tensor& min_tensor = context->input(3 * idx);
const Tensor& max_tensor = context->input(3 * idx + 1);
const Tensor& histogram_tensor = context->input(3 * idx + 2);
const float min_value = min_tensor.scalar<float>()();
const float max_value = max_tensor.scalar<float>()();
auto histogram_flat = histogram_tensor.flat<int64_t>();
absl::Span<const int64_t> histogram_data =
absl::MakeSpan(histogram_flat.data(), histogram_flat.size());
id_to_collector_[ids_[idx]]->Collect(min_value, max_value,
histogram_data);
}
}
private:
std::unique_ptr<tsl::WritableFile> output_file_;
std::vector<std::string> ids_;
std::vector<int32_t> calibration_methods_;
absl::flat_hash_map<std::string,
std::unique_ptr<CalibrationStatisticsCollectorBase>>
id_to_collector_;
void AssignIfNotExists(absl::string_view id,
const CalibrationMethod calibration_method) {
std::unique_ptr<CalibrationStatisticsCollectorBase>& collector =
id_to_collector_[id];
if (collector != nullptr) return;
switch (calibration_method) {
case CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX:
collector =
std::make_unique<CalibrationStatisticsCollectorAverageMinMax>();
break;
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY:
collector = std::make_unique<CalibrationStatisticsCollectorHistogram>();
break;
case CalibrationOptions::CALIBRATION_METHOD_MIN_MAX:
default:
collector = std::make_unique<CalibrationStatisticsCollectorMinMax>();
}
}
};
REGISTER_KERNEL_BUILDER(Name("CalibrationStatisticsSaver").Device(DEVICE_CPU),
CalibrationStatisticsSaverOp);
} | #include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::stablehlo::quantization::CalibrationOptions;
using ::tensorflow::calibrator::CalibrationStatistics;
using ::tensorflow::calibrator::CalibrationStatisticsMap;
using ::testing::Contains;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Key;
using ::testing::SizeIs;
using ::tsl::testing::StatusIs;
class CalibrationStatisticsSaverTest : public OpsTestBase {};
TEST_F(CalibrationStatisticsSaverTest, MissingOutputPath) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Finalize(node_def()));
ASSERT_THAT(InitOp(),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("NodeDef missing attr 'output_file_path'")));
}
TEST_F(CalibrationStatisticsSaverTest, WrongNumInputs) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", "/tmp/statistics.pbtxt")
.Finalize(node_def()));
ASSERT_THAT(InitOp(),
StatusIs(tsl::error::ABORTED,
HasSubstr("The number of inputs must be three times "
"the size of the `ids` list.")));
}
TEST_F(CalibrationStatisticsSaverTest, WrongInputTypes) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_FLOAT);
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", "/tmp/statistics.pbtxt")
.Finalize(node_def()));
ASSERT_THAT(
InitOp(),
StatusIs(tsl::error::ABORTED,
HasSubstr("The input `histogram` must have int64 type")));
}
TEST_F(CalibrationStatisticsSaverTest, SimpleMinMax) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
const std::string dir = testing::TmpDir();
const std::string output_file_path = io::JoinPath(dir, "statistics.pbtxt");
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", output_file_path)
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({0}), {});
TF_CHECK_OK(RunOpKernel());
kernel_.reset();
CalibrationStatisticsMap statistics_map;
TF_CHECK_OK(
ReadBinaryProto(Env::Default(), output_file_path, &statistics_map));
ASSERT_THAT(statistics_map.statistics(), SizeIs(1));
ASSERT_THAT(statistics_map.statistics(), ElementsAre(Key("1")));
const CalibrationStatistics& stats = statistics_map.statistics().at("1");
ASSERT_TRUE(stats.has_min_max_statistics());
EXPECT_FLOAT_EQ(stats.min_max_statistics().global_min(), 1.f);
EXPECT_FLOAT_EQ(stats.min_max_statistics().global_max(), 5.f);
}
TEST_F(CalibrationStatisticsSaverTest, SimpleAverageMinMax) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
const std::string dir = testing::TmpDir();
const std::string output_file_path = io::JoinPath(dir, "statistics.pbtxt");
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", output_file_path)
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({0}), {});
TF_CHECK_OK(RunOpKernel());
kernel_.reset();
CalibrationStatisticsMap statistics_map;
TF_CHECK_OK(
ReadBinaryProto(Env::Default(), output_file_path, &statistics_map));
ASSERT_THAT(statistics_map.statistics(), SizeIs(1));
ASSERT_THAT(statistics_map.statistics(), ElementsAre(Key("1")));
const CalibrationStatistics& stats = statistics_map.statistics().at("1");
ASSERT_TRUE(stats.has_average_min_max_statistics());
EXPECT_FLOAT_EQ(stats.average_min_max_statistics().min_sum(), 1.f);
EXPECT_FLOAT_EQ(stats.average_min_max_statistics().max_sum(), 5.f);
EXPECT_EQ(stats.average_min_max_statistics().num_samples(), 1);
}
TEST_F(CalibrationStatisticsSaverTest, SimpleHistogram) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
const std::string dir = testing::TmpDir();
const std::string output_file_path = io::JoinPath(dir, "statistics.pbtxt");
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", output_file_path)
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({8}), {1, 4, 6, 7, 3, 2, 1, 0});
TF_CHECK_OK(RunOpKernel());
kernel_.reset();
CalibrationStatisticsMap statistics_map;
TF_CHECK_OK(
ReadBinaryProto(Env::Default(), output_file_path, &statistics_map));
ASSERT_THAT(statistics_map.statistics(), SizeIs(1));
ASSERT_THAT(statistics_map.statistics(), ElementsAre(Key("1")));
const CalibrationStatistics& stats = statistics_map.statistics().at("1");
ASSERT_TRUE(stats.has_histogram_statistics());
EXPECT_FLOAT_EQ(stats.histogram_statistics().bin_width(), 0.5f);
EXPECT_FLOAT_EQ(stats.histogram_statistics().lower_bound(), 1.f);
EXPECT_THAT(stats.histogram_statistics().hist_freq(),
ElementsAre(1, 4, 6, 7, 3, 2, 1));
}
TEST_F(CalibrationStatisticsSaverTest, MultipleStats) {
std::vector<std::string> ids{"1", "2"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX,
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
const std::string dir = testing::TmpDir();
const std::string output_file_path = io::JoinPath(dir, "statistics.pbtxt");
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", output_file_path)
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({0}), {});
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({8}), {1, 4, 6, 7, 3, 2, 1, 0});
TF_CHECK_OK(RunOpKernel());
kernel_.reset();
CalibrationStatisticsMap statistics_map;
TF_CHECK_OK(
ReadBinaryProto(Env::Default(), output_file_path, &statistics_map));
ASSERT_THAT(statistics_map.statistics(), SizeIs(2));
ASSERT_THAT(statistics_map.statistics(), Contains(Key("1")));
ASSERT_THAT(statistics_map.statistics(), Contains(Key("2")));
const CalibrationStatistics& stats_1 = statistics_map.statistics().at("1");
ASSERT_TRUE(stats_1.has_average_min_max_statistics());
EXPECT_FLOAT_EQ(stats_1.average_min_max_statistics().min_sum(), 1.f);
EXPECT_FLOAT_EQ(stats_1.average_min_max_statistics().max_sum(), 5.f);
EXPECT_EQ(stats_1.average_min_max_statistics().num_samples(), 1);
const CalibrationStatistics& stats_2 = statistics_map.statistics().at("2");
ASSERT_TRUE(stats_2.has_histogram_statistics());
EXPECT_FLOAT_EQ(stats_2.histogram_statistics().bin_width(), 0.5f);
EXPECT_FLOAT_EQ(stats_2.histogram_statistics().lower_bound(), 1.f);
EXPECT_THAT(stats_2.histogram_statistics().hist_freq(),
ElementsAre(1, 4, 6, 7, 3, 2, 1));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0bc63f44-f8b9-4765-a91f-c9b302a20911 | cpp | tensorflow/tensorflow | hlo_schedule | third_party/xla/xla/hlo/ir/hlo_schedule.cc | third_party/xla/xla/service/hlo_schedule_test.cc | #include "xla/hlo/ir/hlo_schedule.h"
#include <cstdint>
#include <ostream>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/map_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/util.h"
namespace xla {
absl::StatusOr<HloSchedule> HloSchedule::CreateFromProto(
const HloModule* module, const HloScheduleProto& proto) {
absl::flat_hash_map<int64_t, const HloComputation*> id_to_computation;
for (const HloComputation* computation : module->computations()) {
id_to_computation[computation->unique_id()] = computation;
}
HloSchedule schedule(module);
for (const auto& id_sequence : proto.sequences()) {
int64_t computation_id = id_sequence.first;
auto comp_it = id_to_computation.find(computation_id);
if (comp_it == id_to_computation.end()) {
continue;
}
const HloComputation* computation = comp_it->second;
absl::flat_hash_map<int64_t, HloInstruction*> id_to_instruction;
for (HloInstruction* instruction : computation->instructions()) {
id_to_instruction[instruction->unique_id()] = instruction;
}
HloInstructionSequence& sequence =
schedule.GetOrCreateSequence(computation);
for (const int64_t instruction_id : id_sequence.second.instruction_ids()) {
auto instr_it = id_to_instruction.find(instruction_id);
TF_RET_CHECK(instr_it != id_to_instruction.end())
<< "No instruction exists in HLO computation " << computation->name()
<< " with id " << instruction_id;
sequence.push_back(instr_it->second);
}
}
TF_RETURN_IF_ERROR(schedule.Verify());
return std::move(schedule);
}
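// Round-trip sketch (editor addition, not from the original file): a schedule
// serialized with ToProto() (below) can be restored against the same module:
//
//   TF_ASSIGN_OR_RETURN(HloScheduleProto proto, schedule.ToProto());
//   TF_ASSIGN_OR_RETURN(HloSchedule restored,
//                       HloSchedule::CreateFromProto(module, proto));
//
// Sequences whose computation id no longer exists in `module` are silently
// skipped (the `continue` above); the final Verify() still requires every
// non-fusion computation in `module` to have a sequence.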
absl::StatusOr<HloScheduleProto> HloSchedule::ToProto() const {
TF_RETURN_IF_ERROR(Verify());
HloScheduleProto proto;
for (const auto& id_sequence : sequences_) {
int64_t computation_id = id_sequence.first;
const HloInstructionSequence& sequence = id_sequence.second;
HloScheduleProto::InstructionSequence& proto_sequence =
(*proto.mutable_sequences())[computation_id];
proto_sequence.mutable_instruction_ids()->Reserve(sequence.size());
for (const int64_t id : sequence.ids()) {
proto_sequence.add_instruction_ids(id);
}
}
return std::move(proto);
}
void HloSchedule::set_sequence(const HloComputation* computation,
absl::Span<HloInstruction* const> sequence) {
set_sequence(computation, HloInstructionSequence(sequence));
}
void HloSchedule::set_sequence(const HloComputation* computation,
HloInstructionSequence sequence) {
CHECK(computation->parent() == module_);
sequences_[computation->unique_id()] = std::move(sequence);
execution_threads_[computation->unique_id()] =
std::string(computation->execution_thread());
}
HloInstructionSequence& HloSchedule::GetOrCreateSequence(
const HloComputation* computation) {
auto it = sequences_.find(computation->unique_id());
if (it == sequences_.end()) {
CHECK(computation->parent() == module_);
execution_threads_[computation->unique_id()] =
std::string(computation->execution_thread());
return sequences_[computation->unique_id()];
} else {
return it->second;
}
}
const HloInstructionSequence& HloSchedule::sequence(
const HloComputation* computation) const {
return sequences_.at(computation->unique_id());
}
absl::Status HloSchedule::UpdateComputationSchedule(
const HloComputation* computation) {
absl::flat_hash_map<int, HloInstruction*> id_to_instruction;
for (HloInstruction* instruction : computation->instructions()) {
InsertOrDie(&id_to_instruction, instruction->unique_id(), instruction);
}
absl::flat_hash_set<int> ids_in_schedule;
for (int id : sequences_.at(computation->unique_id()).ids()) {
InsertOrDie(&ids_in_schedule, id);
}
absl::flat_hash_map<const HloInstruction*, std::vector<HloInstruction*>>
new_instruction_uses;
absl::flat_hash_map<const HloInstruction*, int> unscheduled_operand_count;
std::queue<HloInstruction*> worklist;
for (HloInstruction* instruction : computation->instructions()) {
if (!ids_in_schedule.contains(instruction->unique_id())) {
if (instruction->operands().empty()) {
worklist.push(instruction);
} else {
for (const HloInstruction* operand : instruction->operands()) {
new_instruction_uses[operand].push_back(instruction);
}
unscheduled_operand_count[instruction] = instruction->operand_count();
}
}
}
HloInstructionSequence new_sequence;
auto schedule_worklist = [&]() {
while (!worklist.empty()) {
HloInstruction* instruction = worklist.front();
worklist.pop();
new_sequence.push_back(instruction);
std::vector<HloInstruction*>* new_users =
tsl::gtl::FindOrNull(new_instruction_uses, instruction);
if (new_users != nullptr) {
for (HloInstruction* new_user : *new_users) {
unscheduled_operand_count.at(new_user)--;
CHECK_GE(unscheduled_operand_count.at(new_user), 0);
if (unscheduled_operand_count.at(new_user) == 0) {
worklist.push(new_user);
}
}
}
}
};
schedule_worklist();
for (int id : sequences_.at(computation->unique_id()).ids()) {
auto it = id_to_instruction.find(id);
if (it == id_to_instruction.end()) {
continue;
}
worklist.push(it->second);
schedule_worklist();
}
set_sequence(computation, std::move(new_sequence));
return absl::OkStatus();
}
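// Worked example (editor addition, not from the original file): suppose the
// old sequence was [p0, add] and the computation gained `c = constant(...)`
// and `neg = negate(add)`. `c` has no operands, so it enters the worklist and
// is scheduled first. `neg` waits in `new_instruction_uses[add]` until the
// walk over the old sequence schedules `add`, at which point its unscheduled
// operand count hits zero and it is appended, giving [c, p0, add, neg].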
absl::Status HloSchedule::Update(
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloComputation*> nonfusion_computations =
module_->MakeNonfusionComputations(execution_threads);
for (const HloComputation* computation : nonfusion_computations) {
if (!is_computation_scheduled(computation)) {
GetOrCreateSequence(computation);
TF_RETURN_IF_ERROR(UpdateComputationSchedule(computation));
}
}
auto sum_of_sequences_for_threads = [&]() -> int64_t {
if (execution_threads.empty()) {
return sequences_.size();
}
int64_t sequences_num_for_threads = 0;
for (const auto& [thread_name, sequence_num] :
num_sequences_by_execution_thread()) {
sequences_num_for_threads +=
execution_threads.contains(thread_name) ? sequence_num : 0;
}
return sequences_num_for_threads;
};
int64_t sequence_sum = sum_of_sequences_for_threads();
if (sequence_sum > nonfusion_computations.size()) {
absl::flat_hash_set<int64_t> nonfusion_computations_ids;
for (const HloComputation* computation : nonfusion_computations) {
nonfusion_computations_ids.insert(computation->unique_id());
}
for (auto it = sequences_.begin(); it != sequences_.end();) {
std::string sequence_thread_name = tsl::gtl::FindWithDefault(
execution_threads_, it->first, HloInstruction::kMainExecutionThread);
bool is_thread_included =
execution_threads.empty() ||
execution_threads.contains(sequence_thread_name);
if (!nonfusion_computations_ids.contains(it->first) &&
is_thread_included) {
execution_threads_.erase(it->first);
sequences_.erase(it++);
} else {
++it;
}
}
}
sequence_sum = sum_of_sequences_for_threads();
CHECK_EQ(sequence_sum, nonfusion_computations.size());
for (const HloComputation* computation : nonfusion_computations) {
TF_RETURN_IF_ERROR(UpdateComputationSchedule(computation));
}
TF_RETURN_IF_ERROR(Verify());
return absl::OkStatus();
}
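// Added note: Update() first creates and fills sequences for computations
// that are new to the module, then erases sequences whose computations no
// longer exist (scoped to the requested execution threads), and finally
// re-runs UpdateComputationSchedule on every remaining computation before
// verifying the result.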
absl::flat_hash_map<std::string, int64_t>
HloSchedule::num_sequences_by_execution_thread() const {
absl::flat_hash_map<std::string, int64_t> sequence_num_by_execution_threads;
for (const auto& id_sequence_item : sequences_) {
++sequence_num_by_execution_threads[tsl::gtl::FindWithDefault(
execution_threads_, id_sequence_item.first,
HloInstruction::kMainExecutionThread)];
}
return sequence_num_by_execution_threads;
}
absl::Status HloSchedule::Verify() const {
VLOG(2) << "VerifySchedule()";
XLA_VLOG_LINES(2, ToString());
absl::flat_hash_map<std::string, int64_t> sequence_num_by_execution_threads =
num_sequences_by_execution_thread();
for (const auto& [thread_name, sequence_size] :
sequence_num_by_execution_threads) {
std::vector<HloComputation*> nonfusion_computations =
module_->MakeNonfusionComputations({thread_name});
TF_RET_CHECK(nonfusion_computations.size() == sequence_size)
<< "For thread " << thread_name << ", schedule has " << sequence_size
<< " sequences, but module has " << nonfusion_computations.size()
<< " non-fusion computations for thread " << thread_name;
for (const HloComputation* computation : nonfusion_computations) {
TF_RET_CHECK(sequences_.contains(computation->unique_id()))
<< "Computation " << computation->name()
<< " missing from HLO schedule.";
}
for (const HloComputation* computation : nonfusion_computations) {
absl::flat_hash_map<const HloInstruction*, int> instruction_position;
int pos = 0;
for (const HloInstruction* instruction :
sequence(computation).instructions()) {
TF_RET_CHECK(instruction_position.insert({instruction, pos}).second)
<< "Instruction " << instruction->name()
<< " appears more than once in the schedule";
pos++;
}
TF_RET_CHECK(instruction_position.size() ==
computation->instruction_count())
<< "Schedule for computation " << computation->name() << " has "
<< instruction_position.size() << " instructions, expected "
<< computation->instruction_count();
for (const HloInstruction* instruction : computation->instructions()) {
TF_RET_CHECK(instruction_position.contains(instruction))
<< "Instruction " << instruction->name() << " is not in schedule";
}
for (const HloInstruction* instruction : computation->instructions()) {
for (const HloInstruction* operand : instruction->operands()) {
TF_RET_CHECK(instruction_position.at(operand) <
instruction_position.at(instruction))
<< "Instruction " << instruction->name()
<< " is not scheduled after its operand " << operand->name();
}
for (const HloInstruction* pred : instruction->control_predecessors()) {
TF_RET_CHECK(instruction_position.at(pred) <
instruction_position.at(instruction))
<< "Instruction " << instruction->name()
<< " is not scheduled after its control predecessor "
<< pred->name();
}
}
}
}
return absl::OkStatus();
}
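// Added note: Verify() enforces, per execution thread, that the schedule has
// exactly one sequence per non-fusion computation, that every instruction of
// each computation appears in its sequence exactly once, and that each
// instruction is scheduled after all of its operands and control
// predecessors.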
namespace {
const HloComputation* IdToComputation(const HloModule* module, int64_t id) {
for (const HloComputation* computation : module->computations()) {
if (computation->unique_id() == id) {
return computation;
}
}
return nullptr;
}
}  // namespace
std::string HloSchedule::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("HloSchedule");
std::vector<int64_t> sorted_ids;
for (const auto& id_sequence : sequences_) {
sorted_ids.push_back(id_sequence.first);
}
absl::c_sort(sorted_ids);
for (const int64_t id : sorted_ids) {
const HloComputation* computation = IdToComputation(module_, id);
const HloInstructionSequence& sequence = sequences_.at(id);
if (computation == nullptr) {
pieces.push_back(absl::StrFormat(
"computation with id %d (no longer in HLO module):", id));
for (int id : sequence.ids()) {
pieces.push_back(absl::StrCat(" ", id));
}
} else {
pieces.push_back(absl::StrFormat("computation %s:", computation->name()));
for (const HloInstruction* instruction : sequence.instructions()) {
pieces.push_back(absl::StrCat(" ", instruction->name()));
}
}
}
return absl::StrJoin(pieces, "\n");
}
std::ostream& operator<<(std::ostream& out, const HloSchedule& schedule) {
return out << schedule.ToString();
}
} | #include "xla/hlo/ir/hlo_schedule.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloScheduleTest : public HloTestBase {};
TEST_F(HloScheduleTest, UpdateScheduleUnchangedModule) {
const std::string module_str = R"(
HloModule UpdateScheduleUnchanged
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
const auto& entry_schedule =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(entry_schedule.size(), 6);
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(entry_schedule,
schedule.sequence(module->entry_computation()).instructions());
}
TEST_F(HloScheduleTest, UpdateScheduleWithNewInstructions) {
const std::string module_str = R"(
HloModule UpdateScheduleWithNewInstructions
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
const Shape shape = entry->root_instruction()->shape();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
HloInstruction* sub = entry->AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, constant, entry->root_instruction()));
entry->set_root_instruction(sub);
auto in_schedule = [&](const HloInstruction* hlo) {
return absl::c_linear_search(schedule.sequence(entry).instructions(), hlo);
};
EXPECT_EQ(schedule.sequence(entry).size(), 6);
EXPECT_FALSE(in_schedule(constant));
EXPECT_FALSE(in_schedule(sub));
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 8);
EXPECT_TRUE(in_schedule(constant));
EXPECT_TRUE(in_schedule(sub));
}
TEST_F(HloScheduleTest, UpdateScheduleWithAddedAndDeletedInstruction) {
const std::string module_str = R"(
HloModule UpdateScheduleWithAddedAndDeletedInstruction
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
HloInstruction* new_root = entry->AddInstruction(
HloInstruction::CreateBinary(constant->shape(), HloOpcode::kSubtract,
constant, entry->parameter_instruction(0)));
entry->set_root_instruction(new_root);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(entry).size(), 6);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 4);
}
TEST_F(HloScheduleTest, UpdateScheduleWithCompletelyReplacedModule) {
const std::string module_str = R"(
HloModule UpdateScheduleWithCompletelyReplacedModule
ENTRY main {
a = f32[] constant(42.0)
b = f32[] constant(123.0)
ROOT sum = f32[] add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* new_root = entry->AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
entry->set_root_instruction(new_root);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(entry).size(), 3);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 2);
}
TEST_F(HloScheduleTest, UpdateScheduleWithMultipleComputations) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %WhileLoop () -> s32[] {
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(),
sizeof(void*));
}));
const HloInstruction* xla_while =
module->entry_computation()->root_instruction()->operand(0);
HloComputation* body = xla_while->while_body();
HloComputation* cond = xla_while->while_condition();
cond->set_root_instruction(cond->AddInstruction(
HloInstruction::CreateUnary(ShapeUtil::MakeShape(PRED, {}),
HloOpcode::kNot, cond->root_instruction())));
body->set_root_instruction(body->parameter_instruction(0));
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(body).size(), 7);
EXPECT_EQ(schedule.sequence(cond).size(), 4);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(body).size(), 1);
EXPECT_EQ(schedule.sequence(cond).size(), 5);
}
TEST_F(HloScheduleTest, UpdateScheduleComputationRemoved) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %WhileLoop () -> s32[] {
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(),
sizeof(void*));
}));
HloInstruction* xla_while =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloInstruction* init = xla_while->mutable_operand(0);
TF_ASSERT_OK(xla_while->ReplaceAllUsesWith(init));
HloDCE dce;
ASSERT_EQ(module->computation_count(), 3);
TF_ASSERT_OK(dce.Run(module.get()).status());
ASSERT_EQ(module->computation_count(), 1);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
}
TEST_F(HloScheduleTest, UpdateScheduleComputationRemovedWithMultiThreads) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %WhileLoop () -> (s32[], f32[10]) {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
%async-done = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
%main_res = s32[] get-tuple-element((s32[], token[]) %while), index=0
ROOT %res = tuple(%main_res, %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(
buffer.shape(),
sizeof(void*));
},
{}, {HloInstruction::kMainExecutionThread}));
HloInstruction* xla_while = module->entry_computation()
->root_instruction()
->mutable_operand(0)
->mutable_operand(0);
HloInstruction* init = xla_while->mutable_operand(0);
TF_ASSERT_OK(xla_while->ReplaceAllUsesWith(init));
HloDCE dce;
ASSERT_EQ(module->computation_count(), 4);
TF_ASSERT_OK(dce.Run(module.get()).status());
ASSERT_EQ(module->computation_count(), 2);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update({HloInstruction::kMainExecutionThread}));
TF_ASSERT_OK(schedule.Verify());
ASSERT_EQ(module->MakeNonfusionComputations({"parallel_thread"}).size(), 1);
ASSERT_FALSE(schedule.is_computation_scheduled(
module->MakeNonfusionComputations({"parallel_thread"}).front()));
}
TEST_F(HloScheduleTest, UpdateScheduleAddComputation) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %WhileLoop () -> (s32[], f32[10]) {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
%async-done = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
%main_res = s32[] get-tuple-element((s32[], token[]) %while), index=0
ROOT %res = tuple(%main_res, %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(
buffer.shape(),
sizeof(void*));
},
{}, {HloInstruction::kMainExecutionThread}));
HloComputation* entry_computation = module->entry_computation();
HloComputation::Builder comp_builder("fusion_computation");
HloInstruction* entry_comp_parameter_0 =
entry_computation->parameter_instruction(0);
HloInstruction* entry_comp_parameter_1 =
entry_computation->parameter_instruction(1);
std::vector<HloInstruction*> instructions_in_new_computation;
HloInstruction* added_instruction =
entry_computation->AddInstruction(HloInstruction::CreateBinary(
entry_comp_parameter_0->shape(), HloOpcode::kMultiply,
entry_comp_parameter_0, entry_comp_parameter_1));
instructions_in_new_computation.push_back(added_instruction);
HloInstruction* call =
entry_computation->CreateCallInstruction(instructions_in_new_computation);
Shape completion_sflag_shape = ShapeUtil::MakeScalarShape(U32);
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * async_done,
entry_computation->CreateAsyncInstructions(
call, {completion_sflag_shape}, entry_computation->execution_thread(),
true, true));
HloInstruction* result_2 =
entry_computation->root_instruction()->mutable_operand(1);
HloInstruction* modified_result_2 =
entry_computation->AddInstruction(HloInstruction::CreateBinary(
result_2->shape(), HloOpcode::kAdd, async_done, result_2));
TF_ASSERT_OK(result_2->ReplaceAllUsesWith(modified_result_2));
auto added_computation_name =
async_done->operand(0)->called_computations()[0]->name();
ASSERT_FALSE(schedule.is_computation_scheduled(
module->GetComputationWithName(added_computation_name)));
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update({HloInstruction::kMainExecutionThread}));
TF_ASSERT_OK(schedule.Verify());
ASSERT_TRUE(schedule.is_computation_scheduled(
module->GetComputationWithName(added_computation_name)));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_schedule.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_schedule_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ad50b9e-3347-4ebf-a039-ec6826ed5c98 | cpp | google/quiche | connect_ip_datagram_payload | quiche/common/masque/connect_ip_datagram_payload.cc | quiche/common/masque/connect_ip_datagram_payload_test.cc | #include "quiche/common/masque/connect_ip_datagram_payload.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_data_reader.h"
#include "quiche/common/quiche_data_writer.h"
namespace quiche {
std::unique_ptr<ConnectIpDatagramPayload> ConnectIpDatagramPayload::Parse(
absl::string_view datagram_payload) {
QuicheDataReader data_reader(datagram_payload);
uint64_t context_id;
if (!data_reader.ReadVarInt62(&context_id)) {
QUICHE_DVLOG(1) << "Could not parse malformed IP proxy payload";
return nullptr;
}
if (ContextId{context_id} == ConnectIpDatagramIpPacketPayload::kContextId) {
return std::make_unique<ConnectIpDatagramIpPacketPayload>(
data_reader.ReadRemainingPayload());
} else {
return std::make_unique<ConnectIpDatagramUnknownPayload>(
ContextId{context_id}, data_reader.ReadRemainingPayload());
}
}
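// Added usage sketch (mirrors the unit test; not part of the original
// source): context ID 0 dispatches to the IP-packet payload type, while any
// other context ID yields an "unknown" payload carrying the remaining bytes.
//
//   auto parsed = ConnectIpDatagramPayload::Parse(
//       absl::string_view("\x00packet", 7));  // varint 0 + raw packet bytes
//   // parsed->GetType() == ConnectIpDatagramPayload::Type::kIpPacket
//   // parsed->GetIpProxyingPayload() == "packet"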
std::string ConnectIpDatagramPayload::Serialize() const {
std::string buffer(SerializedLength(), '\0');
QuicheDataWriter writer(buffer.size(), buffer.data());
bool result = SerializeTo(writer);
QUICHE_DCHECK(result);
QUICHE_DCHECK_EQ(writer.remaining(), 0u);
return buffer;
}
ConnectIpDatagramIpPacketPayload::ConnectIpDatagramIpPacketPayload(
absl::string_view ip_packet)
: ip_packet_(ip_packet) {}
ConnectIpDatagramPayload::ContextId
ConnectIpDatagramIpPacketPayload::GetContextId() const {
return kContextId;
}
ConnectIpDatagramPayload::Type ConnectIpDatagramIpPacketPayload::GetType()
const {
return Type::kIpPacket;
}
absl::string_view ConnectIpDatagramIpPacketPayload::GetIpProxyingPayload()
const {
return ip_packet_;
}
size_t ConnectIpDatagramIpPacketPayload::SerializedLength() const {
return ip_packet_.size() +
QuicheDataWriter::GetVarInt62Len(uint64_t{kContextId});
}
bool ConnectIpDatagramIpPacketPayload::SerializeTo(
QuicheDataWriter& writer) const {
if (!writer.WriteVarInt62(uint64_t{kContextId})) {
return false;
}
if (!writer.WriteStringPiece(ip_packet_)) {
return false;
}
return true;
}
ConnectIpDatagramUnknownPayload::ConnectIpDatagramUnknownPayload(
ContextId context_id, absl::string_view ip_proxying_payload)
: context_id_(context_id), ip_proxying_payload_(ip_proxying_payload) {
if (context_id == ConnectIpDatagramIpPacketPayload::kContextId) {
QUICHE_BUG(ip_proxy_unknown_payload_ip_context)
<< "ConnectIpDatagramUnknownPayload created with IP packet context "
"ID (0). Should instead create a "
"ConnectIpDatagramIpPacketPayload.";
}
}
ConnectIpDatagramPayload::ContextId
ConnectIpDatagramUnknownPayload::GetContextId() const {
return context_id_;
}
ConnectIpDatagramPayload::Type ConnectIpDatagramUnknownPayload::GetType()
const {
return Type::kUnknown;
}
absl::string_view ConnectIpDatagramUnknownPayload::GetIpProxyingPayload()
const {
return ip_proxying_payload_;
}
size_t ConnectIpDatagramUnknownPayload::SerializedLength() const {
return ip_proxying_payload_.size() +
QuicheDataWriter::GetVarInt62Len(uint64_t{context_id_});
}
bool ConnectIpDatagramUnknownPayload::SerializeTo(
QuicheDataWriter& writer) const {
if (!writer.WriteVarInt62(uint64_t{context_id_})) {
return false;
}
if (!writer.WriteStringPiece(ip_proxying_payload_)) {
return false;
}
return true;
}
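// Added wire-format note: Serialize() emits the context ID as a QUIC
// variable-length integer followed by the payload verbatim. IDs below 64 fit
// in a single byte, so ConnectIpDatagramUnknownPayload(4, "packet")
// serializes to the 7 bytes "\x04packet" (see the unit test below).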
} | #include "quiche/common/masque/connect_ip_datagram_payload.h"
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche::test {
namespace {
TEST(ConnectIpDatagramPayloadTest, ParseIpPacket) {
static constexpr char kDatagramPayload[] = "\x00packet";
std::unique_ptr<ConnectIpDatagramPayload> parsed =
ConnectIpDatagramPayload::Parse(
absl::string_view(kDatagramPayload, sizeof(kDatagramPayload) - 1));
ASSERT_TRUE(parsed);
EXPECT_EQ(parsed->GetContextId(),
ConnectIpDatagramIpPacketPayload::kContextId);
EXPECT_EQ(parsed->GetType(), ConnectIpDatagramPayload::Type::kIpPacket);
EXPECT_EQ(parsed->GetIpProxyingPayload(), "packet");
}
TEST(ConnectIpDatagramPayloadTest, SerializeIpPacket) {
static constexpr absl::string_view kIpPacket = "packet";
ConnectIpDatagramIpPacketPayload payload(kIpPacket);
EXPECT_EQ(payload.GetIpProxyingPayload(), kIpPacket);
EXPECT_EQ(payload.Serialize(), std::string("\x00packet", 7));
}
TEST(ConnectIpDatagramPayloadTest, ParseUnknownPacket) {
static constexpr char kDatagramPayload[] = "\x05packet";
std::unique_ptr<ConnectIpDatagramPayload> parsed =
ConnectIpDatagramPayload::Parse(
absl::string_view(kDatagramPayload, sizeof(kDatagramPayload) - 1));
ASSERT_TRUE(parsed);
EXPECT_EQ(parsed->GetContextId(), 5);
EXPECT_EQ(parsed->GetType(), ConnectIpDatagramPayload::Type::kUnknown);
EXPECT_EQ(parsed->GetIpProxyingPayload(), "packet");
}
TEST(ConnectIpDatagramPayloadTest, SerializeUnknownPacket) {
static constexpr absl::string_view kInnerIpProxyingPayload = "packet";
ConnectIpDatagramUnknownPayload payload(4u, kInnerIpProxyingPayload);
EXPECT_EQ(payload.GetIpProxyingPayload(), kInnerIpProxyingPayload);
EXPECT_EQ(payload.Serialize(), std::string("\x04packet", 7));
}
}  // namespace
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/masque/connect_ip_datagram_payload.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/masque/connect_ip_datagram_payload_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
ed33b6cb-931a-44ad-a7c0-aa72370d85d7 | cpp | tensorflow/tensorflow | tuple | third_party/xla/xla/hlo/builder/lib/tuple.cc | third_party/xla/xla/hlo/builder/lib/tuple_test.cc | #include "xla/hlo/builder/lib/tuple.h"
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<ShapeTree<XlaOp>> DisassembleTuple(XlaOp tuple) {
TF_ASSIGN_OR_RETURN(Shape shape, tuple.builder()->GetShape(tuple));
ShapeTree<XlaOp> result(shape);
result.ForEachMutableElement([&](ShapeIndexView index, XlaOp* element) {
if (index.empty()) {
*element = tuple;
} else {
ShapeIndexView parent_index = index.subspan(0, index.size() - 1);
XlaOp parent = result.element(parent_index);
*element = GetTupleElement(parent, index.back());
}
});
return std::move(result);
}
XlaOp AssembleTuple(XlaBuilder* builder, ShapeTree<XlaOp> elements) {
elements.ForEachMutableElementPostOrder(
[&](const ShapeIndex& index, XlaOp* element) {
const Shape& subshape = ShapeUtil::GetSubshape(elements.shape(), index);
if (subshape.IsTuple()) {
absl::InlinedVector<XlaOp, 2> children;
ShapeIndex child_index = index;
for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
child_index.push_back(i);
children.push_back(elements.element(child_index));
child_index.pop_back();
}
*element = Tuple(builder, children);
}
});
return elements.element({});
}
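// Added usage sketch (mirrors the unit test; `tuple_op`, `builder`, and the
// leaf transform are hypothetical): DisassembleTuple yields a
// ShapeTree<XlaOp> with one leaf per array in the possibly nested tuple;
// leaves can be rewritten in place and the tuple rebuilt.
//
//   TF_ASSIGN_OR_RETURN(ShapeTree<XlaOp> parts, DisassembleTuple(tuple_op));
//   parts.ForEachMutableElement([&](const ShapeIndex& index, XlaOp* leaf) {
//     // Transform array leaves here, e.g. *leaf = Neg(*leaf);
//   });
//   XlaOp rebuilt = AssembleTuple(&builder, std::move(parts));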
} | #include "xla/hlo/builder/lib/tuple.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "xla/error_spec.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/service.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TupleTest : public ClientLibraryTestBase {};
XLA_TEST_F(TupleTest, DisassembleAssemble) {
XlaBuilder builder(TestName());
Shape shape = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(S32, {3}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {4}), ShapeUtil::MakeShape(S32, {5})}),
ShapeUtil::MakeShape(S32, {6}),
});
Literal input = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({3}, int32_t{42}),
LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({4}, int32_t{43}),
LiteralUtil::CreateFullWithDescendingLayout({5}, int32_t{44})),
LiteralUtil::CreateFullWithDescendingLayout({6}, int32_t{45}));
XlaOp param = Parameter(&builder, 0, shape, "param");
TF_ASSERT_OK_AND_ASSIGN(ShapeTree<XlaOp> disassembled_tuple,
DisassembleTuple(param));
int32_t addend = 1;
disassembled_tuple.ForEachMutableElement([&](const ShapeIndex& index,
XlaOp* element) {
const Shape& subshape = ShapeUtil::GetSubshape(shape, index);
if (subshape.IsArray()) {
*element = Add(
*element,
ConstantLiteral(&builder, LiteralUtil::CreateFullWithDescendingLayout(
subshape.dimensions(), addend)));
++addend;
}
});
AssembleTuple(&builder, std::move(disassembled_tuple));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> data,
client_->TransferToServer(input));
Literal expected = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({3}, int32_t{43}),
LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({4}, int32_t{45}),
LiteralUtil::CreateFullWithDescendingLayout({5}, int32_t{47})),
LiteralUtil::CreateFullWithDescendingLayout({6}, int32_t{49}));
ComputeAndCompareLiteral(&builder, expected, {data.get()}, ErrorSpec(0),
&shape);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/tuple.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/tuple_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
acb28b16-a116-4b1a-a11e-a1ee50b66645 | cpp | tensorflow/tensorflow | force_xla_constants_on_host_pass | tensorflow/compiler/jit/force_xla_constants_on_host_pass.cc | tensorflow/compiler/jit/force_xla_constants_on_host_pass_test.cc | #include "tensorflow/compiler/jit/force_xla_constants_on_host_pass.h"
#include "tensorflow/compiler/jit/compilability_check_util.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
namespace tensorflow {
Status ForceXlaConstantsOnHostPass::Run(
const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
OptimizerOptions opts;
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, options.session_options->env, nullptr,
TF_GRAPH_DEF_VERSION, options.flib_def, opts);
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
for (Node* node : graph->nodes()) {
if (CanCreateXlaKernel(node->def())) {
const FunctionBody* fbody = nullptr;
std::vector<int> constant_arg_indices;
std::vector<int> resource_arg_indices;
NameAttrList function;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(node->def(), &function));
TF_RETURN_IF_ERROR(GetBodyAndConstantsAndResources(
flr, function, &fbody, &constant_arg_indices, &resource_arg_indices));
VLOG(3) << "Found constant arg indices: "
<< absl::StrJoin(constant_arg_indices, ", ");
node->AddAttr("_input_hostmem", constant_arg_indices);
}
}
return absl::OkStatus();
}
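// Added effect sketch: for a must-compile function call whose body requires
// argument 1 to be a compile-time constant (e.g. the `perm` input of a
// Transpose), the pass annotates the call node with
//   _input_hostmem = [1]
// so those constant arguments are placed in host memory (see the unit test).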
} | #include "tensorflow/compiler/jit/force_xla_constants_on_host_pass.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/compilability_check_util.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
Status ForceXlaConstantsOnHost(const Scope& s,
FunctionLibraryDefinition* flib_def,
std::unique_ptr<Graph>* result) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphOptimizationPassOptions options;
SessionOptions session_options;
session_options.env = Env::Default();
options.graph = &graph;
options.session_options = &session_options;
options.flib_def = flib_def;
TF_RETURN_IF_ERROR(s.ToGraph(graph.get()));
ForceXlaConstantsOnHostPass rewriter;
TF_RETURN_IF_ERROR(rewriter.Run(options));
*result = std::move(graph);
return absl::OkStatus();
}
TEST(ForceXlaConstantsOnHostPassTest, Simple) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary library;
FunctionDef called_func =
FunctionDefHelper::Create("TransposeCall",
{"a:float", "b:int32"},
{"c:float"}, {},
{{{"t0"},
"Transpose",
{"a", "b"},
{
{"T", DT_FLOAT},
{"Tperm", DT_INT32},
}}},
{{"c", "t0:y:0"}});
AttrValue true_attribute;
true_attribute.set_b(true);
(*called_func.mutable_attr())[kXlaMustCompileAttr] = true_attribute;
*library.add_function() = called_func;
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library));
FunctionLibraryDefinition flib_def(OpRegistry::Global(), library);
Output in = ops::Placeholder(root, DT_FLOAT);
Output perm = ops::Const(root, {3, 1, 2, 0});
NameAttrList b_name_attr;
b_name_attr.set_name("TransposeCall");
ops::PartitionedCall call(root.WithOpName("call"), {in, perm}, {DT_FLOAT},
b_name_attr);
call.output.front().node()->AddAttr(kXlaMustCompileAttr, true);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(ForceXlaConstantsOnHost(root, &flib_def, &graph));
bool found = false;
for (Node* node : graph->nodes()) {
if (CanCreateXlaKernel(node->def())) {
EXPECT_FALSE(found);
found = true;
std::vector<int32> hostmem_attr;
EXPECT_TRUE(TryGetNodeAttr(node->def(), "_input_hostmem", &hostmem_attr));
EXPECT_EQ(hostmem_attr.size(), 1);
EXPECT_EQ(hostmem_attr[0], 1);
}
}
EXPECT_TRUE(found);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/force_xla_constants_on_host_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/force_xla_constants_on_host_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66f0c126-a09c-451b-83a5-da25cfacf60d | cpp | tensorflow/tensorflow | tf_to_xla_attribute_utils | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils_test.cc | #include <algorithm>
#include <numeric>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/str_format.h"
#include "llvm/ADT/ArrayRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h"
#include "tensorflow/compiler/mlir/lite/kernels/padding.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.h"
#include "xla/xla_data.pb.h"
namespace mlir::quant {
namespace {
Value GetDimValue(OpBuilder &builder, Location loc, Value shape_value,
int32_t dim) {
Type attribute_type = builder.getI64Type();
return builder.create<TF::StridedSliceOp>(
loc,
RankedTensorType::get(
{}, mlir::cast<ShapedType>(shape_value.getType()).getElementType()),
shape_value,
Create1DConstValue<int32_t>(builder, loc, {dim}),
Create1DConstValue<int32_t>(builder, loc, {dim + 1}),
Create1DConstValue<int32_t>(builder, loc, {1}),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 1));
}
void GetSamePaddingValues(OpBuilder &builder, Location loc, Value input_size,
int64_t filter_sz, int64_t dilation_rate,
int64_t stride, Value &padding_low,
Value &padding_high) {
Value zero = CreateScalarConstValue<int32_t>(builder, loc, 0);
Value one = CreateScalarConstValue<int32_t>(builder, loc, 1);
Value two = CreateScalarConstValue<int32_t>(builder, loc, 2);
Value filter_size = CreateScalarConstValue<int32_t>(builder, loc, filter_sz);
Type int32_scalar_type = zero.getType();
auto scalar_add = [&](Value lhs, Value rhs) {
return builder.create<TF::AddOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_mul = [&](Value lhs, Value rhs) {
return builder.create<TF::MulOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_sub = [&](Value lhs, Value rhs) {
return builder.create<TF::SubOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_div = [&](Value lhs, Value rhs) {
return builder.create<TF::DivOp>(loc, int32_scalar_type, lhs, rhs);
};
Value stride_value = CreateScalarConstValue<int32_t>(builder, loc, stride);
Value dilation_rate_value =
CreateScalarConstValue<int32_t>(builder, loc, dilation_rate);
Value effective_filter_size_op = scalar_add(
scalar_mul(dilation_rate_value, scalar_sub(filter_size, one)), one);
Value output_size = scalar_div(
scalar_add(input_size, scalar_sub(stride_value, one)), stride_value);
Value padding_needed = scalar_sub(
scalar_add(effective_filter_size_op,
scalar_mul(stride_value, scalar_sub(output_size, one))),
input_size);
padding_needed = builder.create<TF::MaximumOp>(loc, padding_needed, zero);
padding_low = scalar_div(padding_needed, two);
padding_high = scalar_sub(padding_needed, padding_low);
}
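// Added worked example of the SAME-padding arithmetic above, assuming
// input_size = 5, filter_sz = 3, stride = 2, dilation_rate = 1:
//   effective_filter_size = 1 * (3 - 1) + 1 = 3
//   output_size           = (5 + 2 - 1) / 2 = 3
//   padding_needed        = max(0, 3 + 2 * (3 - 1) - 5) = 2
//   padding_low = 2 / 2 = 1,  padding_high = 2 - 1 = 1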
Value PadForDynamicShapedInputSamePadding(
OpBuilder &builder, Location loc, Value input, Value filter,
int8_t input_zp_value, ArrayAttr strides, ArrayAttr dilations,
StringAttr conv_padding, Value &padding, int num_dims) {
Value zero_rank1 = CreateConstValue<int32_t>(builder, loc, {1}, {0});
SmallVector<Value> temp_padding_values{zero_rank1, zero_rank1};
auto reshape_op = [&](Value value, const SmallVector<int64_t> &shape) {
const int64_t rank = shape.size();
return builder.create<TF::ReshapeOp>(
loc, RankedTensorType::get(shape, builder.getI32Type()), value,
CreateConstValue<int64_t>(builder, loc, {rank}, shape));
};
ShapedType filter_shape = mlir::cast<ShapedType>(filter.getType());
Value input_shape_value = builder.create<TF::ShapeOp>(
loc, RankedTensorType::get({num_dims}, builder.getI32Type()), input);
auto scalar_to_rank1 = [&](Value value) { return reshape_op(value, {1}); };
for (int i : llvm::seq<int>(1, num_dims - 1)) {
Value input_size_i = GetDimValue(builder, loc, input_shape_value, i);
const int stride_i = mlir::cast<IntegerAttr>(strides[i]).getInt();
const int dilation_i = mlir::cast<IntegerAttr>(dilations[i]).getInt();
const int filter_i = filter_shape.getDimSize(i - 1);
Value pad_i_low, pad_i_high;
GetSamePaddingValues(builder, loc, input_size_i, filter_i, dilation_i,
stride_i, pad_i_low, pad_i_high);
temp_padding_values.push_back(scalar_to_rank1(pad_i_low));
temp_padding_values.push_back(scalar_to_rank1(pad_i_high));
}
temp_padding_values.push_back(zero_rank1);
temp_padding_values.push_back(zero_rank1);
padding = CreateConstValue<int32_t>(
builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(2 * (num_dims - 2), 0));
Value zero = CreateScalarConstValue(builder, loc, 0);
Value temp_padding_rank1 = builder.create<TF::ConcatOp>(
loc, RankedTensorType::get({2 * num_dims}, builder.getI32Type()), zero,
temp_padding_values);
Value temp_padding = reshape_op(temp_padding_rank1, {num_dims, 2});
return builder.create<TF::PadV2Op>(
loc, input.getType(), input, temp_padding,
CreateScalarConstValue<int8_t>(builder, loc, input_zp_value));
}
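// Added note: with dynamically shaped inputs the SAME-padding amounts are not
// known at compile time, so the function above computes them in the graph
// (Shape/StridedSlice per spatial dim), concatenates and reshapes them into a
// [num_dims, 2] paddings tensor, and applies PadV2 with the input zero point
// as the pad value.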
}  // namespace
Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc,
Value input, Value filter,
int8_t input_zp_value, ArrayAttr strides,
ArrayAttr dilations,
StringAttr conv_padding,
ArrayAttr explicit_paddings,
Value &padding, int num_dims) {
ShapedType input_shape = mlir::cast<ShapedType>(input.getType());
SmallVector<int64_t> spatial_dims(num_dims - 2);
absl::c_iota(spatial_dims, 1);
bool has_dynamic_spatial_dim = absl::c_any_of(
spatial_dims,
[&input_shape](int64_t dim) { return input_shape.isDynamicDim(dim); });
if (conv_padding.strref() == "SAME" && has_dynamic_spatial_dim) {
return PadForDynamicShapedInputSamePadding(
builder, loc, input, filter, input_zp_value, strides, dilations,
conv_padding, padding, num_dims);
}
ShapedType filter_shape = mlir::cast<ShapedType>(filter.getType());
SmallVector<int32_t> padding_values(2 * num_dims, 0);
if (conv_padding.strref() == "EXPLICIT") {
if (explicit_paddings.size() != 2 * num_dims) {
emitError(loc,
absl::StrFormat(
"explicit_paddings are expected to be %d-element arrays",
2 * num_dims));
return {};
}
for (int i : spatial_dims) {
padding_values[2 * i] =
mlir::cast<IntegerAttr>(explicit_paddings[2 * i]).getInt();
padding_values[2 * i + 1] =
mlir::cast<IntegerAttr>(explicit_paddings[2 * i + 1]).getInt();
}
} else if (conv_padding.strref() == "SAME") {
for (int i : spatial_dims) {
int input_size = input_shape.getDimSize(i);
int filter_size = filter_shape.getDimSize(i - 1);
int stride_i = mlir::cast<IntegerAttr>(strides[i]).getInt();
int dilation_i = mlir::cast<IntegerAttr>(dilations[i]).getInt();
int out_size = tflite_migration::ComputeOutSize(
kTfLitePaddingSame, input_size, filter_size, stride_i, dilation_i);
int offset = 0;
int padding_before = tflite_migration::ComputePaddingWithOffset(
stride_i, dilation_i, input_size, filter_size, out_size, &offset);
int padding_after = padding_before + offset;
padding_values[2 * i] = padding_before;
padding_values[2 * i + 1] = padding_after;
}
}
if (input_zp_value == 0 ||
absl::c_all_of(padding_values, [](int v) { return v == 0; })) {
padding = CreateConstValue<int32_t>(
builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(padding_values.begin() + 2,
padding_values.end() - 2));
return input;
}
padding =
CreateConstValue<int32_t>(builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(2 * (num_dims - 2), 0));
Value temp_padding =
CreateConstValue<int32_t>(builder, loc, {num_dims, 2}, padding_values);
SmallVector<int64_t> output_shape(input_shape.getShape().begin(),
input_shape.getShape().end());
for (int i : spatial_dims) {
output_shape[i] += padding_values[2 * i] + padding_values[2 * i + 1];
}
return builder.create<TF::PadV2Op>(
loc, RankedTensorType::get(output_shape, builder.getI8Type()), input,
temp_padding,
CreateScalarConstValue<int8_t>(builder, loc, input_zp_value));
}
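// Added note: when the input zero point is 0 or no padding is required, the
// input is returned unpadded and the computed per-spatial-dim amounts are
// handed back through `padding` for the caller's convolution; otherwise the
// input is explicitly padded with the zero-point value and `padding` is set
// to all zeros.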
Value PackOperand(OpBuilder &builder, Location loc, Value value, int pack_dim) {
ShapedType value_type = mlir::cast<ShapedType>(value.getType());
const int rank = value_type.getRank();
SmallVector<int64_t> packed_shape(value_type.getShape().begin(),
value_type.getShape().end());
RankedTensorType shape_type =
RankedTensorType::get({rank}, builder.getI64Type());
Value shape_value = builder.create<TF::ShapeOp>(loc, shape_type, value);
if (packed_shape[pack_dim] % 2 != 0) {
packed_shape[pack_dim] += 1;
SmallVector<int32_t> padding(rank * 2, 0);
padding[pack_dim * 2 + 1] = 1;
Value padding_value =
CreateConstValue<int32_t>(builder, loc, {rank, 2}, padding);
value = builder.create<TF::PadV2Op>(
loc, RankedTensorType::get(packed_shape, builder.getI8Type()), value,
padding_value, CreateScalarConstValue<int8_t>(builder, loc, 0));
SmallVector<int64_t> shape_add(rank, 0);
shape_add[pack_dim] = 1;
shape_value = builder.create<TF::AddOp>(
loc, shape_type, shape_value,
CreateConstValue<int64_t>(builder, loc, {rank}, shape_add));
}
packed_shape[pack_dim] /= 2;
SmallVector<int64_t> divisor(rank, 1);
divisor[pack_dim] = 2;
RankedTensorType packed_output_type =
RankedTensorType::get(packed_shape, builder.getI8Type());
Value packed_shape_value = builder.create<TF::DivOp>(
loc, shape_type, shape_value,
CreateConstValue<int64_t>(builder, loc, {rank}, divisor));
Value packed_low_begin_value = CreateConstValue<int64_t>(
builder, loc, {rank}, SmallVector<int64_t>(rank, 0));
Value packed_low_value =
builder.create<TF::SliceOp>(loc, packed_output_type, value,
packed_low_begin_value, packed_shape_value);
packed_low_value = builder.create<TF::BitwiseAndOp>(
loc, packed_output_type, packed_low_value,
CreateScalarConstValue<int8_t>(builder, loc, 0x0F));
SmallVector<int64_t> packed_high_begin(rank, 0);
packed_high_begin[pack_dim] = packed_shape[pack_dim];
Value packed_high_begin_value =
CreateConstValue<int64_t>(builder, loc, {rank}, packed_high_begin);
Value packed_high_value =
builder.create<TF::SliceOp>(loc, packed_output_type, value,
packed_high_begin_value, packed_shape_value);
packed_high_value = builder.create<TF::LeftShiftOp>(
loc, packed_output_type, packed_high_value,
CreateScalarConstValue<int8_t>(builder, loc, 4));
Operation *packed = builder.create<TF::BitwiseOrOp>(
loc, packed_output_type, packed_low_value, packed_high_value);
return ConstantFoldOpIfPossible(packed).front();
}
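// Added worked example (matches the unit test): packing the [2, 2] int8
// tensor {0x01, 0x02, 0x03, 0x04} along dim 0 slices it into a low half
// {0x01, 0x02} and a high half {0x03, 0x04}; each output byte is
// (low & 0x0F) | (high << 4), giving {0x31, 0x42} with shape [1, 2].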
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
namespace mlir::quant {
namespace {
void PackOperandTestHelper(
const llvm::SmallVector<int64_t>& unpacked_shape,
const llvm::SmallVector<int8_t>& unpacked_values, int pack_dim,
const llvm::SmallVector<int64_t>& expected_packed_shape,
const llvm::SmallVector<int8_t>& expected_packed_values) {
MLIRContext context;
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
OpBuilder builder(&module->getBodyRegion());
context.loadDialect<TF::TensorFlowDialect>();
Value value = CreateConstValue<int8_t>(builder, module->getLoc(),
unpacked_shape, unpacked_values);
Value packed_value = PackOperand(builder, module->getLoc(), value, pack_dim);
DenseIntElementsAttr packed_value_attr;
ASSERT_TRUE(matchPattern(packed_value, m_Constant(&packed_value_attr)));
ShapedType packed_shape_type =
mlir::dyn_cast<ShapedType>(packed_value.getType());
llvm::SmallVector<int64_t> packed_shape(packed_shape_type.getShape().begin(),
packed_shape_type.getShape().end());
EXPECT_THAT(packed_shape, testing::ElementsAreArray(expected_packed_shape));
llvm::SmallVector<int8_t> packed_value_vector(
packed_value_attr.getValues<int8_t>());
EXPECT_THAT(packed_value_vector,
testing::ElementsAreArray(expected_packed_values));
}
TEST(TfToXlaAttributeUtilsTest, PackOperandPackDimSizeEven) {
PackOperandTestHelper({2, 2},
{0x01, 0x02, 0x03, 0x04},
0,
{1, 2},
{0x31, 0x42});
}
TEST(TfToXlaAttributeUtilsTest, PackOperandPackDimSizeOdd) {
PackOperandTestHelper(
{2, 3},
{0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
1,
{2, 2},
{0x31, 0x02, 0x64, 0x05});
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f8005be0-efb5-4e02-b34f-67e3f01f5cb9 | cpp | tensorflow/tensorflow | non_max_suppression | tensorflow/lite/kernels/non_max_suppression.cc | tensorflow/lite/kernels/internal/non_max_suppression_test.cc | #include "tensorflow/lite/kernels/internal/reference/non_max_suppression.h"
#include <initializer_list>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace non_max_suppression {
constexpr int kInputTensorBoxes = 0;
constexpr int kInputTensorScores = 1;
constexpr int kInputTensorMaxOutputSize = 2;
constexpr int kInputTensorIouThreshold = 3;
constexpr int kInputTensorScoreThreshold = 4;
constexpr int kInputTensorSigma = 5;
constexpr int kNMSOutputTensorSelectedIndices = 0;
constexpr int kNMSOutputTensorNumSelectedIndices = 1;
constexpr int kSoftNMSOutputTensorSelectedIndices = 0;
constexpr int kSoftNMSOutputTensorSelectedScores = 1;
constexpr int kSoftNMSOutputTensorNumSelectedIndices = 2;
TfLiteStatus SetTensorSizes(TfLiteContext* context, TfLiteTensor* tensor,
std::initializer_list<int> values) {
TfLiteIntArray* size = TfLiteIntArrayCreate(values.size());
int index = 0;
for (const auto& v : values) {
size->data[index++] = v;
}
return context->ResizeTensor(context, tensor, size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int num_inputs = NumInputs(node);
const bool is_soft_nms = num_inputs == 6;
if (num_inputs != 5 && num_inputs != 6) {
TF_LITE_KERNEL_LOG(context, "Found NMS op with invalid num inputs: %d",
NumInputs(node));
return kTfLiteError;
}
const TfLiteTensor* input_boxes;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorBoxes, &input_boxes));
TF_LITE_ENSURE_EQ(context, input_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_boxes), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_boxes, 1), 4);
const int num_boxes = SizeOfDimension(input_boxes, 0);
const TfLiteTensor* input_scores;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorScores, &input_scores));
TF_LITE_ENSURE_EQ(context, input_scores->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_scores), 1);
TF_LITE_ENSURE_EQ(context, num_boxes, SizeOfDimension(input_scores, 0));
const TfLiteTensor* input_max_output_size;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorMaxOutputSize,
&input_max_output_size));
TF_LITE_ENSURE_EQ(context, input_max_output_size->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_max_output_size), 0);
const bool is_max_output_size_const =
IsConstantOrPersistentTensor(input_max_output_size);
int max_output_size_value = 0;
if (is_max_output_size_const) {
max_output_size_value = *GetTensorData<int>(input_max_output_size);
TF_LITE_ENSURE(context, (max_output_size_value >= 0));
}
const TfLiteTensor* input_iou_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorIouThreshold,
&input_iou_threshold));
TF_LITE_ENSURE_EQ(context, input_iou_threshold->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_iou_threshold), 0);
const TfLiteTensor* input_score_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorScoreThreshold,
&input_score_threshold));
TF_LITE_ENSURE_EQ(context, input_score_threshold->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_score_threshold), 0);
if (is_soft_nms) {
const TfLiteTensor* input_sigma;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorSigma, &input_sigma));
TF_LITE_ENSURE_EQ(context, input_sigma->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_sigma), 0);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3);
TfLiteTensor* output_selected_indices;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorSelectedIndices,
&output_selected_indices));
output_selected_indices->type = kTfLiteInt32;
TfLiteTensor* output_selected_scores;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kSoftNMSOutputTensorSelectedScores,
&output_selected_scores));
output_selected_scores->type = kTfLiteFloat32;
TfLiteTensor* output_num_selected_indices;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
output_num_selected_indices->type = kTfLiteInt32;
SetTensorSizes(context, output_num_selected_indices, {});
if (is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
SetTensorSizes(context, output_selected_scores, {max_output_size_value});
} else {
SetTensorToDynamic(output_selected_indices);
SetTensorToDynamic(output_selected_scores);
}
} else {
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor* output_selected_indices;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kNMSOutputTensorSelectedIndices,
&output_selected_indices));
output_selected_indices->type = kTfLiteInt32;
TfLiteTensor* output_num_selected_indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
output_num_selected_indices->type = kTfLiteInt32;
SetTensorSizes(context, output_num_selected_indices, {});
if (is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
} else {
SetTensorToDynamic(output_selected_indices);
}
}
return kTfLiteOk;
}
void ResetUnusedElementsToZeroes(const int max_output_size,
const int num_selected_indices,
int* selected_indices,
float* selected_scores) {
for (int i = num_selected_indices; i < max_output_size; ++i) {
selected_indices[i] = 0;
if (selected_scores) {
selected_scores[i] = 0.0;
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const bool is_soft_nms = NumInputs(node) == 6;
const TfLiteTensor* input_boxes;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorBoxes, &input_boxes));
const int num_boxes = SizeOfDimension(input_boxes, 0);
const TfLiteTensor* input_scores;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorScores, &input_scores));
const TfLiteTensor* input_max_output_size;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorMaxOutputSize,
&input_max_output_size));
const int max_output_size_value = *GetTensorData<int>(input_max_output_size);
TF_LITE_ENSURE(context, (max_output_size_value >= 0));
const bool is_max_output_size_const =
IsConstantOrPersistentTensor(input_max_output_size);
const TfLiteTensor* input_iou_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorIouThreshold,
&input_iou_threshold));
const float iou_threshold = *GetTensorData<float>(input_iou_threshold);
const TfLiteTensor* input_score_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorScoreThreshold,
&input_score_threshold));
const float score_threshold = *GetTensorData<float>(input_score_threshold);
TfLiteTensor* output_selected_indices = nullptr;
TfLiteTensor* output_selected_scores = nullptr;
TfLiteTensor* output_num_selected_indices = nullptr;
if (is_soft_nms) {
const TfLiteTensor* input_sigma;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorSigma, &input_sigma));
const float soft_nms_sigma = *GetTensorData<float>(input_sigma);
if (soft_nms_sigma < 0) {
TF_LITE_KERNEL_LOG(context, "Invalid sigma value for soft NMS: %f",
soft_nms_sigma);
return kTfLiteError;
}
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorSelectedIndices,
&output_selected_indices));
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kSoftNMSOutputTensorSelectedScores,
&output_selected_scores));
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
if (!is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
SetTensorSizes(context, output_selected_scores, {max_output_size_value});
}
reference_ops::NonMaxSuppression(
input_boxes->data.f, num_boxes, input_scores->data.f,
max_output_size_value, iou_threshold, score_threshold, soft_nms_sigma,
output_selected_indices->data.i32, output_selected_scores->data.f,
output_num_selected_indices->data.i32);
ResetUnusedElementsToZeroes(
max_output_size_value, *output_num_selected_indices->data.i32,
output_selected_indices->data.i32, output_selected_scores->data.f);
} else {
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kNMSOutputTensorSelectedIndices,
&output_selected_indices));
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
if (!is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
}
reference_ops::NonMaxSuppression(
input_boxes->data.f, num_boxes, input_scores->data.f,
        max_output_size_value, iou_threshold, score_threshold,
        /*soft_nms_sigma=*/0.0, output_selected_indices->data.i32,
        /*selected_scores=*/nullptr,
output_num_selected_indices->data.i32);
ResetUnusedElementsToZeroes(max_output_size_value,
*output_num_selected_indices->data.i32,
output_selected_indices->data.i32, nullptr);
}
return kTfLiteOk;
}
}
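// Both registrations share Prepare/Eval above; the kernel tells the two ops
// apart at runtime by their input count (see is_soft_nms in Eval).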
TfLiteRegistration* Register_NON_MAX_SUPPRESSION_V4() {
static TfLiteRegistration r = {nullptr, nullptr, non_max_suppression::Prepare,
non_max_suppression::Eval};
return &r;
}
TfLiteRegistration* Register_NON_MAX_SUPPRESSION_V5() {
static TfLiteRegistration r = {nullptr, nullptr, non_max_suppression::Prepare,
non_max_suppression::Eval};
return &r;
}
}
}
} | #include "tensorflow/lite/kernels/internal/reference/non_max_suppression.h"
#include <algorithm>
#include <cmath>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
constexpr int kNumBoxes = 6;
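// The six candidates form three clusters (around y=0, y=10 and y=100); the
// flipped variant swaps corner order to exercise coordinate canonicalization.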
void InitializeCandidates(std::vector<float>* boxes, std::vector<float>* scores,
bool flip_coordinates = false) {
if (!flip_coordinates) {
*boxes = {
0, 0, 1, 1,
0, 0.1, 1, 1.1,
0, -0.1, 1, 0.9,
0, 10, 1, 11,
0, 10.1, 1, 11.1,
0, 100, 1, 101
};
} else {
*boxes = {
1, 1, 0, 0,
0, 0.1, 1, 1.1,
0, .9f, 1, -0.1,
0, 10, 1, 11,
1, 10.1f, 0, 11.1,
1, 101, 0, 100
};
}
*scores = {0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
}
template <typename T>
void MatchFirstNElements(int num_elements, const std::vector<T>& test_values,
const std::vector<T>& reference_values) {
EXPECT_LT(num_elements, test_values.size());
EXPECT_EQ(num_elements, reference_values.size());
for (int i = 0; i < num_elements; ++i) {
EXPECT_EQ(test_values[i], reference_values[i]);
}
}
TEST(NonMaxSuppression, TestZeroBoxes) {
std::vector<float> boxes(1);
std::vector<float> scores(1);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
const int max_output_size = 4;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
      boxes.data(), /*num_boxes=*/0, scores.data(), max_output_size,
      iou_threshold, score_threshold, /*soft_nms_sigma=*/0.0,
      selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromIdenticalBoxes) {
std::vector<float> boxes(kNumBoxes * 4);
std::vector<float> scores(kNumBoxes);
for (int i = 0; i < kNumBoxes; ++i) {
boxes[i * 4 + 0] = 0;
boxes[i * 4 + 1] = 0;
boxes[i * 4 + 2] = 1;
boxes[i * 4 + 3] = 1;
scores[i] = 0.75;
}
const float iou_threshold = 0.5;
float score_threshold = 0.5;
const int max_output_size = kNumBoxes;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      score_threshold, /*soft_nms_sigma=*/0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_scores, {.75});
score_threshold = 0.95;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      score_threshold, /*soft_nms_sigma=*/0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithZeroScoreThreshold) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
int max_output_size;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      /*score_threshold=*/0.0, /*soft_nms_sigma=*/0.0,
      selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 3);
MatchFirstNElements(3, selected_indices, {3, 0, 5});
MatchFirstNElements(3, selected_scores, {0.95, 0.9, 0.3});
max_output_size = 2;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      /*score_threshold=*/0.0, /*soft_nms_sigma=*/0.0,
      selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, max_output_size);
MatchFirstNElements(max_output_size, selected_indices, {3, 0});
MatchFirstNElements(max_output_size, selected_scores, {0.95, 0.9});
max_output_size = 0;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      /*score_threshold=*/0.0, /*soft_nms_sigma=*/0.0,
      selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithScoreThreshold) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
int max_output_size;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      score_threshold, /*soft_nms_sigma=*/0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
MatchFirstNElements(2, selected_indices, {3, 0});
MatchFirstNElements(2, selected_scores, {0.95, 0.9});
max_output_size = 1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      score_threshold, /*soft_nms_sigma=*/0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_indices, {3});
MatchFirstNElements(1, selected_scores, {0.95});
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithFlippedCoordinates) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores, true);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
const int max_output_size = 3;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      score_threshold, /*soft_nms_sigma=*/0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
MatchFirstNElements(2, selected_indices, {3, 0});
MatchFirstNElements(2, selected_scores, {0.95, 0.9});
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      /*score_threshold=*/0.0, /*soft_nms_sigma=*/0.0,
      selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 3);
MatchFirstNElements(3, selected_indices, {3, 0, 5});
MatchFirstNElements(3, selected_scores, {0.95, 0.9, 0.3});
}
TEST(NonMaxSuppression, TestIoUThresholdBoundaryCases) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float score_threshold = 0.4;
const int max_output_size = 4;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size,
      /*iou_threshold=*/0.0, score_threshold, /*soft_nms_sigma=*/0.0,
selected_indices.data(), selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_indices, {3});
MatchFirstNElements(1, selected_scores, {0.95});
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size,
      /*iou_threshold=*/0.9999,
      /*score_threshold=*/0.0, /*soft_nms_sigma=*/0.0,
      selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, max_output_size);
MatchFirstNElements(max_output_size, selected_indices, {3, 0, 1, 2});
MatchFirstNElements(max_output_size, selected_scores, {0.95, 0.9, 0.75, 0.6});
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithSoftNMS) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 1.0;
float score_threshold = 0.0;
const float soft_nms_sigma = 0.5;
int max_output_size = 6;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, soft_nms_sigma, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 6);
EXPECT_THAT(selected_indices, ElementsAreArray({3, 0, 1, 5, 4, 2}));
EXPECT_THAT(selected_scores,
ElementsAreArray(
ArrayFloatNear({0.95, 0.9, 0.384, 0.3, 0.256, 0.197}, 1e-3)));
score_threshold = 0.299;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, soft_nms_sigma, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 4);
MatchFirstNElements(4, selected_indices, {3, 0, 1, 5});
}
TEST(NonMaxSuppression, TestNullSelectedScoresOutput) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
int max_output_size;
std::vector<int> selected_indices(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
      score_threshold, /*soft_nms_sigma=*/0.0, selected_indices.data(),
      /*selected_scores=*/nullptr, &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/non_max_suppression.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/non_max_suppression_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2b8f4dea-181a-4111-9c9e-1972e76f79b6 | cpp | google/tensorstore | bit_span | tensorstore/util/bit_span.h | tensorstore/util/bit_span_test.cc | #ifndef TENSORSTORE_UTIL_BIT_SPAN_H_
#define TENSORSTORE_UTIL_BIT_SPAN_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <type_traits>
#include "absl/base/attributes.h"
#include "tensorstore/util/small_bit_set.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_bit_span {
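// Sets (FillValue=true) or clears (FillValue=false) the `size` bits starting
// at bit `offset` of the block array `base`: masked writes for the partial
// blocks at either end, whole-block writes in between.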
template <bool FillValue, typename T>
void FillBits(T* base, std::ptrdiff_t offset, std::ptrdiff_t size) {
constexpr std::ptrdiff_t kBitsPerBlock = sizeof(T) * 8;
constexpr const T kAllOnes = ~static_cast<T>(0);
assert(offset >= 0);
std::ptrdiff_t end;
for (base += offset / kBitsPerBlock, offset %= kBitsPerBlock,
end = size + offset;
end >= kBitsPerBlock; ++base, offset = 0, end -= kBitsPerBlock) {
const T mask = kAllOnes << offset;
if (FillValue) {
*base |= mask;
} else {
*base &= ~mask;
}
}
if (end) {
const T mask = (kAllOnes << offset) ^ (kAllOnes << (end % kBitsPerBlock));
if (FillValue) {
*base |= mask;
} else {
*base &= ~mask;
}
}
}
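// Bit-by-bit copy through BitIterator; source and destination may use
// different block types and arbitrary bit offsets.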
template <typename T, typename U>
void CopyBits(const U* source, std::ptrdiff_t source_offset, T* dest,
std::ptrdiff_t dest_offset, std::ptrdiff_t size) {
std::copy(BitIterator<const U>(source, source_offset),
BitIterator<const U>(source, source_offset + size),
BitIterator<T>(dest, dest_offset));
}
}
template <typename T, std::ptrdiff_t Extent = dynamic_extent>
class BitSpan {
static_assert(std::is_unsigned_v<T>, "Storage type T must be unsigned.");
static_assert(Extent == dynamic_extent || Extent >= 0,
"Extent must be dynamic_extent or >= 0.");
public:
using ExtentType =
std::conditional_t<Extent == dynamic_extent, std::ptrdiff_t,
std::integral_constant<std::ptrdiff_t, Extent>>;
using size_type = std::ptrdiff_t;
using difference_type = std::ptrdiff_t;
using iterator = BitIterator<T>;
using const_iterator = BitIterator<const T>;
using pointer = BitIterator<T>;
using const_pointer = BitIterator<T>;
using value_type = bool;
using reference = BitRef<T>;
using base_type = T;
using element_type = std::conditional_t<std::is_const_v<T>, const bool, bool>;
constexpr static std::ptrdiff_t kBitsPerBlock = sizeof(T) * 8;
constexpr static std::ptrdiff_t static_extent = Extent;
constexpr BitSpan(T* base ABSL_ATTRIBUTE_LIFETIME_BOUND,
std::ptrdiff_t offset, std::ptrdiff_t size)
: BitSpan(BitIterator<T>(base, offset), size) {}
constexpr BitSpan(BitIterator<T> begin, std::ptrdiff_t size) : begin_(begin) {
if constexpr (Extent == dynamic_extent) {
assert(size >= 0);
size_ = size;
} else {
assert(size == Extent);
}
}
template <
typename U, std::ptrdiff_t E,
std::enable_if_t<((std::is_same_v<T, U> || std::is_same_v<T, const U>)&&(
E == Extent || Extent == dynamic_extent))>* = nullptr>
constexpr BitSpan(BitSpan<U, E> other)
: begin_(other.begin()), size_(other.size()) {}
constexpr T* base() const { return begin().base(); }
constexpr std::ptrdiff_t offset() const { return begin().offset(); }
constexpr ExtentType size() const { return size_; }
BitIterator<T> begin() const { return begin_; }
BitIterator<T> end() const { return begin_ + size_; }
constexpr BitRef<T> operator[](std::ptrdiff_t i) const {
    assert(i >= 0 && i < size());
return *(begin() + i);
}
template <bool FillValue, int&... ExplicitArgumentBarrier, typename X = T>
std::enable_if_t<!std::is_const_v<X>> fill() const {
internal_bit_span::FillBits<FillValue>(base(), offset(), size());
}
template <int&... ExplicitArgumentBarrier, typename X = T>
std::enable_if_t<!std::is_const_v<X>> fill(bool value) const {
if (value) {
fill<true>();
} else {
fill<false>();
}
}
template <typename U, std::ptrdiff_t E, int&... ExplicitArgumentBarrier,
typename X = T>
std::enable_if_t<!std::is_const_v<X> &&
(E == Extent || Extent == dynamic_extent ||
E == dynamic_extent)>
DeepAssign(BitSpan<U, E> other) {
assert(other.size() == size());
internal_bit_span::CopyBits(other.base(), other.offset(), base(), offset(),
size());
}
private:
BitIterator<T> begin_;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ExtentType size_;
};
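// Number of `Block` values required to hold `length` bits, rounding up.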
template <typename Block>
inline constexpr std::ptrdiff_t BitVectorSizeInBlocks(std::ptrdiff_t length) {
return (length + sizeof(Block) * 8 - 1) / (sizeof(Block) * 8);
}
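// Usage sketch (mirrors the tests that follow): view 10 bits starting at bit
// 11 of a two-block array and fill them.
//   uint16_t data[2] = {0, 0};
//   BitSpan<uint16_t> s(data, /*offset=*/11, /*size=*/10);
//   s.fill(true);  // sets bits 11..20, spanning both blocks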
}
#endif | #include "tensorstore/util/bit_span.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::BitIterator;
using ::tensorstore::BitSpan;
using ::tensorstore::BitVectorSizeInBlocks;
static_assert(
std::is_convertible_v<BitSpan<uint32_t>, BitSpan<const uint32_t>>);
static_assert(
std::is_convertible_v<BitSpan<const uint32_t, 3>, BitSpan<const uint32_t>>);
static_assert(
std::is_convertible_v<BitSpan<uint32_t, 3>, BitSpan<const uint32_t>>);
TEST(BitSpanTest, Basic) {
uint16_t data[2] = {0, 0};
BitSpan<uint16_t> s(data, 11, 10);
EXPECT_EQ(10, s.size());
EXPECT_EQ(data, s.base());
EXPECT_EQ(11, s.offset());
}
TEST(BitSpanTest, ConstructFromIterator) {
uint16_t data[2] = {0, 0};
BitSpan<uint16_t> s(BitIterator<uint16_t>(data, 11), 10);
EXPECT_EQ(10, s.size());
EXPECT_EQ(data, s.base());
EXPECT_EQ(11, s.offset());
}
TEST(BitSpanTest, Iterate) {
uint16_t data[2] = {0, 0};
BitSpan<uint16_t> s(data, 11, 10);
std::array<bool, 10> arr = {1, 1, 0, 0, 1, 1, 1, 0, 1, 0};
std::copy(arr.begin(), arr.end(), s.begin());
  EXPECT_THAT(data, ::testing::ElementsAre(0x9800, 0xb));
std::array<bool, 10> arr2;
std::copy(s.begin(), s.end(), arr2.begin());
EXPECT_EQ(arr, arr2);
std::sort(s.begin(), s.end());
EXPECT_THAT(s, ::testing::ElementsAre(0, 0, 0, 0, 1, 1, 1, 1, 1, 1));
}
TEST(BitSpanTest, Convert) {
uint16_t data[2] = {0, 0};
BitSpan<uint16_t, 10> s_static(data, 11, 10);
BitSpan<uint16_t> s2 = s_static;
BitSpan<const uint16_t> s2_const = s2;
EXPECT_EQ(data, s_static.base());
EXPECT_EQ(11, s_static.offset());
EXPECT_EQ(10, s_static.size());
EXPECT_EQ(data, s2.base());
EXPECT_EQ(11, s2.offset());
EXPECT_EQ(10, s2.size());
EXPECT_EQ(data, s2_const.base());
EXPECT_EQ(11, s2_const.offset());
EXPECT_EQ(10, s2_const.size());
}
TEST(BitSpanTest, FillPartialSingleBlockTrue) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 10, 4).fill(true);
  EXPECT_THAT(data, ::testing::ElementsAre(0xbeaa, 0xaaaa));
}
TEST(BitSpanTest, FillPartialSingleBlockFalse) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 4).fill(false);
  EXPECT_THAT(data, ::testing::ElementsAre(0x82aa, 0xaaaa));
}
TEST(BitSpanTest, FillPartialTwoBlocksTrue) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 10).fill(true);
  EXPECT_THAT(data, ::testing::ElementsAre(0xfaaa, 0xaabf));
}
TEST(BitSpanTest, FillPartialTwoBlocksFalse) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 10).fill(false);
  EXPECT_THAT(data, ::testing::ElementsAre(0x02aa, 0xaaa0));
}
TEST(BitSpanTest, FillOneBlockExactEndTrue) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 13, 3).fill(true);
  EXPECT_THAT(data, ::testing::ElementsAre(0xeaaa, 0xaaaa));
}
TEST(BitSpanTest, FillOneBlockExactEndFalse) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 13, 3).fill(false);
  EXPECT_THAT(data, ::testing::ElementsAre(0x0aaa, 0xaaaa));
}
TEST(BitSpanTest, FillTwoBlockExactEndTrue) {
uint16_t data[3] = {0xaaaa, 0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 13, 19).fill(true);
  EXPECT_THAT(data, ::testing::ElementsAre(0xeaaa, 0xffff, 0xaaaa));
}
TEST(BitSpanTest, FillTwoBlockExactEndFalse) {
uint16_t data[3] = {0xaaaa, 0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 13, 19).fill(false);
  EXPECT_THAT(data, ::testing::ElementsAre(0x0aaa, 0x0000, 0xaaaa));
}
TEST(BitSpanTest, FillPartialThreeBlocksTrue) {
uint16_t data[3] = {0xaaaa, 0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 23).fill(true);
  EXPECT_THAT(data, ::testing::ElementsAre(0xfaaa, 0xffff, 0xaaab));
}
TEST(BitSpanTest, FillPartialThreeBlocksFalse) {
uint16_t data[3] = {0xaaaa, 0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 23).fill(false);
  EXPECT_THAT(data, ::testing::ElementsAre(0x02aa, 0x0000, 0xaaa8));
}
TEST(BitSpanTest, DeepAssign) {
  uint16_t data[2] = {0x9e0e, 0xe1f1};
BitSpan<uint16_t> s1(data, 11, 10);
  uint16_t data2[2] = {0x1e0e, 0xe1f1};
BitSpan<uint16_t> s2(data2, 9, 10);
s2.DeepAssign(s1);
  EXPECT_THAT(data, ::testing::ElementsAre(0x9e0e, 0xe1f1));
  EXPECT_THAT(data2, ::testing::ElementsAre(0x660e, 0xe1f4));
}
static_assert(BitVectorSizeInBlocks<uint64_t>(0) == 0, "");
static_assert(BitVectorSizeInBlocks<uint64_t>(1) == 1, "");
static_assert(BitVectorSizeInBlocks<uint64_t>(63) == 1, "");
static_assert(BitVectorSizeInBlocks<uint64_t>(64) == 1, "");
static_assert(BitVectorSizeInBlocks<uint64_t>(65) == 2, "");
static_assert(BitVectorSizeInBlocks<uint32_t>(65) == 3, "");
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bit_span.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bit_span_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
496b588b-9d15-443d-9b70-61c9b5967273 | cpp | google/quiche | deterministic_connection_id_generator | quiche/quic/core/deterministic_connection_id_generator.cc | quiche/quic/core/deterministic_connection_id_generator_test.cc | #include "quiche/quic/core/deterministic_connection_id_generator.h"
#include <cstring>
#include <optional>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
DeterministicConnectionIdGenerator::DeterministicConnectionIdGenerator(
uint8_t expected_connection_id_length)
: expected_connection_id_length_(expected_connection_id_length) {
if (expected_connection_id_length_ >
kQuicMaxConnectionIdWithLengthPrefixLength) {
QUIC_BUG(quic_bug_465151159_01)
<< "Issuing connection IDs longer than allowed in RFC9000";
}
}
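// The replacement is a pure function of the client-chosen ID: lengths up to
// 8 bytes take a truncated FNV-1a 64-bit hash; longer IDs (up to the RFC 9000
// length-prefix limit) append the 128-bit hash after the 64-bit one.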
std::optional<QuicConnectionId>
DeterministicConnectionIdGenerator::GenerateNextConnectionId(
const QuicConnectionId& original) {
if (expected_connection_id_length_ == 0) {
return EmptyQuicConnectionId();
}
const uint64_t connection_id_hash64 = QuicUtils::FNV1a_64_Hash(
absl::string_view(original.data(), original.length()));
if (expected_connection_id_length_ <= sizeof(uint64_t)) {
return QuicConnectionId(
reinterpret_cast<const char*>(&connection_id_hash64),
expected_connection_id_length_);
}
char new_connection_id_data[255] = {};
const absl::uint128 connection_id_hash128 = QuicUtils::FNV1a_128_Hash(
absl::string_view(original.data(), original.length()));
static_assert(sizeof(connection_id_hash64) + sizeof(connection_id_hash128) <=
sizeof(new_connection_id_data),
"bad size");
memcpy(new_connection_id_data, &connection_id_hash64,
sizeof(connection_id_hash64));
memcpy(new_connection_id_data + sizeof(connection_id_hash64),
&connection_id_hash128, sizeof(connection_id_hash128));
return QuicConnectionId(new_connection_id_data,
expected_connection_id_length_);
}
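// Only IDs whose length differs from the configured one are replaced; an ID
// that already has the expected length is kept as-is.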
std::optional<QuicConnectionId>
DeterministicConnectionIdGenerator::MaybeReplaceConnectionId(
const QuicConnectionId& original, const ParsedQuicVersion& version) {
if (original.length() == expected_connection_id_length_) {
return std::optional<QuicConnectionId>();
}
QUICHE_DCHECK(version.AllowsVariableLengthConnectionIds());
std::optional<QuicConnectionId> new_connection_id =
GenerateNextConnectionId(original);
if (!new_connection_id.has_value()) {
QUIC_BUG(unset_next_connection_id);
return std::nullopt;
}
QUICHE_DCHECK_EQ(
*new_connection_id,
static_cast<QuicConnectionId>(*GenerateNextConnectionId(original)));
QUICHE_DCHECK_EQ(expected_connection_id_length_, new_connection_id->length());
QUIC_DLOG(INFO) << "Replacing incoming connection ID " << original << " with "
<< *new_connection_id;
return new_connection_id;
}
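// Usage sketch: a generator configured for 8-byte IDs maps a given
// QuicConnectionId `original` to the first 8 bytes of FNV1a_64(original).
//   DeterministicConnectionIdGenerator gen(/*expected_connection_id_length=*/8);
//   std::optional<QuicConnectionId> next = gen.GenerateNextConnectionId(original);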
} | #include "quiche/quic/core/deterministic_connection_id_generator.h"
#include <optional>
#include <ostream>
#include <vector>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
namespace {
struct TestParams {
TestParams(int connection_id_length)
: connection_id_length_(connection_id_length) {}
TestParams() : TestParams(kQuicDefaultConnectionIdLength) {}
friend std::ostream& operator<<(std::ostream& os, const TestParams& p) {
os << "{ connection ID length: " << p.connection_id_length_ << " }";
return os;
}
int connection_id_length_;
};
std::vector<struct TestParams> GetTestParams() {
std::vector<struct TestParams> params;
std::vector<int> connection_id_lengths{7, 8, 9, 16, 20};
for (int connection_id_length : connection_id_lengths) {
params.push_back(TestParams(connection_id_length));
}
return params;
}
class DeterministicConnectionIdGeneratorTest
: public QuicTestWithParam<TestParams> {
public:
DeterministicConnectionIdGeneratorTest()
: connection_id_length_(GetParam().connection_id_length_),
generator_(DeterministicConnectionIdGenerator(connection_id_length_)),
version_(ParsedQuicVersion::RFCv1()) {}
protected:
int connection_id_length_;
DeterministicConnectionIdGenerator generator_;
ParsedQuicVersion version_;
};
INSTANTIATE_TEST_SUITE_P(DeterministicConnectionIdGeneratorTests,
DeterministicConnectionIdGeneratorTest,
::testing::ValuesIn(GetTestParams()));
TEST_P(DeterministicConnectionIdGeneratorTest,
NextConnectionIdIsDeterministic) {
QuicConnectionId connection_id64a = TestConnectionId(33);
QuicConnectionId connection_id64b = TestConnectionId(33);
EXPECT_EQ(connection_id64a, connection_id64b);
EXPECT_EQ(*generator_.GenerateNextConnectionId(connection_id64a),
*generator_.GenerateNextConnectionId(connection_id64b));
QuicConnectionId connection_id72a = TestConnectionIdNineBytesLong(42);
QuicConnectionId connection_id72b = TestConnectionIdNineBytesLong(42);
EXPECT_EQ(connection_id72a, connection_id72b);
EXPECT_EQ(*generator_.GenerateNextConnectionId(connection_id72a),
*generator_.GenerateNextConnectionId(connection_id72b));
}
TEST_P(DeterministicConnectionIdGeneratorTest,
NextConnectionIdLengthIsCorrect) {
const char connection_id_bytes[255] = {};
for (uint8_t i = 0; i < sizeof(connection_id_bytes) - 1; ++i) {
QuicConnectionId connection_id(connection_id_bytes, i);
std::optional<QuicConnectionId> replacement_connection_id =
generator_.GenerateNextConnectionId(connection_id);
ASSERT_TRUE(replacement_connection_id.has_value());
EXPECT_EQ(connection_id_length_, replacement_connection_id->length());
}
}
TEST_P(DeterministicConnectionIdGeneratorTest, NextConnectionIdHasEntropy) {
for (uint64_t i = 0; i < 256; ++i) {
QuicConnectionId connection_id_i = TestConnectionId(i);
std::optional<QuicConnectionId> new_i =
generator_.GenerateNextConnectionId(connection_id_i);
ASSERT_TRUE(new_i.has_value());
EXPECT_NE(connection_id_i, *new_i);
for (uint64_t j = i + 1; j <= 256; ++j) {
QuicConnectionId connection_id_j = TestConnectionId(j);
EXPECT_NE(connection_id_i, connection_id_j);
std::optional<QuicConnectionId> new_j =
generator_.GenerateNextConnectionId(connection_id_j);
ASSERT_TRUE(new_j.has_value());
EXPECT_NE(*new_i, *new_j);
}
}
}
TEST_P(DeterministicConnectionIdGeneratorTest,
OnlyReplaceConnectionIdWithWrongLength) {
const char connection_id_input[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d,
0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14};
for (int i = 0; i < kQuicMaxConnectionIdWithLengthPrefixLength; i++) {
QuicConnectionId input = QuicConnectionId(connection_id_input, i);
std::optional<QuicConnectionId> output =
generator_.MaybeReplaceConnectionId(input, version_);
if (i == connection_id_length_) {
EXPECT_FALSE(output.has_value());
} else {
ASSERT_TRUE(output.has_value());
EXPECT_EQ(*output, generator_.GenerateNextConnectionId(input));
}
}
}
TEST_P(DeterministicConnectionIdGeneratorTest, ReturnLength) {
EXPECT_EQ(generator_.ConnectionIdLength(0x01), connection_id_length_);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/deterministic_connection_id_generator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/deterministic_connection_id_generator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
0d00504b-3897-43c0-a93d-bf046bfb5a6e | cpp | tensorflow/tensorflow | conditional_code_motion | third_party/xla/xla/service/conditional_code_motion.cc | third_party/xla/xla/service/conditional_code_motion_test.cc | #include "xla/service/conditional_code_motion.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace conditional_opt {
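// Clones a nested tuple: when `tuple` has non-GTE users (or is the root), a
// fresh tuple of per-element GTEs replaces it, and the same rewrite recurses
// into each nested tuple element. Returns the tuple at this level.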
HloInstruction* CloneNestedTuples(HloInstruction* tuple) {
if (!tuple->shape().IsTuple()) {
return tuple;
}
std::vector<HloInstruction*> tuple_users, gte_users;
for (int i = 0; i < tuple->shape().tuple_shapes_size(); ++i) {
gte_users.push_back(nullptr);
}
for (auto* tuple_user : tuple->users()) {
VLOG(2) << "tuple_user: " << tuple_user->ToString() << "\n";
if (tuple_user->opcode() != HloOpcode::kGetTupleElement ||
tuple_user == tuple->parent()->root_instruction()) {
tuple_users.push_back(tuple_user);
} else {
gte_users[tuple_user->tuple_index()] = tuple_user;
}
}
if (!tuple_users.empty() || tuple->user_count() == 0 ||
tuple == tuple->parent()->root_instruction()) {
VLOG(5) << "CLONING: " << tuple->ToString() << "\n";
int64_t tuple_size = tuple->shape().tuple_shapes_size();
std::vector<HloInstruction*> operands;
operands.reserve(tuple_size);
for (int64_t j = 0; j < tuple_size; ++j) {
HloInstruction* gte =
(gte_users[j] == nullptr)
? tuple->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
tuple->shape().tuple_shapes(j), tuple, j))
: gte_users[j];
CHECK_NE(gte, nullptr);
operands.push_back(CloneNestedTuples(gte));
}
HloInstruction* new_tuple =
tuple->parent()->AddInstruction(HloInstruction::CreateTuple(operands));
VLOG(2) << "new_tuple: " << new_tuple->ToString() << "\n";
if (tuple == tuple->parent()->root_instruction()) {
      tuple->parent()->set_root_instruction(new_tuple,
                                            /*accept_different_shape=*/true);
} else {
for (auto tuple_user : tuple_users) {
TF_CHECK_OK(tuple->ReplaceUseWithDifferentShape(tuple_user, new_tuple));
}
}
return new_tuple;
}
for (auto gte_user : gte_users) {
if (gte_user != nullptr) {
auto gte = CloneNestedTuples(gte_user);
CHECK_NE(gte, nullptr);
}
}
return tuple;
}
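// Worklist-driven visitor over boundaries, skipping boundaries that were
// already visited; seeded with the conditional itself when given one.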
class BoundaryVisitor {
public:
explicit BoundaryVisitor(HloInstruction* conditional) {
Boundary b(Boundary::Position::kInsideBranch);
b.mutable_operands().push_back(conditional);
worklist_.push_back(b);
}
BoundaryVisitor() {}
Boundary PopNextBoundary() {
CHECK(!worklist_.empty());
Boundary b = worklist_.front();
worklist_.pop_front();
while (!worklist_.empty() && ContainsKey(visited_, b)) {
b = worklist_.front();
worklist_.pop_front();
}
visited_.insert(b);
return b;
}
void AddToWorkList(const Boundary& b) {
CHECK(!b.operands().empty());
worklist_.push_back(b);
}
bool HasNextBoundary() {
while (!worklist_.empty()) {
Boundary b = worklist_.front();
if (!ContainsKey(visited_, b)) {
break;
}
worklist_.pop_front();
}
return !worklist_.empty();
}
private:
std::deque<Boundary> worklist_;
absl::flat_hash_set<Boundary> visited_;
};
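// Counts distinct non-constant instructions in `ops`.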
template <class OpCollection>
int64_t CountNonLeafOps(const OpCollection& ops) {
absl::flat_hash_set<HloInstruction*> op_set;
for (auto op : ops) {
if (!op_set.contains(op) && op->opcode() != HloOpcode::kConstant) {
op_set.insert(op);
}
}
return op_set.size();
}
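// Heuristic reuse score for an (op, user) pair: GTE users, converts of
// convolution/dot results, and cheap producers score 0; a conditional
// producer scores high; everything else scores negative.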
int64_t ReusesCarriedBy(HloOpcode op, HloOpcode user) {
switch (user) {
case HloOpcode::kGetTupleElement:
return 0;
case HloOpcode::kConvert:
switch (op) {
case HloOpcode::kConvolution:
case HloOpcode::kDot:
return 0;
default:
break;
}
break;
default:
break;
}
switch (op) {
case HloOpcode::kParameter:
case HloOpcode::kConstant:
case HloOpcode::kGetTupleElement:
return 0;
case HloOpcode::kConditional:
return 10;
default:
return -10;
}
}
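// Whether an instruction of opcode `op` (whose operand has opcode `child_op`)
// is cheap enough to hoist across a conditional boundary.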
bool WorthHoisting(HloOpcode op, HloOpcode child_op) {
switch (op) {
case HloOpcode::kConvert:
switch (child_op) {
case HloOpcode::kAllReduce:
case HloOpcode::kReshape:
case HloOpcode::kGetTupleElement:
return true;
default:
return false;
}
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
switch (child_op) {
case HloOpcode::kParameter:
return false;
default:
return true;
}
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kReduce:
case HloOpcode::kConstant:
case HloOpcode::kReshape:
case HloOpcode::kBroadcast:
return true;
default:
if (HloInstruction::IsOpElementwise(op)) {
return true;
}
return false;
}
}
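// True when the per-branch instructions are all identical; cross-module
// all-reduces are compared with their channel ids temporarily normalized.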
bool InstructionWithinBranchIdentical(
const std::vector<HloInstruction*>& instructions,
bool is_layout_sensitive) {
auto eq_operand = [&](const HloInstruction* a, const HloInstruction* b) {
bool eq_operands = is_layout_sensitive
? ShapeUtil::Equal(a->shape(), b->shape())
: ShapeUtil::Compatible(a->shape(), b->shape());
return eq_operands;
};
auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
return *a == *b;
};
if (instructions.empty()) {
return false;
}
if (instructions[0]->IsCrossModuleAllReduce()) {
return std::all_of(
instructions.begin(), instructions.end(),
[&](HloInstruction* instruction) {
if (!instruction->IsCrossModuleAllReduce()) {
return false;
}
auto old_channel_id = instruction->channel_id();
instruction->set_channel_id(instructions[0]->channel_id());
bool eq_instructions = instructions[0]->Identical(
*instruction, eq_operand, eq_computations, is_layout_sensitive);
instruction->set_channel_id(old_channel_id);
return eq_instructions;
});
}
return std::all_of(instructions.begin(), instructions.end(),
[&](HloInstruction* instruction) {
return instructions[0]->Identical(
*instruction, eq_operand, eq_computations,
is_layout_sensitive);
});
}
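// Hoists the instruction represented by `boundary` out of the conditional by
// cloning branch 0's instruction with operands taken from already-hoisted
// boundaries; records the result in `hoisted_boundaries`.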
void CopyOutOfConditional(
Boundary& boundary, HloInstruction* conditional,
absl::flat_hash_map<Boundary, Boundary>& hoisted_boundaries) {
CHECK(boundary.IsInsideBranch());
absl::InlinedVector<HloInstruction*, 4> new_operands;
const HloInstruction* branch0_inst = boundary.operands()[0];
for (int i = 0; i < branch0_inst->operands().size(); ++i) {
Boundary operand_boundary(boundary.GetPosition());
for (HloInstruction* operand : boundary.operands()) {
operand_boundary.mutable_operands().push_back(operand->operands()[i]);
}
VLOG(2) << "Looking for: " << operand_boundary.ToString();
auto hoisted_boundaries_it = hoisted_boundaries.find(operand_boundary);
CHECK(hoisted_boundaries_it != hoisted_boundaries.end());
Boundary hoisted_boundary = hoisted_boundaries_it->second;
CHECK(hoisted_boundary.IsOutsideBranchUser());
CHECK_EQ(hoisted_boundary.operands().size(), 1);
new_operands.push_back(hoisted_boundary.operands()[0]);
}
HloInstruction* new_instruction = conditional->parent()->AddInstruction(
branch0_inst->CloneWithNewOperands(branch0_inst->shape(), new_operands));
VLOG(2) << "new instruction:" << new_instruction->ToString();
Boundary hoisted_boundary(Boundary::Position::kOutsideBranchUser);
hoisted_boundary.mutable_operands().push_back(new_instruction);
hoisted_boundaries[boundary] = hoisted_boundary;
}
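// Sinks the single instruction in `boundary` into every branch: constants are
// cloned per branch, tuple elements are read off the branch roots, and the
// per-branch copies are recorded in `hoisted_boundaries`.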
void CopyIntoConditional(
Boundary& boundary, HloInstruction* conditional,
absl::flat_hash_map<Boundary, Boundary>& hoisted_boundaries) {
CHECK(boundary.IsOutsideBranchUser() || boundary.IsOutsideBranchOperand());
CHECK_EQ(boundary.operands().size(), 1);
int num_branches = conditional->branch_count();
std::vector<absl::InlinedVector<HloInstruction*, 4>> new_operands(
num_branches);
HloInstruction* op = boundary.operands()[0];
for (HloInstruction* operand : op->operands()) {
Boundary operand_boundary(boundary.GetPosition());
operand_boundary.mutable_operands().push_back(operand);
VLOG(2) << "Looking for: " << operand_boundary.ToString();
auto hoisted_boundaries_it = hoisted_boundaries.find(operand_boundary);
if (hoisted_boundaries_it != hoisted_boundaries.end()) {
Boundary hoisted_boundary = hoisted_boundaries_it->second;
CHECK(hoisted_boundary.IsInsideBranch());
CHECK_EQ(hoisted_boundary.operands().size(), num_branches);
for (int j = 0; j < num_branches; ++j) {
new_operands[j].push_back(hoisted_boundary.operands()[j]);
}
} else {
for (int j = 0; j < num_branches; ++j) {
switch (operand->opcode()) {
case HloOpcode::kConstant: {
auto new_operand =
conditional->branch_computation(j)->AddInstruction(
operand->Clone());
VLOG(2) << "new instruction:" << new_operand->ToString();
new_operands[j].push_back(new_operand);
break;
}
case HloOpcode::kGetTupleElement: {
auto gte = Cast<HloGetTupleElementInstruction>(operand);
int64_t index = gte->tuple_index();
HloInstruction* root =
conditional->branch_computation(j)->root_instruction();
CHECK(root->opcode() == HloOpcode::kTuple &&
index < root->operand_count())
<< root->ToString() << " " << gte->ToString();
auto new_operand = root->mutable_operand(index);
VLOG(2) << "new instruction:" << new_operand->ToString();
new_operands[j].push_back(new_operand);
break;
}
default:
LOG(FATAL) << "Unexpected out-of-boundary instruction:"
<< operand->ToString() << "\n";
}
}
}
}
Boundary hoisted_boundary(Boundary::Position::kInsideBranch);
for (int j = 0; j < num_branches; ++j) {
HloInstruction* new_instruction =
conditional->branch_computation(j)->AddInstruction(
op->CloneWithNewOperands(op->shape(), new_operands[j]));
VLOG(2) << "new instruction:" << new_instruction->ToString();
hoisted_boundary.mutable_operands().push_back(new_instruction);
}
hoisted_boundaries[boundary] = hoisted_boundary;
}
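// Finds root-tuple operand positions where every branch ends in a
// structurally matching, non-repeated kConvert that can be hoisted out.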
absl::flat_hash_set<int64_t> FindSpecialConverts(HloInstruction* old_root,
int branch_count,
HloInstruction* conditional,
bool is_layout_sensitive) {
absl::flat_hash_set<int64_t> special_convert;
auto convert_invalid =
[](const HloInstruction* convert_set_candidate) -> bool {
bool invalid_user = absl::c_any_of(
convert_set_candidate->users(), [](const HloInstruction* user) -> bool {
return (user->opcode() == HloOpcode::kConvert);
});
bool invalid_producer =
absl::c_any_of(convert_set_candidate->operands(),
[](const HloInstruction* operand) -> bool {
return (operand->opcode() == HloOpcode::kConvert);
});
return (invalid_user || invalid_producer);
};
for (int64_t operand_num = 0; operand_num < old_root->operand_count();
++operand_num) {
if (old_root->operand(operand_num)->opcode() != HloOpcode::kConvert) {
continue;
}
bool replica = true;
HloInstruction* special_convert_candidate =
old_root->mutable_operand(operand_num);
auto repeated =
absl::c_count_if(old_root->operands(),
[&](const HloInstruction* operand) -> bool {
return (special_convert_candidate == operand);
}) > 1;
if (convert_invalid(special_convert_candidate) || repeated) {
continue;
}
for (int others = 1; others < branch_count; ++others) {
HloInstruction* others_root =
conditional->branch_computation(others)->root_instruction();
const HloInstruction* other_convert = others_root->operand(operand_num);
if (other_convert->opcode() != HloOpcode::kConvert ||
convert_invalid(other_convert)) {
replica = false;
break;
}
bool eq_shape =
is_layout_sensitive
? ShapeUtil::Equal(other_convert->shape(),
special_convert_candidate->shape()) &&
ShapeUtil::Equal(
other_convert->operand(0)->shape(),
special_convert_candidate->operand(0)->shape())
: ShapeUtil::Compatible(other_convert->shape(),
special_convert_candidate->shape()) &&
ShapeUtil::Compatible(
other_convert->operand(0)->shape(),
special_convert_candidate->operand(0)->shape());
if (!eq_shape) {
replica = false;
break;
}
auto repeated =
absl::c_count_if(others_root->operands(),
[&](const HloInstruction* operand) -> bool {
return (special_convert_candidate == operand);
}) > 1;
if (repeated) {
replica = false;
break;
}
}
if (replica) {
special_convert.insert(operand_num);
}
}
return special_convert;
}
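// Reroutes users of `conditional` through an explicit tuple of GTEs so the
// conditional's result can be re-plumbed when its shape changes.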
absl::Status RestructureConditionalInstruction(HloComputation* computation,
HloInstruction* conditional) {
HloInstruction* old_root = computation->root_instruction();
std::vector<HloInstruction*> new_operands;
int cur_index = 0;
for (; cur_index < ShapeUtil::TupleElementCount(conditional->shape());
++cur_index) {
new_operands.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(conditional->shape(), cur_index),
conditional, cur_index)));
}
HloInstruction* new_tuple =
computation->AddInstruction(HloInstruction::CreateTuple(new_operands));
if (old_root == conditional) {
computation->set_root_instruction(new_tuple);
} else {
std::vector<HloInstruction*> new_tuple_users;
for (auto conditional_user : conditional->users()) {
auto is_new_gte = absl::c_find_if(
new_operands,
[&](HloInstruction* instr) { return instr == conditional_user; });
if (is_new_gte == new_operands.end()) {
new_tuple_users.push_back(conditional_user);
}
}
for (auto new_tuple_user : new_tuple_users) {
TF_RETURN_IF_ERROR(
conditional->ReplaceUseWith(new_tuple_user, new_tuple));
}
}
VLOG(2) << "computation after root restructure:\n" << computation->ToString();
return absl::OkStatus();
}
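// Hoists the "special" converts out of every branch: branch roots are rebuilt
// to return the convert operands, and the converts are re-created in the
// parent computation on the conditional's outputs.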
absl::StatusOr<bool> ConvertSpecialMove(HloInstruction* conditional,
bool is_layout_sensitive) {
int branch_count = conditional->branch_count();
if (branch_count <= 0) {
return false;
}
for (int branch_num = 0; branch_num < branch_count; ++branch_num) {
HloInstruction* branch_root =
conditional->branch_computation(branch_num)->root_instruction();
if (branch_root->opcode() != HloOpcode::kTuple) {
return false;
}
}
HloInstruction* old_root =
conditional->branch_computation(0)->root_instruction();
VLOG(2) << "BEFORE :" << conditional->GetModule()->ToString();
auto find_gte = [](const HloInstruction* conditional_result,
int64_t index) -> HloInstruction* {
for (HloInstruction* instr : conditional_result->users()) {
if (instr->opcode() != HloOpcode::kGetTupleElement) {
return nullptr;
}
if (instr->tuple_index() == index) {
return instr;
}
}
return nullptr;
};
absl::flat_hash_set<int64_t> special_convert = FindSpecialConverts(
old_root, branch_count, conditional, is_layout_sensitive);
if (special_convert.empty()) {
return false;
}
TF_RETURN_IF_ERROR(
RestructureConditionalInstruction(conditional->parent(), conditional));
for (int branch = 0; branch < branch_count; branch++) {
old_root = conditional->branch_computation(branch)->root_instruction();
absl::flat_hash_map<HloInstruction*, int64_t> map_inst_to_tuple_index;
std::vector<HloInstruction*> new_operands(old_root->operand_count());
absl::flat_hash_set<HloInstruction*> to_hoist_set;
for (int64_t operand_num = 0; operand_num < old_root->operand_count();
++operand_num) {
map_inst_to_tuple_index[old_root->mutable_operand(operand_num)] =
operand_num;
}
for (int64_t operand_num = 0; operand_num < old_root->operand_count();
++operand_num) {
HloInstruction* hoist = old_root->mutable_operand(operand_num);
if (!special_convert.contains(operand_num)) {
new_operands[operand_num] = old_root->mutable_operand(operand_num);
continue;
}
to_hoist_set.insert(hoist);
int64_t new_tuple_count = old_root->operand_count();
bool inplace = true;
CHECK(!hoist->operands().empty());
for (HloInstruction* prod : hoist->operands()) {
if (inplace) {
map_inst_to_tuple_index[prod] = map_inst_to_tuple_index[hoist];
new_operands[map_inst_to_tuple_index[hoist]] = prod;
inplace = false;
} else {
map_inst_to_tuple_index[prod] = new_tuple_count++;
new_operands.push_back(prod);
}
}
}
HloComputation* cur_branch = conditional->branch_computation(branch);
HloInstruction* new_branch_root =
cur_branch->AddInstruction(HloInstruction::CreateTuple(new_operands));
    cur_branch->set_root_instruction(new_branch_root,
                                     /*accept_different_shape=*/true);
TF_CHECK_OK(cur_branch->RemoveInstruction(old_root));
if (branch != 0) {
continue;
}
HloComputation* conditional_parent = conditional->parent();
HloInstruction* newconditional =
conditional_parent->AddInstruction(HloInstruction::CreateConditional(
cur_branch->root_instruction()->shape(),
conditional->mutable_operand(0),
absl::MakeSpan(conditional->branch_computations()),
absl::MakeSpan(conditional->operands()).subspan(1)));
TF_RETURN_IF_ERROR(
conditional->ReplaceAllUsesWithDifferentShape(newconditional));
TF_CHECK_OK(conditional_parent->RemoveInstruction(conditional));
conditional = newconditional;
for (HloInstruction* hoist : to_hoist_set) {
VLOG(2) << "Hoisting instruction:" << hoist->ToString();
int64_t hoist_index = map_inst_to_tuple_index[hoist];
HloInstruction* gte_hoist = find_gte(conditional, hoist_index);
CHECK(gte_hoist != nullptr);
std::vector<HloInstruction*> new_operands;
for (HloInstruction* op : hoist->operands()) {
HloInstruction* gte = conditional_parent->AddInstruction(
HloInstruction::CreateGetTupleElement(op->shape(), conditional,
map_inst_to_tuple_index[op]));
new_operands.push_back(gte);
}
HloInstruction* hoisted = conditional_parent->AddInstruction(
hoist->CloneWithNewOperands(hoist->shape(), new_operands));
VLOG(2) << "Hoisted instruction in parent:" << hoisted->ToString();
TF_RETURN_IF_ERROR(gte_hoist->ReplaceAllUsesWith(hoisted));
TF_CHECK_OK(conditional_parent->RemoveInstruction(gte_hoist));
}
}
VLOG(2) << "AFTER :" << conditional->GetModule()->ToString();
return true;
}
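// Moves the `to_move_out` boundaries out of `conditional`: branch roots become
// tuples of the `new_boundaries` operands, hoisted copies are created in the
// parent, and the old GTE users are redirected to them.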
absl::StatusOr<bool> ConditionalCodeMotion::MoveInstructionOut(
HloInstruction* conditional, std::vector<Boundary>& to_move_out,
std::vector<Boundary>& new_boundaries) {
if (to_move_out.empty()) {
return false;
}
VLOG(1) << "Modifying code--number of boundaries to move out of conditional:"
<< to_move_out.size() << "\n";
HloComputation* conditional_parent = conditional->parent();
std::vector<HloInstruction*> old_conditional_users = conditional->users();
absl::flat_hash_map<Boundary, Boundary> hoisted_boundaries;
VLOG(2) << "before opt:"
<< conditional_parent->ToString(HloPrintOptions::Fingerprint())
<< "\n";
int64_t op_index = 0;
for (const Boundary& b : new_boundaries) {
HloInstruction* op = b.operands()[0];
CHECK(op != nullptr);
VLOG(2) << "Mapping new boundary instr: " << op->ToString() << "\n";
HloInstruction* gtr = conditional_parent->AddInstruction(
HloInstruction::CreateGetTupleElement(op->shape(), conditional,
op_index++));
Boundary b2(Boundary::Position::kOutsideBranchUser);
b2.mutable_operands().push_back(gtr);
hoisted_boundaries[b] = b2;
}
for (int64_t i = to_move_out.size() - 1; i >= 0; i--) {
CopyOutOfConditional(to_move_out[i], conditional, hoisted_boundaries);
}
VLOG(2) << "Done copy branch instructions out\n"
<< conditional_parent->ToString(HloPrintOptions::Fingerprint())
<< "\n";
for (auto user_instr : old_conditional_users) {
VLOG(2) << "Checking conditional user: " << user_instr->ToString() << "\n";
CHECK(user_instr->opcode() == HloOpcode::kGetTupleElement);
auto tuple_opd = static_cast<HloGetTupleElementInstruction*>(user_instr);
int64_t index = tuple_opd->tuple_index();
Boundary old_user_boundary(Boundary::Position::kInsideBranch);
for (const HloComputation* called_computation :
conditional->called_computations()) {
HloInstruction* root = called_computation->root_instruction();
CHECK(root->operands().size() > index);
old_user_boundary.mutable_operands().push_back(root->operands()[index]);
}
CHECK(ContainsKey(hoisted_boundaries, old_user_boundary));
HloInstruction* new_opd =
hoisted_boundaries[old_user_boundary].operands()[0];
CHECK(new_opd != nullptr);
VLOG(2) << "Try replace all uses of :" << old_user_boundary.ToString()
<< "\n";
TF_RETURN_IF_ERROR(user_instr->ReplaceAllUsesWith(new_opd));
TF_RETURN_IF_ERROR(conditional_parent->RemoveInstruction(user_instr));
}
VLOG(2) << "Done changing conditional users\n"
<< conditional_parent->ToString() << "\n";
int64_t branch_count = conditional->branch_count();
for (int i = 0; i < branch_count; i++) {
auto computation = conditional->branch_computation(i);
std::vector<HloInstruction*> elements;
for (const auto& b1 : new_boundaries) {
HloInstruction* op = b1.operands()[i];
CHECK(op != nullptr);
VLOG(2) << "Adding to root " << i << " with " << op->ToString() << "\n";
elements.push_back(op);
}
HloInstruction* tuple =
computation->AddInstruction(HloInstruction::CreateTuple(elements));
    computation->set_root_instruction(tuple, /*accept_different_shape=*/true);
VLOG(2) << "computation is :" << computation->ToString() << "\n";
for (const auto& b2 : to_move_out) {
auto instr_to_remove = b2.operands()[i];
if (!computation->IsMarkedAsDead(instr_to_remove) &&
instr_to_remove->IsDead()) {
VLOG(2) << "Removing boundary:" << b2.ToString() << "\n";
VLOG(2) << "computation: " << computation->ToString() << "\n";
TF_RETURN_IF_ERROR(computation->RemoveInstruction(instr_to_remove));
}
}
}
HloInstruction* new_root =
conditional->branch_computation(0)->root_instruction();
*conditional->mutable_shape() = new_root->shape();
conditional->copy_sharding(new_root);
VLOG(2) << "done moving instructions out of branches\n"
<< conditional_parent->ToString(HloPrintOptions::Fingerprint());
return true;
}
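// Moves users of the conditional (the `to_move_in` boundaries) into every
// branch, extending branch roots and re-emitting GTEs for values that are
// still needed outside.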
absl::StatusOr<bool> ConditionalCodeMotion::MoveUserInstructionsIn(
HloInstruction* conditional, std::vector<Boundary>& to_move_in) {
if (to_move_in.empty()) {
return false;
}
absl::flat_hash_map<Boundary, Boundary> hoisted_boundaries;
int64_t to_move_in_size = to_move_in.size();
int64_t branch_count = conditional->branch_count();
HloGetTupleElementInstruction* tuple_use =
DynCast<HloGetTupleElementInstruction>(to_move_in[0].operands()[0]);
int64_t use_index = (tuple_use != nullptr && tuple_use->user_count() == 1)
? tuple_use->tuple_index()
: -1;
VLOG(2) << "Tuple use index = " << use_index << "\n";
int64_t op_index =
conditional->shape().IsTuple()
? ((use_index >= 0) ? conditional->shape().tuple_shapes_size() - 1
: conditional->shape().tuple_shapes_size())
: 0;
Boundary b_opd_use(Boundary::Position::kInsideBranch);
Boundary b_old_root(Boundary::Position::kInsideBranch);
for (int i = 0; i < branch_count; i++) {
auto computation = conditional->branch_computation(i);
auto old_root = computation->root_instruction();
b_old_root.mutable_operands().push_back(old_root);
std::vector<HloInstruction*> operands;
if (old_root->opcode() == HloOpcode::kTuple) {
      for (int j = 0; j < old_root->operand_count(); ++j) {
        if (j != use_index) {
          operands.push_back(old_root->operands()[j]);
        } else {
          b_opd_use.mutable_operands().push_back(old_root->operands()[j]);
        }
      }
} else if (old_root->shape().IsTuple()) {
const Shape& old_shape = old_root->shape();
      for (int j = 0; j < old_shape.tuple_shapes_size(); ++j) {
        auto element =
            computation->AddInstruction(HloInstruction::CreateGetTupleElement(
                old_shape.tuple_shapes(j), old_root, j));
        if (j != use_index) {
          operands.push_back(element);
        } else {
          b_opd_use.mutable_operands().push_back(element);
        }
      }
} else {
b_opd_use.mutable_operands().push_back(conditional);
}
HloInstruction* new_root =
computation->AddInstruction(HloInstruction::CreateTuple(operands));
VLOG(2) << "setting new root: " << new_root->ToString() << "\n";
    computation->set_root_instruction(new_root,
                                      /*accept_different_shape=*/true);
if (old_root->opcode() == HloOpcode::kTuple) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(old_root));
}
VLOG(2) << "new branch computation: " << computation->ToString() << "\n";
}
if (use_index != -1) {
for (auto* user : conditional->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement &&
user->tuple_index() > use_index) {
user->set_tuple_index(user->tuple_index() - 1);
}
}
}
Boundary conditional_boundary(Boundary::Position::kOutsideBranchUser);
conditional_boundary.mutable_operands().push_back(conditional);
hoisted_boundaries[conditional_boundary] = b_old_root;
if (use_index >= 0) {
VLOG(2) << "Mapping GTE: " << tuple_use->ToString() << "\n";
Boundary tuple_use_boundary(Boundary::Position::kOutsideBranchUser);
tuple_use_boundary.mutable_operands().push_back(tuple_use);
hoisted_boundaries[tuple_use_boundary] = b_opd_use;
}
int64_t cp_start = (tuple_use != nullptr) ? 1 : 0;
for (int64_t to_move_index = cp_start; to_move_index < to_move_in_size;
to_move_index++) {
Boundary b_to_move = to_move_in[to_move_index];
HloInstruction* op = b_to_move.operands()[0];
CHECK(op != nullptr);
bool to_be_used_outside = true;
VLOG(2) << "Mapping new boundary instr: " << op->ToString() << "\n";
if (to_move_index < to_move_in_size - 1 && op->user_count() == 1 &&
op->users()[0] == to_move_in[to_move_index + 1].operands()[0]) {
to_be_used_outside = false;
VLOG(2) << "Instruction is not to be used outside the branch\n";
}
Boundary b(Boundary::Position::kInsideBranch);
CopyIntoConditional(b_to_move, conditional, hoisted_boundaries);
if (to_be_used_outside) {
for (int i = 0; i < branch_count; ++i) {
auto computation = conditional->branch_computation(i);
auto new_op = hoisted_boundaries[b_to_move].operands()[i];
auto new_root = computation->root_instruction();
new_root->AppendOperand(new_op);
*new_root->mutable_shape()->add_tuple_shapes() = new_op->shape();
VLOG(2) << "Extending conditional root " << i << " : "
<< new_root->ToString() << "\n";
}
HloInstruction* gtr = conditional->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(op->shape(), conditional,
op_index++));
TF_RETURN_IF_ERROR(op->ReplaceAllUsesWith(gtr));
if (conditional->parent()->root_instruction() == op) {
conditional->parent()->set_root_instruction(gtr);
}
}
}
VLOG(2) << "Done copying instructions inside branch: "
<< conditional->ToString(HloPrintOptions::Fingerprint()) << "\n";
HloInstruction* new_root =
conditional->branch_computation(0)->root_instruction();
*conditional->mutable_shape() = new_root->shape();
conditional->copy_sharding(new_root);
if (use_index != -1) {
for (auto* user : conditional->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
VLOG(2) << "Resetting shape of user: " << user->ToString() << "\n";
*user->mutable_shape() =
conditional->shape().tuple_shapes(user->tuple_index());
}
}
}
VLOG(2) << "Done moving user instructions inside branches\n"
<< conditional->parent()->ToString(HloPrintOptions::Fingerprint());
return true;
}
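// Sinks an operand of the conditional (e.g. a broadcast feeding the branch
// tuple) into the branch computations themselves.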
class MoveOperandIntoBranch {
public:
MoveOperandIntoBranch() = default;
absl::Status operator()(HloInstruction* inst, HloInstruction*& user) {
VLOG(1) << "operand to move into branch: " << inst->ToString();
VLOG(2) << "MoveIntoBranches user =" << user->ToString() << "\n";
CHECK(inst->user_count() == 1 || inst->opcode() == HloOpcode::kBroadcast);
absl::InlinedVector<HloInstruction*, 4> new_operands;
std::vector<std::vector<int64_t>> matching_tuple_indices;
TF_RETURN_IF_ERROR(
ReplaceInputInUser(inst, user, new_operands, matching_tuple_indices));
TF_RETURN_IF_ERROR(
MoveInputIntoBranch(inst, user, new_operands, matching_tuple_indices));
if (inst->user_count() == 0) {
TF_RETURN_IF_ERROR(inst->parent()->RemoveInstruction(inst));
}
return absl::OkStatus();
}
private:
HloInstruction* InsertIntoBranch(HloInstruction* inst,
HloInstruction* branch_input) {
VLOG(2) << "Branch input=" << branch_input->ToString() << "\n";
auto branch_comp = branch_input->parent();
std::vector<HloInstruction*> operands(inst->operand_count());
for (int64_t i = 0; i < inst->operand_count(); ++i) {
VLOG(2) << "processing operand =" << i << "\n";
if (branch_input->shape().IsTuple()) {
int64_t j = std::find(inst->operands().begin(), inst->operands().end(),
inst->operands()[i]) -
inst->operands().begin();
VLOG(2) << "operand index = " << j << "\n";
CHECK(j < branch_input->shape().tuple_shapes_size());
if (j < i) {
operands[i] = operands[j];
} else {
CHECK(op_map_.contains(inst->operands()[i]));
int64_t index = op_map_[inst->operands()[i]];
operands[i] =
branch_comp->AddInstruction(HloInstruction::CreateGetTupleElement(
branch_input->shape().tuple_shapes(index), branch_input,
index));
}
} else {
CHECK(inst->operands()[i] == inst->operands()[0]);
operands[i] = branch_input;
}
}
return branch_comp->AddInstruction(
inst->CloneWithNewOperands(inst->shape(), operands));
}
bool UpdateParamShape(
std::vector<std::vector<int64_t>>& matching_tuple_indices,
const Shape* param_shape, HloInstruction*& branch_param,
HloInstruction*& param_tuple) {
bool used = false;
for (int64_t matching_index = matching_tuple_indices.size() - 1;
matching_index >= 0; --matching_index) {
auto* new_tuple = CloneNestedTuples(branch_param);
CHECK_NE(new_tuple, nullptr);
VLOG(5) << "Cloned new tuple:" << new_tuple->parent()->ToString() << "\n";
std::vector<std::vector<HloInstruction*>> gte_users;
gte_users.reserve(branch_param->shape().tuple_shapes_size());
for (int64_t j = 0; j < branch_param->shape().tuple_shapes_size(); ++j) {
gte_users.push_back(std::vector<HloInstruction*>());
}
for (auto* param_user : branch_param->users()) {
if (param_user->opcode() == HloOpcode::kGetTupleElement) {
CHECK_LT(param_user->tuple_index(), gte_users.size());
gte_users[param_user->tuple_index()].push_back(param_user);
}
}
used = false;
*branch_param->mutable_shape() = *param_shape;
const Shape* new_param_shape = nullptr;
for (auto param_users : gte_users) {
if (param_users.empty()) continue;
CHECK_EQ(param_users[0]->opcode(), HloOpcode::kGetTupleElement);
auto tuple_index = param_users[0]->tuple_index();
VLOG(1) << "Processing gte users: " << param_users.size() << "\n";
VLOG(1) << "tuple_index: " << tuple_index << "\n";
VLOG(1) << "matching_tuple_indices: "
<< matching_tuple_indices[matching_index][0] << "\n";
if (matching_tuple_indices[matching_index].end() ==
std::find(matching_tuple_indices[matching_index].begin(),
matching_tuple_indices[matching_index].end(),
tuple_index)) {
continue;
}
for (HloInstruction* param_user : param_users) {
VLOG(1) << "param_user: " << param_user->ToString() << "\n";
if (new_param_shape == nullptr) {
branch_param = param_user;
if (matching_index > 0) {
param_tuple = branch_param;
}
CHECK_GT(param_shape->tuple_shapes_size(), tuple_index);
new_param_shape = ¶m_shape->tuple_shapes(tuple_index);
param_shape = new_param_shape;
VLOG(1) << "new_param_shape: " << param_shape->ToString();
*param_user->mutable_shape() = *new_param_shape;
VLOG(1) << "branch parameter: " << param_user->ToString();
used = true;
} else {
VLOG(1) << "new_param_shape=" << new_param_shape->ToString();
*param_user->mutable_shape() = *new_param_shape;
TF_CHECK_OK(param_user->ReplaceAllUsesWith(branch_param));
}
}
}
if (!used) {
break;
}
}
return used;
}
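  // Collects the distinct operands of `input` into `new_operands` and splices
  // them into `user` (when `user` is a tuple feeding the conditional),
  // recording each operand's assigned tuple index in op_map_ and the chain of
  // tuple positions from `user` up to the conditional in
  // `matching_tuple_indices`.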
absl::Status ReplaceInputInUser(
HloInstruction* input, HloInstruction*& user,
absl::InlinedVector<HloInstruction*, 4>& new_operands,
std::vector<std::vector<int64_t>>& matching_tuple_indices) {
for (int64_t j = 0; j < input->operand_count(); ++j) {
VLOG(2) << "Push back input operand index: " << j;
auto operand = input->mutable_operand(j);
if (std::find(new_operands.begin(), new_operands.end(), operand) ==
new_operands.end()) {
new_operands.push_back(operand);
}
}
if (user->opcode() == HloOpcode::kTuple) {
for (HloInstruction *input_now = input, *user_now = user;
user_now->opcode() != HloOpcode::kConditional;
input_now = user_now, user_now = user_now->users()[0]) {
std::vector<int64_t> matching_tuple_index;
for (int64_t i = 0; i < user_now->operand_count(); ++i) {
if (user_now->operand(i) != input_now) {
continue;
}
matching_tuple_index.push_back(i);
}
CHECK(!matching_tuple_index.empty());
matching_tuple_indices.push_back(matching_tuple_index);
CHECK_EQ(user_now->user_count(), 1);
}
CHECK(!matching_tuple_indices.empty());
int64_t repl_count = 0;
for (auto opd_index : matching_tuple_indices[0]) {
HloInstruction* new_input =
(repl_count < new_operands.size())
? new_operands[repl_count++]
: input->AddInstruction(HloInstruction::CreateTuple({}));
op_map_[new_input] = opd_index;
VLOG(2) << "Mapping operand " << repl_count << " = "
<< new_input->ToString() << " to " << opd_index;
TF_RETURN_IF_ERROR(
user->ReplaceOperandWithDifferentShape(opd_index, new_input));
*user->mutable_shape()->mutable_tuple_shapes(opd_index) =
new_input->shape();
}
while (repl_count < new_operands.size()) {
HloInstruction* new_input = new_operands[repl_count++];
auto new_input_in_user = std::find(user->operands().begin(),
user->operands().end(), new_input);
int64_t opd_index = (new_input_in_user == user->operands().end())
? user->operand_count()
: new_input_in_user - user->operands().begin();
op_map_[new_input] = opd_index;
CHECK(op_map_.contains(new_input));
VLOG(2) << "Mapping operand " << new_input->ToString() << " to "
<< opd_index;
user->AppendOperand(new_input);
user->mutable_shape()->mutable_tuple_shapes()->push_back(
new_input->shape());
}
int64_t nesting_index = 1;
for (auto user_now = user->users()[0];
nesting_index < matching_tuple_indices.size() &&
user_now->opcode() != HloOpcode::kConditional;
user = user_now, user_now = user_now->users()[0], nesting_index++) {
VLOG(2) << "Replacing tuple: " << user->ToString();
CHECK(user_now->shape().IsTuple());
for (auto opd_index : matching_tuple_indices[nesting_index]) {
*user_now->mutable_shape()->mutable_tuple_shapes(opd_index) =
user->shape();
}
VLOG(2) << "Done replacing tuple:" << user->ToString();
CHECK_EQ(user_now->user_count(), 1);
}
VLOG(2) << "User: " << user->ToString() << "\n";
}
return absl::OkStatus();
}
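  // Moves `input` into every branch of the conditional that consumes it: the
  // branch parameter's shape is updated to match the rewritten operand tuple,
  // `input` is re-created inside the branch via InsertIntoBranch, and the
  // remaining parameter users are redirected to the inserted clone.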
absl::Status MoveInputIntoBranch(
HloInstruction* input, HloInstruction*& user,
absl::InlinedVector<HloInstruction*, 4>& new_operands,
std::vector<std::vector<int64_t>>& matching_tuple_indices) {
HloInstruction* cond =
(user->opcode() != HloOpcode::kConditional && user->user_count() == 1)
? user->users()[0]
: user;
if (user == cond) {
auto new_input =
input->AddInstruction(HloInstruction::CreateTuple(new_operands));
for (int64_t i = 0; i < new_operands.size(); ++i) {
op_map_[new_operands[i]] = i;
}
user = new_input;
TF_RETURN_IF_ERROR(input->ReplaceUseWithDifferentShape(cond, new_input));
}
TF_RET_CHECK(cond->opcode() == HloOpcode::kConditional)
<< "User has non-conditional users";
for (int64_t branch = 0; branch < cond->branch_count(); ++branch) {
if (cond->operand(branch + 1) != user) {
continue;
}
VLOG(2) << "Modifying conditional branch: " << branch << "\n";
auto branch_comp = cond->branch_computation(branch);
auto branch_param = branch_comp->parameter_instruction(0);
auto* param_shape = &user->shape();
VLOG(2) << "param_shape: " << param_shape->ToString() << "\n";
VLOG(2) << "branch parameter: " << branch_param->ToString() << "\n";
HloInstruction* param_tuple = branch_param;
if (matching_tuple_indices.empty()) {
VLOG(2) << "The original input is passed in as conditional parameter "
"directly.";
VLOG(5) << branch_comp->ToString() << "\n";
*branch_param->mutable_shape() = *param_shape;
if (branch_param == branch_comp->root_instruction()) {
VLOG(2) << "Cloning root user";
auto new_user =
branch_comp->AddInstruction(HloInstruction::CreateGetTupleElement(
branch_param->shape().tuple_shapes(0), branch_param, 0));
VLOG(2) << "new_user: " << new_user->ToString() << "\n";
          branch_comp->set_root_instruction(new_user,
                                            /*accept_different_shape=*/true);
}
} else {
if (!UpdateParamShape(matching_tuple_indices, param_shape, branch_param,
param_tuple)) {
VLOG(2) << "instruction is not used in this branch.";
continue;
}
}
auto inserted = InsertIntoBranch(input, param_tuple);
VLOG(2) << "Inserted operands:" << inserted->ToString() << "\n";
std::vector<HloInstruction*> tuple_users = branch_param->users();
for (auto param_user : tuple_users) {
if (param_user == inserted ||
(param_user->opcode() == HloOpcode::kGetTupleElement &&
param_user != branch_comp->root_instruction())) {
continue;
}
TF_RETURN_IF_ERROR(
branch_param->ReplaceUseWithDifferentShape(param_user, inserted));
if (branch_comp->root_instruction()->opcode() ==
HloOpcode::kGetTupleElement &&
!branch_comp->root_instruction()->operand(0)->shape().IsTuple()) {
branch_comp->set_root_instruction(
branch_comp->root_instruction()->mutable_operands()[0]);
}
UpdateTupleUsers(inserted);
}
}
return absl::OkStatus();
}
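  // Propagates a shape change through the tuple users of `param_user`,
  // recursively patching the affected tuple element shapes.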
void UpdateTupleUsers(HloInstruction* param_user) {
for (auto new_user : param_user->users()) {
if (new_user->opcode() == HloOpcode::kTuple) {
for (int64_t opd_index = 0; opd_index < new_user->operand_count();
++opd_index) {
if (new_user->operands()[opd_index] != param_user) {
continue;
}
*new_user->mutable_shape()->mutable_tuple_shapes(opd_index) =
param_user->shape();
UpdateTupleUsers(new_user);
VLOG(2) << "Updated tuple user: " << new_user->ToString() << "\n";
}
}
}
}
absl::flat_hash_map<const HloInstruction*, int64_t> op_map_;
};
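// Hoists the operand-side boundary instructions in `to_move_in` into the
// branches of `conditional`, one boundary at a time, re-resolving the chain of
// intermediate tuple users after each move because the rewrite may replace
// them.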
absl::StatusOr<bool> ConditionalCodeMotion::MoveOperandInstructionsIn(
HloInstruction* conditional, std::vector<Boundary>& to_move_in) {
int64_t to_move_in_size = to_move_in.size();
CHECK_GT(to_move_in_size, 0);
VLOG(2) << "Before moving operand instructions inside branch: "
<< conditional->ToString(HloPrintOptions::Fingerprint()) << "\n";
HloInstruction* user = conditional;
int64_t user_index = 0;
MoveOperandIntoBranch move_into_branch;
for (int64_t to_move_index = 0; to_move_index < to_move_in_size;
to_move_index++) {
Boundary b_to_move = to_move_in[to_move_index];
HloInstruction* op = b_to_move.operands()[0];
CHECK_NE(op, nullptr);
if (op->user_count() == 1) {
user = op->users()[0];
user_index = user->operand_index(op);
}
if (op->opcode() == HloOpcode::kTuple) {
continue;
}
VLOG(1) << "Mapping new boundary instr: " << op->ToString() << "\n";
VLOG(1) << "current user = " << user->ToString();
std::vector<std::pair<HloInstruction*, int64_t>> users;
for (auto* user_now = user; user_now != conditional;
user_now = user_now->users()[0]) {
CHECK_EQ(user_now->user_count(), 1);
VLOG(1) << "Saving user: " << user_now->ToString() << "\n";
users.push_back(std::make_pair(
user_now->users()[0], user_now->users()[0]->operand_index(user_now)));
}
TF_RETURN_IF_ERROR(move_into_branch(op, user));
for (int64_t i = users.size() - 1; i > 0; --i) {
CHECK_NE(users[i].first, nullptr);
CHECK_NE(users[i - 1].first, nullptr);
users[i - 1].first = users[i].first->mutable_operand(users[i].second);
}
if (!users.empty()) {
user = users.front().first->mutable_operand(users.front().second);
VLOG(1) << "Updated user: " << user->ToString() << "\n";
}
}
VLOG(2) << "Done moving operand instructions inside branch: "
<< conditional->ToString(HloPrintOptions::Fingerprint()) << "\n";
return true;
}
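// Groups boundaries that are connected through operand/user edges and decides,
// using the configured move and reuse models, whether hoisting the whole group
// into or out of the conditional is profitable.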
class GroupConnectedBoundaries {
private:
std::vector<Boundary> connected_boundaries_, new_boundaries_;
int64_t connected_boundaries_memory_increase_ = 0;
HloInstruction* conditional_;
HloComputation* conditional_parent_;
bool is_layout_sensitive_;
absl::flat_hash_map<HloInstruction*, int>& visited_count_;
std::vector<std::vector<int64_t>>& move_config_;
std::vector<std::vector<int64_t>>& reuse_config_;
absl::Span<int64_t> search_config_vec_;
int64_t& search_config_;
int64_t search_subscript_;
absl::flat_hash_map<const int64_t*, int64_t> flipped_;
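  // Flips one entry of the tuning configuration between zero and `non_zero`,
  // honoring the flip-start offset, the remaining flip budget, and the stride
  // encoded in search_config_; used to mutate decisions while autotuning the
  // pass.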
int64_t FlipMutation(int64_t* loc, const int64_t non_zero,
const std::string& msg) {
if (search_config_ == 0 || ContainsKey(flipped_, loc)) {
VLOG(2) << "Configured not to search or loc is already flipped.";
return *loc;
}
int c = ConditionalCodeMotion::flip_start(search_config_);
VLOG(2) << "flip start index = " << c << "\n";
if (c > 0) {
search_config_--;
return *loc;
}
auto flip_count = ConditionalCodeMotion::DecrementMaxFlip(&search_config_);
VLOG(2) << "max flip count = " << flip_count << "\n";
VLOG(2) << "Updating max Flipping configuration = " << search_config_
<< "\n";
if (flip_count == 0) {
VLOG(2) << "Maximum flip count has reached. ";
if (search_subscript_ + 1 < search_config_vec_.size()) {
VLOG(2) << "search_subscript_ = " << search_subscript_;
VLOG(2) << "search config vec size = " << search_config_vec_.size();
search_config_ = search_config_vec_[++search_subscript_];
} else {
return *loc;
}
}
auto flip_stride = ConditionalCodeMotion::flip_stride(search_config_);
search_config_ += flip_stride;
VLOG(2) << "flip stride = " << flip_stride << "\n";
VLOG(2) << "Updating Flipping Stride = " << search_config_ << "\n";
flipped_[loc] = *loc;
switch (*loc) {
case 0:
*loc = non_zero;
break;
default:
*loc = 0;
break;
}
VLOG(2) << "Flipping decision for: " << msg << ": from " << flipped_[loc]
<< " to " << *loc << "\n";
return *loc;
}
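  // Guarantees at least one entry so that search_config_ can bind to the
  // front element of the vector.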
static std::vector<int64_t>& EnsureSearchConfig(
std::vector<int64_t>& search_config) {
if (search_config.empty()) {
search_config.push_back(0);
}
return search_config;
}
public:
explicit GroupConnectedBoundaries(
HloInstruction* conditional, bool is_layout_sensitive,
absl::flat_hash_map<HloInstruction*, int>& visited_count,
std::vector<std::vector<int64_t>>* move_config,
std::vector<std::vector<int64_t>>* reuse_config,
std::vector<int64_t>& search_config)
: conditional_(conditional),
conditional_parent_(conditional->parent()),
is_layout_sensitive_(is_layout_sensitive),
visited_count_(visited_count),
move_config_(*move_config),
reuse_config_(*reuse_config),
search_config_vec_(EnsureSearchConfig(search_config)),
search_config_(search_config_vec_.front()),
search_subscript_(0) {
VLOG(2) << "Initializing Group Connected Boundaries\n";
}
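  // Estimates how much of the computation of `op` can be reused by `user`, as
  // given by the reuse model; a negative entry encodes a reuse amount to be
  // divided among the non-leaf users and operands involved.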
int64_t ReusesCarriedBy(HloInstruction* op, HloInstruction* user) {
std::vector<int64_t>& curconfig =
reuse_config_[static_cast<uint32_t>(op->opcode())];
int64_t config =
(search_config_ < 0)
? FlipMutation(&curconfig[static_cast<uint32_t>(user->opcode())],
-10,
absl::StrCat(HloOpcodeString(op->opcode()), "->",
HloOpcodeString(user->opcode())))
: curconfig[static_cast<uint32_t>(user->opcode())];
VLOG(2) << "ConditionalCodeMotion: Add reuses carried by instr: "
<< op->ToString() << "=>" << user->ToString() << " : " << config
<< "\n";
if (config < 0) {
int count1 = CountNonLeafOps(op->users());
int count2 = CountNonLeafOps(user->operands());
return (-config) / count1 / count2;
}
return config;
}
void clear_recently_visited() {
for (const auto& boundary : new_boundaries_) {
visited_count_.erase(boundary.operands()[0]);
}
}
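  // Returns whether `instruction` is a candidate for hoisting at the given
  // boundary position, filtering out computation roots, shared or
  // side-effecting operands, parameters, and collectives before consulting
  // (and possibly mutating) the move configuration.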
bool WorthHoisting(HloInstruction* instruction, Boundary::Position pos,
int64_t index) {
VLOG(1) << "Check Worth hoisting\n";
HloOpcode opcode = instruction->opcode();
if (opcode == HloOpcode::kTuple &&
instruction == conditional_parent_->root_instruction()) {
VLOG(1) << "Do not move conditional parent.";
return false;
}
if (pos == Boundary::Position::kOutsideBranchOperand) {
if (opcode == HloOpcode::kTuple && instruction->has_sharding()) {
VLOG(1) << "Not moving operand because of sharding annotations.";
return false;
}
      if (instruction->user_count() > 1) {
        VLOG(1) << "Not moving operand because it has more than one user.";
        return false;
      }
      if (instruction->HasSideEffect()) {
        VLOG(1) << "Not moving operand because it has side effects.";
        return false;
      }
if (opcode == HloOpcode::kGetTupleElement) {
VLOG(1) << "Do not move GetTupleElement.";
return false;
}
}
if (DynCast<HloChannelInstruction>(instruction) &&
pos != Boundary::Position::kInsideBranch) {
VLOG(1) << "It is not safe to move collectives inside branches.";
return false;
}
if (opcode == HloOpcode::kParameter) {
return false;
}
if (opcode == HloOpcode::kGetTupleElement &&
pos == Boundary::Position::kOutsideBranchOperand) {
return false;
}
std::vector<int64_t>& curconfig =
move_config_[static_cast<uint32_t>(opcode)];
auto col = (curconfig.size() == 1) ? 0
: (instruction->operand_count() > 0)
? static_cast<uint32_t>(instruction->operand(0)->opcode())
: 0;
VLOG(2) << "column = " << col << "\n";
VLOG(2) << "config size = " << curconfig.size() << "\n";
VLOG(2) << "search_config = " << search_config_ << "\n";
CHECK(col < curconfig.size());
uint32_t config =
(search_config_ > 0)
? FlipMutation(&curconfig[col], 1,
absl::StrCat("Move-", HloOpcodeString(opcode)))
: curconfig[col];
VLOG(2) << "Checking instruction is worth moving: " << config << "\n";
VLOG(2) << "after checking search_config = " << search_config_ << "\n";
return (config != 0);
}
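  // Sums the reuse benefit `user` obtains from operands on the current
  // boundary, looking through a conditional's root tuple when `user` is a
  // get-tuple-element of the conditional.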
int64_t ReusesBeforeBoundary(HloInstruction* user) {
int64_t reuses = 0;
for (auto op : user->operands()) {
if (!ContainsKey(visited_count_, op) && op != conditional_) {
continue;
}
if (auto tuple_gte = DynCast<HloGetTupleElementInstruction>(user)) {
if (op->opcode() == HloOpcode::kConditional) {
auto tuple = op->branch_computation(0)->root_instruction();
if (tuple->opcode() == HloOpcode::kTuple) {
auto index = tuple_gte->tuple_index();
CHECK(index < tuple->operand_count());
op = tuple->mutable_operand(index);
}
}
reuses += ReusesCarriedBy(op, user->users()[0]);
} else {
reuses += ReusesCarriedBy(op, user);
}
}
VLOG(2) << "Reuses before instruction " << user->ToString() << ":" << reuses
<< "\n";
return reuses;
}
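  // Estimates the reuse benefit still available past the boundary if `user`
  // stays where it is; a non-negative `tuple_idx` means the value flows into
  // the conditional through that element of its operand tuple.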
int64_t ReusesAfterBoundary(HloInstruction* user, int64_t tuple_idx = -1) {
CHECK(user != nullptr);
if (user->opcode() == HloOpcode::kConstant) {
return 0;
}
auto all_users = user->users();
if (tuple_idx < 0 && all_users.size() > 1) {
VLOG(2) << "Having multiple users from: " << user->ToString() << "\n";
return 0;
}
if (!all_users.empty()) {
auto op = all_users[0];
int64_t reuses = 0;
if (tuple_idx >= 0) {
VLOG(2) << "Reuse for conditional operands with tuple index = "
<< tuple_idx << "\n";
VLOG(2) << "user op = " << op->ToString();
if (op->opcode() == HloOpcode::kConditional) {
int64_t reuse_count = 0;
for (int64_t i = 0; i < conditional_->branch_count(); ++i) {
VLOG(5) << "Counting use in branch " << i << "\n";
if (conditional_->operand(i + 1) != user) {
continue;
}
CHECK_EQ(conditional_->branch_computation(i)
->parameter_instructions()
.size(),
1);
auto param_i =
conditional_->branch_computation(i)->parameter_instruction(0);
if (param_i ==
conditional_->branch_computation(i)->root_instruction()) {
VLOG(5) << "parameter is root.\n";
reuse_count++;
continue;
}
if (!param_i->shape().IsTuple() && param_i->user_count() > 0) {
VLOG(5) << "parameter is not tuple and is used. \n";
reuse_count++;
continue;
}
for (auto* param_i_user : param_i->users()) {
if (param_i_user->opcode() == HloOpcode::kGetTupleElement &&
param_i_user->tuple_index() == tuple_idx) {
reuse_count++;
VLOG(5) << "Found user" << param_i_user->ToString() << "\n";
break;
}
}
}
VLOG(2) << "Reuse count for conditional:" << reuse_count << "\n";
if (reuse_count < conditional_->branch_count()) {
reuses += 10;
}
} else if (op->opcode() == HloOpcode::kTuple) {
VLOG(2) << "new tuple index = " << op->operand_index(user);
return ReusesAfterBoundary(op, op->operand_index(user));
} else {
return ReusesAfterBoundary(op, tuple_idx);
}
} else if (op ==
conditional_->branch_computation(0)->root_instruction()) {
int64_t index = op->operand_index(user);
for (auto op2 : conditional_->users()) {
if (op2->opcode() == HloOpcode::kGetTupleElement) {
auto tuple_opd = static_cast<HloGetTupleElementInstruction*>(op2);
if (index == tuple_opd->tuple_index()) {
all_users = op2->users();
if (!all_users.empty()) {
reuses += ReusesCarriedBy(user, all_users[0]);
break;
}
}
}
}
} else if (ContainsKey(visited_count_, op)) {
reuses += ReusesCarriedBy(user, op);
}
VLOG(2) << "reuses after instruction " << user->ToString() << ":"
<< reuses << "\n";
return reuses;
}
return 0;
}
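  // Computes the net benefit of moving the given group of boundaries, where a
  // non-negative result means the move is considered profitable; the reuse
  // analysis can be skipped when an autotuning configuration supplies the
  // decisions directly.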
int64_t BenefitForMovingBoundaries(const std::vector<Boundary>& boundaries,
bool perform_reuse_analysis = true) {
int64_t reuses_before = 0, reuses_after = 0;
if ((boundaries[0].IsInsideBranch() ||
boundaries[0].IsOutsideBranchOperand()) &&
absl::c_count_if(boundaries, [](const Boundary& b) {
return b.operands()[0]->opcode() != HloOpcode::kTuple;
}) == 0) {
return -1;
}
if (boundaries.size() == 1) {
if (boundaries[0].IsOutsideBranchUser() &&
boundaries[0].operands()[0]->opcode() ==
HloOpcode::kGetTupleElement) {
return -1;
}
}
if (!perform_reuse_analysis) {
return 1;
}
auto get_copy_folding_benefit = [&](HloInstruction* hlo) -> int64_t {
if (hlo->opcode() != HloOpcode::kCopy) {
return 0;
}
const HloGetTupleElementInstruction* gte =
DynCast<HloGetTupleElementInstruction>(hlo->operand(0));
if (gte == nullptr) {
return 0;
}
const HloInstruction* conditional = gte->operand(0);
if (conditional != conditional_) {
return 0;
}
int64_t benefit = 0;
for (auto* branch : conditional->called_computations()) {
HloInstruction* root = branch->root_instruction();
if (root->opcode() == HloOpcode::kTuple) {
const auto* tuple_operand = root->operand(gte->tuple_index());
if (tuple_operand->opcode() == HloOpcode::kCopy) {
if (Shape::Equal()(tuple_operand->operand(0)->shape(),
hlo->shape())) {
benefit += 10;
}
}
}
}
return benefit;
};
for (const Boundary& b : boundaries) {
auto op = b.operands()[0];
if (op == conditional_->branch_computation(0)->root_instruction()) {
continue;
}
VLOG(2) << "Benefit for " << op->ToString();
reuses_before += ReusesBeforeBoundary(op);
VLOG(2) << "Reuses before boundary so far: " << reuses_before << "\n";
reuses_after += ReusesAfterBoundary(
op, boundaries[0].IsOutsideBranchOperand() ? 0 : -1);
VLOG(2) << "Reuese after boundary so far : " << reuses_after << "\n";
}
int64_t copy_folding_benefit = 0;
if (boundaries[0].IsOutsideBranchUser()) {
for (const Boundary& b : boundaries) {
auto op = b.operands()[0];
copy_folding_benefit += get_copy_folding_benefit(op);
}
}
VLOG(2) << "Copy folding benefit: " << copy_folding_benefit;
if (reuses_after == 0 && reuses_before == 0 && copy_folding_benefit == 0) {
return -1;
} else if (boundaries[0].IsInsideBranch()) {
return reuses_after - reuses_before;
} else if (boundaries[0].IsOutsideBranchUser()) {
return reuses_before - reuses_after - 1 + copy_folding_benefit;
} else {
CHECK(boundaries[0].IsOutsideBranchOperand());
return reuses_after > 0;
}
}
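  // Builds the boundary one step away from `b`: the op_index-th operand of
  // each instruction when `b` is inside a branch or on the operand side, and
  // the op_index-th user otherwise.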
Boundary GetNextBoundary(const Boundary& b, int64_t op_index) {
Boundary b2(b.GetPosition());
for (int j = 0; j < b.operands().size(); ++j) {
HloInstruction* inst = b.operands()[j];
CHECK(inst != nullptr);
HloInstruction* op = (b.IsInsideBranch() || b.IsOutsideBranchOperand())
? inst->operands()[op_index]
: inst->users()[op_index];
CHECK(op != nullptr);
b2.mutable_operands().push_back(op);
}
return b2;
}
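  // A boundary is safe to move when its instruction has a single dependent
  // and is not the conditional's predicate; a multi-dependent boundary is
  // deferred until all of its dependents have been visited.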
bool IsSafeToMoveBoundary(const Boundary& next_boundary) {
VLOG(1) << "Check is safe to move boundary.\n";
int64_t next_boundary_count =
(next_boundary.IsInsideBranch() ||
next_boundary.IsOutsideBranchOperand())
? next_boundary.operands()[0]->user_count()
: CountNonLeafOps(next_boundary.operands()[0]->operands());
if (next_boundary_count <= 1) {
if (next_boundary.IsOutsideBranchOperand() &&
next_boundary.operands()[0]->users()[0] == conditional_ &&
next_boundary.operands()[0] == conditional_->operand(0)) {
return false;
}
return true;
} else {
if (!ContainsKey(visited_count_, next_boundary.operands()[0])) {
VLOG(1) << "Skip next boundary " << next_boundary.ToString() << "\n"
<< " because it has multiple dependents: "
<< next_boundary_count << "\n";
visited_count_[next_boundary.operands()[0]] = 1;
new_boundaries_.push_back(next_boundary);
} else {
auto pos = std::find(new_boundaries_.begin(), new_boundaries_.end(),
next_boundary);
if (pos != new_boundaries_.end() ||
next_boundary.operands().size() == 1) {
int count = ++visited_count_[next_boundary.operands()[0]];
if (count == next_boundary_count) {
VLOG(2) << "Recovering next boundary " << next_boundary.ToString()
<< "\n"
<< " because all of its dependents have been visited: "
<< next_boundary_count << "\n";
visited_count_.erase(next_boundary.operands()[0]);
if (pos != new_boundaries_.end()) {
new_boundaries_.erase(pos);
}
return true;
}
} else {
VLOG(1) << "Skip incompatible multi-dependent boundary: "
<< next_boundary.ToString() << ":" << next_boundary_count
<< "\n";
}
}
}
return false;
}
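  // Expands the group outward from `boundary` via a worklist, accumulating
  // connected boundaries that are both safe and worth moving while tracking
  // the estimated memory increase (in coarse 512-byte units, with tuple
  // shapes counted as free).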
void AddBoundaries(const Boundary& boundary) {
auto calc_memory_size = [](const HloInstruction* hlo) -> int64_t {
if (hlo->shape().IsTuple()) {
return 0;
}
return ShapeUtil::ByteSizeOf(hlo->shape(), 1) >> 9;
};
BoundaryVisitor visitor;
visitor.AddToWorkList(boundary);
int64_t boundary_index = 0;
while (visitor.HasNextBoundary()) {
Boundary b = visitor.PopNextBoundary();
VLOG(1) << "visiting boundary " << b.ToString() << "\n";
VLOG(1) << "boundary index=" << boundary_index << "\n";
if ((b.IsOutsideBranchUser() || b.IsOutsideBranchOperand() ||
InstructionWithinBranchIdentical(b.operands(),
is_layout_sensitive_)) &&
IsSafeToMoveBoundary(b) &&
WorthHoisting(b.operands()[0], b.GetPosition(), boundary_index)) {
connected_boundaries_.push_back(b);
boundary_index++;
auto output_size = calc_memory_size(b.operands()[0]);
connected_boundaries_memory_increase_ -= output_size;
VLOG(1) << "memory incr = " << connected_boundaries_memory_increase_
<< " after subtracting output size.\n";
VLOG(1) << "boundary can be moved.";
int64_t operand_count =
(b.IsInsideBranch() || b.IsOutsideBranchOperand())
? b.operands()[0]->operand_count()
: b.operands()[0]->users().size();
for (int i = 0; i < operand_count; i++) {
Boundary next_boundary = GetNextBoundary(b, i);
VLOG(1) << "Add operand/user " << i << " to visit later\n";
visitor.AddToWorkList(next_boundary);
connected_boundaries_memory_increase_ +=
calc_memory_size(next_boundary.operands()[0]);
VLOG(1) << "memory incr = " << connected_boundaries_memory_increase_
<< " after adding shape size of operand " << i << "\n";
}
} else if (b.IsOutsideBranchOperand() &&
b.operands()[0]->opcode() == HloOpcode::kBroadcast &&
connected_boundaries_.size() > 1 &&
absl::c_find(
b.operands()[0]->users(),
connected_boundaries_[connected_boundaries_.size() - 1]
.operands()[0]) != b.operands()[0]->users().end() &&
connected_boundaries_[connected_boundaries_.size() - 1]
.operands()[0]
->opcode() != HloOpcode::kTuple) {
VLOG(1) << "Replicating multi-use broadcasts:" << b.ToString() << "\n";
connected_boundaries_.push_back(b);
auto output_size = calc_memory_size(b.operands()[0]) -
calc_memory_size(b.operands()[0]->operand(0));
connected_boundaries_memory_increase_ -= output_size;
VLOG(1) << "memory incr = " << connected_boundaries_memory_increase_;
VLOG(1) << "boundary can be moved.";
} else {
VLOG(1) << "boundary cannot be moved\n";
visited_count_[b.operands()[0]] = 1;
new_boundaries_.push_back(b);
}
}
}
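  // Entry point for grouping: when `b` is the conditional itself, seeds the
  // new-boundary list with the branch roots, the conditional's users, and its
  // non-predicate operands; otherwise expands the group from `b`. Returns the
  // connected boundaries together with their estimated memory increase.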
std::pair<std::vector<Boundary>, int64_t> BoundariesToMoveInOrOut(
HloInstruction* conditional, const Boundary& b) {
HloInstruction* inst = b.operands()[0];
if (inst == conditional) {
int branch_count = inst->branch_count();
Boundary boundary_in(Boundary::Position::kInsideBranch);
for (int i = 0; i < branch_count; i++) {
HloComputation* branch_computation = inst->branch_computation(i);
HloInstruction* root_inst = branch_computation->root_instruction();
CHECK(root_inst != nullptr);
boundary_in.mutable_operands().push_back(root_inst);
}
new_boundaries_.push_back(boundary_in);
for (auto u : inst->users()) {
Boundary boundary_in(Boundary::Position::kOutsideBranchUser);
boundary_in.mutable_operands().push_back(u);
new_boundaries_.push_back(boundary_in);
}
for (int64_t opd_idx = 1; opd_idx < inst->operand_count(); opd_idx++) {
HloInstruction* u = inst->mutable_operand(opd_idx);
Boundary boundary_in(Boundary::Position::kOutsideBranchOperand);
boundary_in.mutable_operands().push_back(u);
new_boundaries_.push_back(boundary_in);
}
} else {
AddBoundaries(b);
}
return std::pair<std::vector<Boundary>, int64_t>(
connected_boundaries_, connected_boundaries_memory_increase_);
}
void AddNewBoundaries(std::vector<Boundary>& b) {
b.insert(b.end(), new_boundaries_.begin(), new_boundaries_.end());
}
};
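// Decides, for the group connected to `cur_boundary`, whether moving it into
// or out of `conditional` is beneficial, vetoing otherwise-profitable moves
// whose average memory increase exceeds the configured allowance.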
ConditionalCodeMotion::Decision ConditionalCodeMotion::ConsiderCodeMotion(
HloInstruction* conditional, const Boundary& cur_boundary,
std::vector<Boundary>& to_move, std::vector<Boundary>& new_boundaries,
absl::flat_hash_map<HloInstruction*, int>& visited_count) {
GroupConnectedBoundaries connect(conditional, is_layout_sensitive_,
visited_count, &move_config_, &reuse_config_,
search_config_);
auto move_in_or_out =
connect.BoundariesToMoveInOrOut(conditional, cur_boundary);
if (!move_in_or_out.first.empty()) {
auto benefit = connect.BenefitForMovingBoundaries(
move_in_or_out.first, search_config_map_.empty());
VLOG(2) << "benefit of moving in or out "
<< cur_boundary.operands()[0]->ToString() << ":" << benefit << "\n";
if (benefit >= 0) {
if (move_in_or_out.second > 0 &&
move_in_or_out.second / move_in_or_out.first.size() >
memory_increase_allowance_) {
VLOG(1) << "Stop moving operands because of memory pressure: "
<< move_in_or_out.second << " / " << move_in_or_out.first.size()
<< " > " << memory_increase_allowance_ << "\n";
benefit = -1;
} else {
VLOG(1) << "Increase memory pressure by " << move_in_or_out.second
<< "\n";
memory_increase_ += move_in_or_out.second;
}
}
if (benefit >= 0) {
new_boundaries.clear();
connect.AddNewBoundaries(new_boundaries);
to_move = move_in_or_out.first;
return Decision(to_move[0].IsInsideBranch()
? Decision::Direction::kMoveOutOfBranch
: Decision::Direction::kMoveIntoBranch,
benefit);
} else {
connect.clear_recently_visited();
}
} else {
connect.AddNewBoundaries(new_boundaries);
}
return Decision(Decision::Direction::kNoChange, 0);
}
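// Runs one pass of conditional code motion: a CSE/DCE cleanup, candidate
// collection, per-conditional benefit analysis, cloning of branch computations
// shared between conditionals, application of the chosen moves, and a final
// DCE/tuple-simplification cleanup when anything changed.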
absl::StatusOr<bool> ConditionalCodeMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Begin a new pass of conditional code motion optimization.\n";
if (!ConsumeFuel("conditional_code_motion", [&] {
return "Skipping conditional opt after allowed limit reaching 0.\n";
})) {
return false;
}
bool changed = false;
bool cleanup_changed = false;
{
HloPassPipeline subpipeline("before_conditional_code_motion");
subpipeline.AddPass<HloCSE>(is_layout_sensitive_);
subpipeline.AddPass<HloDCE>();
TF_ASSIGN_OR_RETURN(auto cleanup_changed_now,
subpipeline.Run(module, execution_threads));
cleanup_changed |= cleanup_changed_now;
}
std::vector<HloInstruction*> conditional_ops;
absl::flat_hash_map<HloComputation*, int> conditional_computations;
for (auto* comp : module->MakeComputationPostOrder(execution_threads)) {
for (auto* instr : comp->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kConditional) {
int branch_count = instr->branch_count();
for (int i = 0; i < branch_count; ++i) {
HloComputation* branch_i = instr->branch_computation(i);
if (ContainsKey(conditional_computations, branch_i)) {
conditional_computations[branch_i]++;
} else {
conditional_computations[branch_i] = 0;
}
}
if (instr->shape().IsTuple()) {
bool can_change_tuple_shape = true;
for (auto user : instr->users()) {
VLOG(2) << "user is : " << user->ToString() << "\n";
if (user->opcode() != HloOpcode::kGetTupleElement) {
can_change_tuple_shape = false;
}
}
if (can_change_tuple_shape) {
conditional_ops.push_back(instr);
}
} else {
conditional_ops.push_back(instr);
}
}
}
}
int64_t conditional_index = 0;
HloCloneContext clone_context(module);
for (HloInstruction* conditional : conditional_ops) {
if (conditional_index == 0 || !search_config_map_.empty()) {
auto config_entry = search_config_map_.find(conditional_index);
if (config_entry != search_config_map_.end()) {
search_config_ = (*config_entry).second;
VLOG(2) << "config entry value extracted:" << search_config_.size();
search_config_index_ = 0;
}
VLOG(2) << "Obtaining default configuration for conditional "
<< conditional_index << "\n";
SetDefaultMoveConfig();
VLOG(2) << "Done obtaining default configuration\n";
conditional_index++;
}
int branch_count = conditional->branch_count();
bool conditional_is_shared = false;
for (int i = 0; i < branch_count; ++i) {
HloComputation* branch_i = conditional->branch_computation(i);
if (conditional_computations[branch_i] > 0) {
conditional_is_shared = true;
break;
}
}
std::vector<std::vector<Boundary>> to_move_out, to_move_in;
std::vector<std::vector<Boundary>> new_boundaries_for_moveout;
std::vector<std::vector<Boundary>> new_boundaries_for_movein;
absl::flat_hash_map<HloInstruction*, int> visited_count;
int benefit_move_out = 0, benefit_move_in = 0;
Decision::Direction final_d = Decision::Direction::kNoChange;
BoundaryVisitor visitor(conditional);
VLOG(2) << "Analyzing conditional:" << conditional->ToString() << "\n";
while (visitor.HasNextBoundary()) {
std::vector<Boundary> to_move, next_boundary;
Boundary boundary = visitor.PopNextBoundary();
VLOG(2) << "Analyzing boundary:" << boundary.ToString() << "\n";
auto d = ConsiderCodeMotion(conditional, boundary, to_move, next_boundary,
visited_count);
switch (d.GetDirection()) {
case Decision::Direction::kMoveOutOfBranch:
VLOG(2) << "Local Decision is move out of branch\n";
to_move_out.push_back(to_move);
new_boundaries_for_moveout.push_back(next_boundary);
benefit_move_out += d.GetBenefit();
if (benefit_move_out >= benefit_move_in) {
final_d = Decision::Direction::kMoveOutOfBranch;
VLOG(2) << "Current Decision is move out of branch ("
<< to_move_out.size() << ")\n";
} else {
VLOG(2) << "Current Decision remains move into branch\n";
}
break;
case Decision::Direction::kMoveIntoBranch:
VLOG(2) << "Decision is move into branch\n";
to_move_in.push_back(to_move);
new_boundaries_for_movein.push_back(next_boundary);
benefit_move_in += d.GetBenefit();
if (benefit_move_out >= benefit_move_in) {
VLOG(2) << "Current Decision remains move out of branch\n";
} else {
final_d = Decision::Direction::kMoveIntoBranch;
VLOG(2) << "Current Decision is move into branch ("
<< to_move_in.size() << ")\n";
}
break;
case Decision::Direction::kNoChange:
VLOG(2) << "Decision is no change\n";
for (const Boundary& b : next_boundary) {
visitor.AddToWorkList(b);
VLOG(2) << "Adding new boundary to worklist:" << b.ToString()
<< "\n";
}
break;
}
}
if (final_d != Decision::Direction::kNoChange && conditional_is_shared) {
for (int i = 0; i < branch_count; ++i) {
HloComputation* branch_i = conditional->branch_computation(i);
if (conditional_computations[branch_i] > 0) {
HloComputation* clone_i =
conditional->GetModule()->AddEmbeddedComputation(
branch_i->Clone("clone", &clone_context));
conditional->set_branch_computation(i, clone_i);
conditional_computations[branch_i]--;
auto update_boundary = [&](Boundary& boundary) {
auto cloned_instr =
clone_context.FindInstruction(boundary.operands()[i]);
CHECK(cloned_instr != nullptr);
VLOG(2) << "boundary before cloning:" << boundary.operands()[i]
<< "\n";
boundary.mutable_operands()[i] = cloned_instr;
VLOG(2) << "boundary after cloning:" << boundary.operands()[i]
<< "\n";
};
if (final_d == Decision::Direction::kMoveOutOfBranch) {
for (int i = 0; i < to_move_out.size(); ++i) {
std::vector<Boundary>& m = to_move_out[i];
std::for_each(m.begin(), m.end(), update_boundary);
}
for (int i = 0; i < new_boundaries_for_moveout.size(); ++i) {
std::vector<Boundary>& m = new_boundaries_for_moveout[i];
std::for_each(m.begin(), m.end(), update_boundary);
}
}
}
}
VLOG(2) << "Cloned branches as needed: " << conditional->ToString()
<< "\n";
}
if (final_d == Decision::Direction::kMoveOutOfBranch) {
CHECK(to_move_out.size() == new_boundaries_for_moveout.size());
for (int i = 0; i < to_move_out.size(); ++i) {
TF_ASSIGN_OR_RETURN(bool result,
MoveInstructionOut(conditional, to_move_out[i],
new_boundaries_for_moveout[i]));
changed |= result;
}
VLOG(2) << "Done moving out of branches " << to_move_out.size()
<< " times. \n";
if (!ConsumeFuel("conditional_code_motion", [&] {
return "Skipping conditional opt after allowed limit reaching "
"0.\n";
})) {
break;
}
} else if (final_d == Decision::Direction::kMoveIntoBranch) {
CHECK(to_move_in.size() == new_boundaries_for_movein.size());
for (int i = 0; i < to_move_in.size(); ++i) {
if (to_move_in[i].empty()) {
continue;
}
VLOG(2) << "before opt:"
<< conditional->parent()->ToString(
HloPrintOptions::Fingerprint());
if (to_move_in[i][0].IsOutsideBranchOperand()) {
VLOG(1) << "Modifying code---number of operand boundaries to move in:"
<< to_move_in[i].size() << "\n";
TF_ASSIGN_OR_RETURN(bool result, MoveOperandInstructionsIn(
conditional, to_move_in[i]));
changed |= result;
} else {
VLOG(1) << "Modifying code---number of user boundaries to move in:"
<< to_move_in[i].size() << "\n";
CHECK(to_move_in[i][0].IsOutsideBranchUser());
TF_ASSIGN_OR_RETURN(
bool result, MoveUserInstructionsIn(conditional, to_move_in[i]));
changed |= result;
}
VLOG(2) << "Before removing instructions:"
<< conditional->parent()->ToString() << "\n";
for (int64_t j = to_move_in[i].size() - 1; j >= 0; j--) {
Boundary boundary_to_move_in = to_move_in[i][j];
HloInstruction* op = boundary_to_move_in.operands()[0];
if (op->user_count() == 0 && op->parent() != nullptr) {
VLOG(2) << "Removing boundary:" << boundary_to_move_in.ToString()
<< "\n";
TF_RETURN_IF_ERROR(conditional->parent()->RemoveInstruction(op));
VLOG(2) << "Done removing boundary.\n";
}
}
VLOG(2) << "Done moving instructions inside branches\n"
<< conditional->parent()->ToString(
HloPrintOptions::Fingerprint())
<< "\n";
VLOG(2) << "Done moving into branches " << to_move_in.size()
<< " times. \n";
if (!ConsumeFuel("conditional_code_motion", [&] {
return "Skipping conditional opt after allowed limit reaching "
"0.\n";
})) {
break;
}
}
} else if (pursue_full_conditional_code_motion_ && !conditional_is_shared) {
TF_ASSIGN_OR_RETURN(
bool convert_result,
ConvertSpecialMove(conditional, is_layout_sensitive_));
if (convert_result) {
VLOG(2) << "Done special moving of convert\n";
if (!ConsumeFuel("conditional_code_motion", [&] {
return "Skipping conditional opt after allowed limit reaching "
"0.\n";
})) {
break;
}
}
changed |= convert_result;
}
}
if (changed) {
HloPassPipeline subpipeline(
"after_conditional_code_motion_after_convert_hoisting");
VLOG(2) << "starting after motion passes: DCE\n";
subpipeline.AddPass<HloDCE>();
subpipeline.AddPass<TupleSimplifier>();
subpipeline.AddPass<HloDCE>();
TF_ASSIGN_OR_RETURN(auto cleanup_changed_now, subpipeline.Run(module));
cleanup_changed |= cleanup_changed_now;
}
if (cleanup_changed) {
VLOG(2) << "subpipeline cleanup have modified code\n";
}
return changed;
}
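// Populates the default move and reuse tables, indexed by opcode and (for
// multi-column rows) by the opcode of the first operand; the current search
// configuration selects between the built-in heuristics and the two
// autotuning modes.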
void ConditionalCodeMotion::SetDefaultMoveConfig() {
VLOG(2) << "search_config_index = " << search_config_index_ << "\n";
VLOG(2) << "search_config_ size = " << search_config_.size() << "\n";
int64_t cur_search_config = (search_config_index_ < 0 ||
search_config_index_ >= search_config_.size())
? 0
: search_config_[search_config_index_];
enum class TuningOption {
kDoNotTune = 0,
kTuneTransformationDecision = 1,
kTuneReuseModel = 2,
};
TuningOption tuning_option =
(cur_search_config == 0) ? TuningOption::kDoNotTune
: (cur_search_config > 0) ? TuningOption::kTuneTransformationDecision
: TuningOption::kTuneReuseModel;
auto row = HloOpcodeCount();
auto col = row;
VLOG(2) << "Start setting default configuration\n";
reuse_config_.clear();
move_config_.clear();
reuse_config_.reserve(row);
move_config_.reserve(row);
for (int64_t opcode = 0; opcode < row; ++opcode) {
std::vector<int64_t> reuse_vec(col, 0);
for (uint32_t j = 0; j < col; ++j) {
reuse_vec[j] = ReusesCarriedBy(static_cast<HloOpcode>(opcode),
static_cast<HloOpcode>(j));
}
reuse_config_.push_back(reuse_vec);
std::vector<int64_t> move_vec;
switch (tuning_option) {
case TuningOption::kTuneTransformationDecision:
move_vec.push_back(1);
break;
case TuningOption::kTuneReuseModel:
case TuningOption::kDoNotTune:
move_vec.reserve(col);
for (uint32_t j = 0; j < col; ++j) {
move_vec.push_back(WorthHoisting(static_cast<HloOpcode>(opcode),
static_cast<HloOpcode>(j)));
}
break;
}
move_config_.push_back(move_vec);
}
}
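// Parses a search configuration of the form
// "<conditional_index>,<flip_start>,<max_flip>,<flip_stride>[;...]",
// accumulating one packed configuration value per conditional index.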
void ConditionalCodeMotion::ParseSearchConfiguration(
const std::string& search_config) {
if (search_config.empty()) {
return;
}
search_config_index_ = 0;
std::vector<std::string> configs = absl::StrSplit(search_config, ';');
for (const std::string& config : configs) {
std::vector<std::string> specs = absl::StrSplit(config, ',');
CHECK_EQ(specs.size(), 4);
int64_t condition_index;
CHECK(absl::SimpleAtoi(specs[0], &condition_index));
auto& cur_config_entry = search_config_map_[condition_index];
int64_t flip_start, max_flip, flip_stride;
CHECK(absl::SimpleAtoi(specs[1], &flip_start));
CHECK(absl::SimpleAtoi(specs[2], &max_flip));
CHECK(absl::SimpleAtoi(specs[3], &flip_stride));
int64_t cur_config = MakeSearchConfig(flip_start, max_flip, flip_stride);
cur_config_entry.push_back(cur_config);
VLOG(2) << "Setting search config " << condition_index << "->" << cur_config
<< "\n";
}
}
}
} | #include "xla/service/conditional_code_motion.h"
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace xla {
namespace conditional_opt {
using ConditionalCodeMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
TEST_F(ConditionalCodeMotionTest, MoveSubsetTupleOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(%convert.2894, %reshape.8493)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(%convert.3604, %add)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
get-first-index.2 = f32[2,512,364]{2,1,0} get-tuple-element(conditional), index=1
ROOT result = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(get-first-index, get-first-index.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::Convert(), op::GetTupleElement())));
}
TEST_F(ConditionalCodeMotionTest, VerifyConditionalAnalysisWithWhileTuple) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
body {
%p_body = (f32[2], bf16[2], s32[]) parameter(0)
%val = f32[2] get-tuple-element(p_body), index=0
%val2 = bf16[2] get-tuple-element(p_body), index=1
%const = s32[] constant(-1)
ROOT root = (f32[2], bf16[2], s32[]) tuple(%val, %val2, %const)
}
condition {
%p_cond = (f32[2], bf16[2], s32[]) parameter(0)
%gte = s32[] get-tuple-element(%p_cond), index=2
%const = s32[] constant(42)
ROOT result = pred[] compare(%gte, %const), direction=EQ
}
on_true {
%arg_tuple.1 = f32[2] parameter(0)
%const = s32[] constant(42)
%add.8493 = f32[2] add(f32[2] %arg_tuple.1, f32[2] %arg_tuple.1)
%convert.2894 = bf16[2] convert(f32[2] %add.8493)
ROOT %tuple.1 = (f32[2], bf16[2], s32[]) tuple(%add.8493, %convert.2894, %const)
}
on_false {
%arg_tuple.1 = f32[2] parameter(0)
%const = s32[] constant(42)
%add.8493 = f32[2] add(f32[2] %arg_tuple.1, f32[2] %arg_tuple.1)
%convert.2894 = bf16[2] convert(f32[2] %add.8493)
%while_init = (f32[2], bf16[2], s32[]) tuple(%add.8493, %convert.2894, %const)
ROOT while = (f32[2], bf16[2], s32[]) while(%while_init), condition=condition, body=body
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = f32[2] parameter(1)
ROOT conditional = (f32[2], bf16[2], s32[]) conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(ConditionalCodeMotionTest, MoveConvertOutConditionalRoot) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
ROOT conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::Convert())));
}
TEST_F(ConditionalCodeMotionTest, MoveConvertOutConditional) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
ROOT result = (bf16[2,512,364]{2,1,0}) tuple(get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::Convert())));
}
TEST_F(ConditionalCodeMotionTest, ConditionalShapeNotMutable) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
ROOT result = (bf16[2,512,364]{2,1,0}, (bf16[2,512,364]{2,1,0})) tuple(get-first-index, conditional)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(ConditionalCodeMotionTest, MoveConvertOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
add.1 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index, bf16[2,512,364]{2,1,0} get-first-index)
ROOT result = (bf16[2,512,364]{2,1,0}) tuple(add.1)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
CHECK_NE(conditional, nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 1);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 1);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(op::Conditional())))))));
}
TEST_F(ConditionalCodeMotionTest, UserShareOperandCannotBeMoved) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
constant.3 = f32[] constant(3)
constant.4 = f32[] constant(4)
constant.5 = f32[] constant(5)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(add.1, constant.2)
add.3 = f32[] add(add.1, constant.3)
add.4 = f32[] add(add.3, constant.5)
multiply.1 = f32[] multiply(add.4, constant.4)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.1, add.4)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.6 = f32[] constant(1)
constant.7 = f32[] constant(2)
constant.8 = f32[] constant(3)
constant.9 = f32[] constant(4)
constant.10 = f32[] constant(5)
add.4 = f32[] add(get-tuple-element.2, constant.6)
sub.1 = f32[] subtract(add.4, constant.7)
add.5 = f32[] add(add.4, constant.8)
add.6 = f32[] add(add.5, constant.10)
multiply.2 = f32[] multiply(sub.1, constant.9)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.2, add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
conditional = (f32[], f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[] get-tuple-element(conditional), index=0
get-second-index = f32[] get-tuple-element(conditional), index=1
ROOT result = f32[] add(get-first-index, get-second-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 9);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 11);
std::optional<int> on_false_sub_idx;
std::optional<int> on_false_add_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kAdd) {
on_false_add_idx = i;
} else if (root_operand->opcode() == HloOpcode::kSubtract) {
on_false_sub_idx = i;
}
}
ASSERT_TRUE(on_false_add_idx.has_value());
ASSERT_TRUE(on_false_sub_idx.has_value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Add(
op::Multiply(
op::GetTupleElement(op::Conditional(), *on_false_sub_idx),
op::Constant()),
op::GetTupleElement(op::Conditional(), *on_false_add_idx))));
}
TEST_F(ConditionalCodeMotionTest, ConditionalBoundaryAliasingBug) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[], f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.1), index=1
cos = f32[] cosine(get-tuple-element.2)
multiply.1 = f32[] multiply(get-tuple-element.1, cos)
ROOT res.1 = (f32[], f32[]) tuple(multiply.1, cos)
}
on_false {
arg_tuple.1 = (f32[], f32[]) parameter(0)
get-tuple-element.3 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.6 = f32[] constant(3)
multiply.2 = f32[] multiply(get-tuple-element.3, constant.6)
constant.2 = f32[] constant(0)
ROOT res.2 = (f32[], f32[]) tuple(multiply.2, constant.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
param.2 = f32[] parameter(1)
param.3 = f32[] parameter(2)
tuple = (f32[], f32[]) tuple(param.2, param.3)
conditional = (f32[], f32[])
conditional(pred.1, tuple, tuple), true_computation=on_true,
false_computation=on_false
get-tuple-element.3 = f32[] get-tuple-element(conditional), index=0
get-tuple-element.4 = f32[] get-tuple-element(conditional), index=1
ROOT result = f32[] add(get-tuple-element.3, get-tuple-element.4)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_false = conditional->branch_computation(1);
std::optional<int> on_false_gte_idx;
std::optional<int> on_false_const_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kGetTupleElement) {
on_false_gte_idx = i;
} else if (root_operand->opcode() == HloOpcode::kConstant) {
on_false_const_idx = i;
}
}
ASSERT_TRUE(on_false_gte_idx.has_value());
ASSERT_TRUE(on_false_const_idx.has_value());
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(3.0)));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root->operand(0),
op::Multiply(
op::GetTupleElement(op::Conditional(), *on_false_gte_idx),
op::GetTupleElement(op::Conditional(), *on_false_const_idx)));
}
TEST_F(ConditionalCodeMotionTest, ConditionalRootElementChanged) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(get-tuple-element.1, constant.2)
add.3 = f32[] add(add.1, add.2)
ROOT tuple.3 = (f32[]) tuple(add.3)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.3 = f32[] constant(1)
constant.4 = f32[] constant(2)
add.4 = f32[] add(constant.4, constant.3)
add.5 = f32[] add(get-tuple-element.2, constant.4)
add.6 = f32[] add(add.4, add.5)
ROOT tuple.4 = (f32[]) tuple(add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
conditional = (f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[] get-tuple-element(conditional), index=0
ROOT result = f32[] add(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
EXPECT_EQ(on_true->instruction_count(), 3);
EXPECT_THAT(on_true->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 0)));
const HloComputation* on_false = conditional->branch_computation(1);
EXPECT_EQ(on_false->instruction_count(), 4);
std::optional<int> on_false_const_idx;
std::optional<int> on_false_gte_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kConstant) {
on_false_const_idx = i;
} else if (root_operand->opcode() == HloOpcode::kGetTupleElement) {
on_false_gte_idx = i;
}
}
ASSERT_TRUE(on_false_const_idx.has_value());
ASSERT_TRUE(on_false_gte_idx.has_value());
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(2.0)));
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_gte_idx),
op::GetTupleElement(op::Parameter(0), 0));
HloInstruction* root = module->entry_computation()->root_instruction();
auto get_first_index_matcher = op::Add(
op::Add(op::GetTupleElement(op::Conditional(), *on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(1.0))),
op::Add(op::GetTupleElement(op::Conditional(), *on_false_gte_idx),
op::Constant(LiteralUtil::CreateR0<float>(2.0))));
EXPECT_THAT(root, op::Add(get_first_index_matcher, get_first_index_matcher));
}
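
// No boundary can be moved when the conditional itself is the root of the
// entry computation, so the pass must report no change.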
TEST_F(ConditionalCodeMotionTest, ConditionalIsRootInstruction) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
constant.3 = f32[] constant(3)
constant.4 = f32[] constant(4)
constant.5 = f32[] constant(5)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(add.1, constant.2)
add.3 = f32[] add(add.1, constant.3)
add.4 = f32[] add(add.3, constant.5)
multiply.1 = f32[] multiply(add.2, constant.4)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.1, add.4)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.6 = f32[] constant(1)
constant.7 = f32[] constant(2)
constant.8 = f32[] constant(3)
constant.9 = f32[] constant(4)
constant.10 = f32[] constant(5)
add.4 = f32[] add(get-tuple-element.2, constant.6)
sub.1 = f32[] subtract(add.4, constant.7)
add.5 = f32[] add(add.4, constant.8)
add.6 = f32[] add(add.5, constant.10)
multiply.2 = f32[] multiply(sub.1, constant.9)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.2, add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
ROOT conditional = (f32[], f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
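
// All-reduces whose operands have mismatching layouts across the two
// branches must not be hoisted out of the conditional.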
TEST_F(ConditionalCodeMotionTest, LayoutMismatchCannotBeMovedOut) {
absl::string_view hlo_string =
R"(
HloModule LayoutMismatchCannotBeMovedOut
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
%arg_tuple.1 = (bf16[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = bf16[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%all-reduce.1 = bf16[93184,4]{1,0}
all-reduce(bf16[93184,4]{1,0} %get-tuple-element.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64
%convert.2894 = f32[93184,4]{1,0} convert(bf16[93184,4]{1,0} %all-reduce.1)
ROOT %tuple.1 = (f32[93184,4]{1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (bf16[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = bf16[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%copy.1 = bf16[93184,4]{0,1} copy(bf16[93184,4]{1,0} %get-tuple-element.3)
%all-reduce.2 = bf16[93184,4]{0,1}
all-reduce(bf16[93184,4]{0,1} %copy.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181
%convert.3604 = f32[93184,4]{0,1} convert(bf16[93184,4]{0,1} %all-reduce.2)
ROOT %tuple.2 = (f32[93184,4]{0,1}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (bf16[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (bf16[93184,4]{1,0}) parameter(2)
conditional = (f32[93184,4]{1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = f32[93184,4]{1,0} get-tuple-element(conditional), index=0
ROOT result = (f32[93184,4]{1,0}) tuple(get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
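
// Identical cross-module all-reduce + convert sequences in both branches are
// hoisted out, shrinking the conditional to return the bf16 convolution
// result directly.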
TEST_F(ConditionalCodeMotionTest, MoveCrossModuleAllReduceOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
arg_tuple.1 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(0)
get-tuple-element.11 = bf16[2,54,168,128] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.12 = bf16[2,52,168,128] get-tuple-element(arg_tuple.1), index=1
convolution.1 = bf16[3,3,128,128] convolution(bf16[2,54,168,128]
get-tuple-element.11, bf16[2,52,168,128]
get-tuple-element.12), window={size=52x168 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.1 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64, metadata={op_type="Conv2DBackpropFilter"
op_name="gradients/resnet50/conv2d_22/Conv2D_grad/Conv2DBackpropFilter"}
convert.1 = f32[3,3,128,128] convert(bf16[3,3,128,128] %all-reduce.1),
metadata={op_type="Cast" op_name="Cast_15"}
ROOT tuple.1 = (f32[3,3,128,128]) tuple(convert.1)
}
on_false {
arg_tuple.2 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(0)
get-tuple-element.21 = bf16[2,86,104,128]
get-tuple-element(arg_tuple.2), index=0
get-tuple-element.22 = bf16[2,84,104,128]
get-tuple-element(arg_tuple.2), index=1
convolution.2 = bf16[3,3,128,128]
convolution(bf16[2,86,104,128] get-tuple-element.21, bf16[2,84,104,128]
get-tuple-element.22), window={size=84x104 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.2 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.2),
channel_id=485, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181, metadata={op_type="Conv2DBackpropFilter"
op_name="gradients/resnet50/conv2d_22/Conv2D_grad/Conv2DBackpropFilter"}
convert.2 = f32[3,3,128,128]
convert(bf16[3,3,128,128] %all-reduce.2),
metadata={op_type="Cast" op_name="Cast_15"}
ROOT tuple.2 = (f32[3,3,128,128]) tuple(convert.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(1)
arg_tuple.4 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(2)
arg_tuple.5 = f32[3,3,128,128] parameter(3)
conditional = (f32[3,3,128,128])
conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=on_true,
false_computation=on_false
get-first-index = f32[3,3,128,128]
get-tuple-element(conditional), index=0
add.1 = f32[3,3,128,128] add(f32[3,3,128,128] get-first-index, f32[3,3,128,128] get-first-index)
ROOT result = (f32[3,3,128,128]) tuple(add.1)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
ASSERT_NE(conditional, nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
ASSERT_TRUE(ShapeUtil::Compatible(
conditional->shape(), ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(
BF16, {3, 3, 128, 128})})));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::AllReduce(op::GetTupleElement(op::Conditional()))),
op::Convert(
op::AllReduce(op::GetTupleElement(op::Conditional())))))));
}
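
// An all-reduce consuming the conditional's result must not be sunk into the
// branches; the module is expected to stay unchanged.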
TEST_F(ConditionalCodeMotionTest, DoNotMoveAllReduceIn) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
arg_tuple.1 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(0)
get-tuple-element.11 = bf16[2,54,168,128] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.12 = bf16[2,52,168,128] get-tuple-element(arg_tuple.1), index=1
convolution.1 = bf16[3,3,128,128] convolution(bf16[2,54,168,128]
get-tuple-element.11, bf16[2,52,168,128]
get-tuple-element.12), window={size=52x168 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
add.1 = bf16[3,3,128,128] add(bf16[3,3,128,128] convolution.1, bf16[3,3,128,128] convolution.1)
ROOT tuple.1 = (bf16[3,3,128,128]) tuple(add.1)
}
on_false {
arg_tuple.2 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(0)
get-tuple-element.21 = bf16[2,86,104,128]
get-tuple-element(arg_tuple.2), index=0
get-tuple-element.22 = bf16[2,84,104,128]
get-tuple-element(arg_tuple.2), index=1
convolution.2 = bf16[3,3,128,128]
convolution(bf16[2,86,104,128] get-tuple-element.21, bf16[2,84,104,128]
get-tuple-element.22), window={size=84x104 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
add.2 = bf16[3,3,128,128] add(bf16[3,3,128,128] convolution.2, bf16[3,3,128,128] convolution.2)
ROOT tuple.2 = (bf16[3,3,128,128]) tuple(add.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(1)
arg_tuple.4 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(2)
arg_tuple.5 = f32[3,3,128,128] parameter(3)
conditional = (bf16[3,3,128,128])
conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=on_true,
false_computation=on_false
get-first-index = bf16[3,3,128,128] get-tuple-element(conditional), index=0
all-reduce.2 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %get-first-index),
channel_id=485, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181, metadata={op_type="Conv2DBackpropFilter"
op_name="gradients/resnet50/conv2d_22/Conv2D_grad/Conv2DBackpropFilter"}
convert.2 = f32[3,3,128,128]
convert(bf16[3,3,128,128] %all-reduce.2),
metadata={op_type="Cast" op_name="Cast_15"}
ROOT result = (f32[3,3,128,128]) tuple(convert.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
ASSERT_NE(conditional, nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 6);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 6);
ASSERT_TRUE(ShapeUtil::Compatible(
conditional->shape(), ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(
BF16, {3, 3, 128, 128})})));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::Convert(op::AllReduce(
op::GetTupleElement(op::Conditional()))))));
}
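
// A power op that only consumes the conditional's result is moved into both
// branches, leaving a bare get-tuple-element as the entry root.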
TEST_F(ConditionalCodeMotionTest, MovePowOpIn) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
ROOT tuple.3 = (f32[10]) tuple(add.1)
}
on_false {
arg_tuple.2 = (f32[10]) parameter(0)
get-tuple-element.2 = f32[10] get-tuple-element(arg_tuple.2), index=0
mul.1 = f32[10] multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple.4 = (f32[10]) tuple(mul.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = (f32[10])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[10] get-tuple-element(conditional), index=0
ROOT pow.1 = f32[10] power(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::Conditional())));
}
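
// Moving in still works when the conditional's result is read through
// multiple get-tuple-element instructions with the same index.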
TEST_F(ConditionalCodeMotionTest, MoveInWithMultipleGTE) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
ROOT tuple.3 = (f32[10]) tuple(add.1)
}
on_false {
arg_tuple.2 = (f32[10]) parameter(0)
get-tuple-element.2 = f32[10] get-tuple-element(arg_tuple.2), index=0
mul.1 = f32[10] multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple.4 = (f32[10]) tuple(mul.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = (f32[10])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[10] get-tuple-element(conditional), index=0
get-first-index.2 = f32[10] get-tuple-element(conditional), index=0
pow.1 = f32[10] power(get-first-index, get-first-index.2)
ROOT tuple.3 = (f32[10], f32[10]) tuple(pow.1, get-first-index.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional())));
}
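
// Hoisting out works when the true and false computations are the same
// shared branch computation.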
TEST_F(ConditionalCodeMotionTest, MoveOutWithSharedBranch) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
ROOT tuple.3 = (f32[10]) tuple(add.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = (f32[10])
conditional(pred.1, tuple.1, tuple.2), true_computation=branch,
false_computation=branch
get-first-index = f32[10] get-tuple-element(conditional), index=0
ROOT pow.1 = f32[10] power(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 1);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 1);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Power(op::Add(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional())),
op::Add(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional())))));
}
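
// Moving in works when the branch root is a non-tuple value; the pass
// re-wraps the branch results in tuples as needed.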
TEST_F(ConditionalCodeMotionTest, MovePowInWithNonTupleRoot) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
ROOT add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = f32[10]
conditional(pred.1, tuple.1, tuple.2), true_computation=branch,
false_computation=branch
ROOT pow.1 = f32[10] power(conditional, conditional)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::Conditional())));
}
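
// Moving in works when one branch is an empty pass-through of its parameter.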
TEST_F(ConditionalCodeMotionTest, MovePowInWithEmptyBranch) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch1 {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
ROOT tuple.3 = (f32[10]) tuple(add.1)
}
branch2 {
ROOT arg_tuple.1 = (f32[10]) parameter(0)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = (f32[10])
conditional(pred.1, tuple.1, tuple.2), true_computation=branch1,
false_computation=branch2
get-first-index = f32[10] get-tuple-element(conditional), index=0
ROOT pow.1 = f32[10] power(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 4);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::Conditional())));
}
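
// Moving in works when the branches take non-tuple parameters.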
TEST_F(ConditionalCodeMotionTest, MovePowInWithNonTupleParameter) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch {
arg.1 = f32[10] parameter(0)
ROOT add.1 = f32[10] add(arg.1, arg.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = f32[10] parameter(1)
tuple.2 = f32[10] parameter(2)
conditional = f32[10]
conditional(pred.1, tuple.1, tuple.2), true_computation=branch,
false_computation=branch
ROOT pow.1 = f32[10] power(conditional, conditional)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 4);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 4);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::Conditional())));
}
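
// A layout-changing copy following the conditional is moved into both
// branches, and the entry tuple is rewired to the reordered conditional
// outputs.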
TEST_F(ConditionalCodeMotionTest, MoveCopyInBranch) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch1 {
arg_tuple.1 = (s32[], f32[10,3]{0,1}) parameter(0)
constant.1 = s32[] constant(4)
get-tuple-element.1 = s32[] get-tuple-element(arg_tuple.1), index=0
add.1 = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = f32[10,3]{0,1} get-tuple-element(arg_tuple.1), index=1
slice.1 = f32[4,3]{0,1} slice(get-tuple-element.2),
slice={[0:4:1], [0:3:1]}
constant.2 = f32[] constant(0.0)
ROOT tuple.1 = (f32[4,3]{0,1}, s32[],f32[]) tuple(slice.1, add.1, constant.2)
}
branch2 {
arg_tuple.2 = (s32[], f32[4,3]{1,0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(arg_tuple.2), index=0
copy.1 = s32[] copy(get-tuple-element.3)
get-tuple-element.4 = f32[4,3]{1,0} get-tuple-element(arg_tuple.2), index=1
copy.2 = f32[4,3]{0,1} copy(get-tuple-element.4)
constant.2 = f32[] constant(0.0)
ROOT tuple.2 = (f32[4,3]{0,1}, s32[], f32[]) tuple(copy.2, copy.1, constant.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.3 = (s32[], f32[10,3]{0,1}) parameter(1)
tuple.4 = (s32[], f32[4,3]{1,0}) parameter(2)
conditional = (f32[4,3]{0,1}, s32[], f32[])
conditional(pred.1, tuple.3, tuple.4), true_computation=branch1,
false_computation=branch2
get-zero-index = f32[4,3]{0,1} get-tuple-element(conditional), index=0
get-first-index = s32[] get-tuple-element(conditional), index=1
get-second-index = f32[] get-tuple-element(conditional), index=2
copy.3 = f32[4,3]{1,0} copy(get-zero-index)
ROOT tuple.5 = (f32[4,3]{0,1}, s32[], f32[]) tuple(copy.3, get-first-index,
get-second-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 9);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 8);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Tuple(op::GetTupleElement(op::Conditional(), 2),
op::GetTupleElement(op::Conditional(), 0),
op::GetTupleElement(op::Conditional(), 1))));
}
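
// A layout-assigning copy of the conditional's result is moved into the
// branches, making the conditional itself the entry root.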
TEST_F(ConditionalCodeMotionTest, MoveCopy2InBranch) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%branch0 (state.2: (f32[1,3,2])) -> (f32[1,3,2]) {
%state.2 = (f32[1,3,2]{1,2,0}) parameter(0)
%get-tuple-element.32 = f32[1,3,2]{1,2,0} get-tuple-element((f32[1,3,2]{1,2,0}) %state.2), index=0
%copy.1 = f32[1,3,2]{0,2,1} copy(f32[1,3,2]{1,2,0} %get-tuple-element.32)
ROOT %tuple.13 = (f32[1,3,2]{0,2,1}) tuple(f32[1,3,2]{0,2,1} %copy.1)
}
%branch1 (state.1: (s32[], f32[8,3,2], s32[2])) -> (f32[1,3,2]) {
%state.1 = (s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) parameter(0)
%get-tuple-element.17 = f32[8,3,2]{0,2,1} get-tuple-element((s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) %state.1), index=1
%get-tuple-element.18 = s32[2]{0} get-tuple-element((s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) %state.1), index=2
%get-tuple-element.16 = s32[] get-tuple-element((s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) %state.1), index=0
%dynamic-slice.3 = s32[1]{0} dynamic-slice(s32[2]{0} %get-tuple-element.18, s32[] %get-tuple-element.16), dynamic_slice_sizes={1}
%reshape.19 = s32[] reshape(s32[1]{0} %dynamic-slice.3)
%constant.21 = s32[] constant(0)
%dynamic-slice.4 = f32[1,3,2]{0,2,1} dynamic-slice(f32[8,3,2]{0,2,1} %get-tuple-element.17, s32[] %reshape.19, s32[] %constant.21, s32[] %constant.21), dynamic_slice_sizes={1,3,2}
ROOT %tuple.9 = (f32[1,3,2]{0,2,1}) tuple(f32[1,3,2]{0,2,1} %dynamic-slice.4)
}
ENTRY %f32_8_3_2__1-1.32 (idxs.1: s32[2], single_io.2: f32[8,3,2], repeated_io_0.3: f32[1,3,2]) -> (f32[1,3,2]) {
%idxs.1 = s32[2]{0} parameter(0)
%slice.10 = s32[1]{0} slice(s32[2]{0} %idxs.1), slice={[0:1]}
%reshape.11 = s32[] reshape(s32[1]{0} %slice.10)
%constant.12 = s32[] constant(0)
%compare.13 = pred[] compare(s32[] %reshape.11, s32[] %constant.12), direction=EQ
%repeated_io_0.3 = f32[1,3,2]{1,2,0} parameter(2)
%tuple.11 = (f32[1,3,2]{1,2,0}) tuple(f32[1,3,2]{1,2,0} %repeated_io_0.3)
%constant.5 = s32[] constant(1)
%single_io.2 = f32[8,3,2]{0,2,1} parameter(1)
%tuple.15 = (s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) tuple(s32[] %constant.5, f32[8,3,2]{0,2,1} %single_io.2, s32[2]{0} %idxs.1)
%conditional.28 = (f32[1,3,2]{0,2,1}) conditional(pred[] %compare.13, (f32[1,3,2]{1,2,0}) %tuple.11, (s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) %tuple.15), true_computation=%branch0, false_computation=%branch1
%get-tuple-element.33 = f32[1,3,2]{0,2,1} get-tuple-element((f32[1,3,2]{0,2,1}) %conditional.28), index=0
%copy.2 = f32[1,3,2]{1,2,0} copy(f32[1,3,2]{0,2,1} %get-tuple-element.33)
ROOT %tuple.16 = (f32[1,3,2]{1,2,0}) tuple(f32[1,3,2]{1,2,0} %copy.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
}
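
// When a branch root replicates the same value in several tuple entries,
// hoisting out deduplicates them and moves the all-reduce + convert pair
// into the entry computation.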
TEST_F(ConditionalCodeMotionTest, MoveReplicatedTupleEntryOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
arg_tuple.1 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(0)
get-tuple-element.11 = bf16[2,54,168,128] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.12 = bf16[2,52,168,128] get-tuple-element(arg_tuple.1), index=1
convolution.1 = bf16[3,3,128,128] convolution(bf16[2,54,168,128]
get-tuple-element.11, bf16[2,52,168,128]
get-tuple-element.12), window={size=52x168 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.1 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64
convert.1 = f32[3,3,128,128] convert(bf16[3,3,128,128] %all-reduce.1)
all-reduce.3 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64
convert.3 = f32[3,3,128,128] convert(bf16[3,3,128,128] %all-reduce.3)
ROOT tuple.1 = (f32[3,3,128,128], f32[3,3,128,128]) tuple(convert.1, convert.3)
}
on_false {
arg_tuple.2 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(0)
get-tuple-element.21 = bf16[2,86,104,128]
get-tuple-element(arg_tuple.2), index=0
get-tuple-element.22 = bf16[2,84,104,128]
get-tuple-element(arg_tuple.2), index=1
convolution.2 = bf16[3,3,128,128]
convolution(bf16[2,86,104,128] get-tuple-element.21, bf16[2,84,104,128]
get-tuple-element.22), window={size=84x104 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.2 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.2),
channel_id=485, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181
convert.2 = f32[3,3,128,128]
convert(bf16[3,3,128,128] %all-reduce.2)
ROOT tuple.2 = (f32[3,3,128,128], f32[3,3,128,128]) tuple(convert.2, convert.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(1)
arg_tuple.4 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(2)
conditional = (f32[3,3,128,128], f32[3,3,128,128])
conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=on_true,
false_computation=on_false
get-first-index = f32[3,3,128,128]
get-tuple-element(conditional), index=0
add.1 = f32[3,3,128,128] add(f32[3,3,128,128] get-first-index, f32[3,3,128,128] get-first-index)
ROOT result = (f32[3,3,128,128]) tuple(add.1)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
ASSERT_TRUE(ShapeUtil::Compatible(
conditional->shape(), ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(
BF16, {3, 3, 128, 128})})));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::AllReduce(op::GetTupleElement(op::Conditional()))),
op::Convert(
op::AllReduce(op::GetTupleElement(op::Conditional())))))));
}
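
// No move-in happens when the user of the conditional also consumes an
// operand defined outside the conditional.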
TEST_F(ConditionalCodeMotionTest, DoNotMoveWithExtraOperand) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch {
arg.1 = f32[10] parameter(0)
ROOT add.1 = f32[10] add(arg.1, arg.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = f32[10] parameter(1)
tuple.2 = f32[10] parameter(2)
conditional = f32[10]
conditional(pred.1, tuple.1, tuple.2), true_computation=branch,
false_computation=branch
ROOT pow.1 = f32[10] power(conditional, tuple.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
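
// Several independent user chains (the L2-loss multiply/reduce sequences)
// are all moved into the branches in a single pass invocation.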
TEST_F(ConditionalCodeMotionTest, MultipleIndependentMoveIns) {
absl::string_view hlo_string =
R"(
HloModule FromNMT
%add.31755 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%nmt.1 {
%wide_param.3 = (bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) parameter(0)
%get-tuple-element.16525 = bf16[1024,4096]{1,0} get-tuple-element((bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) %wide_param.3), index=0
%get-tuple-element.16527 = bf16[18,64,1024]{2,1,0} get-tuple-element((bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) %wide_param.3), index=1
%get-tuple-element.16588 = s32[] get-tuple-element((bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) %wide_param.3), index=2
%add.3764 = s32[] add(s32[] %get-tuple-element.16588, s32[] %get-tuple-element.16588), metadata={op_type="Sub" op_name="sub"}
%reshape.9821 = s32[1]{0} reshape(s32[] %add.3764)
%reshape.9822 = s32[] reshape(s32[1]{0} %reshape.9821)
%constant.13127 = s32[] constant(0)
%dynamic-slice.1245 = bf16[1,64,1024]{2,1,0} dynamic-slice(bf16[18,64,1024]{2,1,0} %get-tuple-element.16527, s32[] %reshape.9822, s32[] %constant.13127, s32[] %constant.13127), dynamic_slice_sizes={1,64,1024}
%reshape.9825 = bf16[64,1024]{1,0} reshape(bf16[1,64,1024]{2,1,0} %dynamic-slice.1245), metadata={op_type="GatherV2" op_name="GatherV2"}
%logistic.814 = bf16[64,1024]{1,0} logistic(bf16[64,1024]{1,0} %reshape.9825), metadata={op_type="Sigmoid" op_name="Sigmoid"}
%multiply.4890 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %reshape.9825, bf16[64,1024]{1,0} %logistic.814), metadata={op_type="Mul" op_name="mul"}
%tanh.573 = bf16[64,1024]{1,0} tanh(bf16[64,1024]{1,0} %reshape.9825), metadata={op_type="Tanh" op_name="Tanh"}
%multiply.4891 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %logistic.814, bf16[64,1024]{1,0} %tanh.573), metadata={op_type="Mul" op_name="mul_1"}
%add.3766 = bf16[64,1024]{1,0} add(bf16[64,1024]{1,0} %multiply.4890, bf16[64,1024]{1,0} %multiply.4891), metadata={op_type="AddV2" op_name="add_1"}
%multiply.4894 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %add.3766, bf16[64,1024]{1,0} %logistic.814), metadata={op_type="Mul" op_name="gradients_1/mul_grad/Mul"}
%constant.10568 = bf16[] constant(1), metadata={op_type="TanhGrad" op_name="gradients/Tanh_1_grad/TanhGrad"}
%broadcast.7198 = bf16[64,1024]{1,0} broadcast(bf16[] %constant.10568), dimensions={}, metadata={op_type="TanhGrad" op_name="gradients/Tanh_1_grad/TanhGrad"}
%multiply.4896 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %tanh.573, bf16[64,1024]{1,0} %tanh.573), metadata={op_type="TanhGrad" op_name="gradients/Tanh_1_grad/TanhGrad"}
%constant.10571 = bf16[] constant(1), metadata={op_type="SigmoidGrad" op_name="gradients/Sigmoid_grad/SigmoidGrad"}
%broadcast.7201 = bf16[64,1024]{1,0} broadcast(bf16[] %constant.10571), dimensions={}, metadata={op_type="SigmoidGrad" op_name="gradients/Sigmoid_grad/SigmoidGrad"}
%subtract.1702 = bf16[64,1024]{1,0} subtract(bf16[64,1024]{1,0} %broadcast.7201, bf16[64,1024]{1,0} %logistic.814), metadata={op_type="SigmoidGrad" op_name="gradients/Sigmoid_grad/SigmoidGrad"}
%multiply.4907 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %tanh.573, bf16[64,1024]{1,0} %add.3766), metadata={op_type="Mul" op_name="gradients/mul_2_grad/Mul_1"}
%multiply.4908 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %multiply.4907, bf16[64,1024]{1,0} %logistic.814), metadata={op_type="SigmoidGrad" op_name="gradients/Sigmoid_2_grad/SigmoidGrad"}
%dot.781 = bf16[64,4096]{1,0} dot(bf16[64,1024]{1,0} %multiply.4908, bf16[1024,4096]{1,0} %get-tuple-element.16525), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="MatMul" op_name="MatMul"}
ROOT %tuple.3200 = (bf16[64,1024]{1,0}, bf16[64,4096]{1,0}, s32[]) tuple(bf16[64,1024]{1,0} %multiply.4894, bf16[64,4096]{1,0} %dot.781, s32[] %reshape.9822)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) parameter(1)
arg_tuple.4 = (bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) parameter(2)
%arg.2 = s32[] parameter(3)
%conditional.3 = (bf16[64,1024]{1,0}, bf16[64,4096]{1,0}, s32[]) conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=nmt.1, false_computation=nmt.1
%get-tuple-element.15889 = bf16[64,1024]{1,0} get-tuple-element((bf16[64,1024]{1,0}, bf16[64,4096]{1,0}, s32[]) %conditional.3), index=0, metadata={op_type="Case" op_name="switch_case/indexed_case"}
%multiply.4596 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %get-tuple-element.15889, bf16[64,1024]{1,0} %get-tuple-element.15889), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%constant.10279 = bf16[] constant(0), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%reduce.844 = bf16[] reduce(bf16[64,1024]{1,0} %multiply.4596, bf16[] %constant.10279), dimensions={0,1}, to_apply=%add.31755, metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%get-tuple-element.15890 = bf16[64,4096]{1,0} get-tuple-element((bf16[64,1024]{1,0}, bf16[64,4096]{1,0}, s32[]) %conditional.3), index=1, metadata={op_type="Case" op_name="switch_case/indexed_case"}
%multiply.4597 = bf16[64,4096]{1,0} multiply(bf16[64,4096]{1,0} %get-tuple-element.15890, bf16[64,4096]{1,0} %get-tuple-element.15890), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%constant.10280 = bf16[] constant(0), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%reduce.845 = bf16[] reduce(bf16[64,4096]{1,0} %multiply.4597, bf16[] %constant.10280), dimensions={0,1}, to_apply=%add.31755, metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%multiply.4667 = bf16[] multiply(bf16[] %reduce.845, bf16[] %reduce.844), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
ROOT %tuple.3200 = (bf16[], s32[]) tuple(%multiply.4667, s32[] %arg.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional.3");
ASSERT_NE(conditional, nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 27);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 27);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::GetTupleElement(op::Conditional()),
op::Parameter())));
}
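
// Exercises the search configuration for a single conditional: each
// (flip_start, max_flip, flip_stride) tuple built via MakeSearchConfig flips
// a different subset of the pass's decisions, and the expected module shape
// is checked per case.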
TEST_F(ConditionalCodeMotionTest, TestConfigurationFlag) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = (bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
add.1 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index, bf16[2,512,364]{2,1,0} get-first-index)
ROOT result = (bf16[2,512,364]{2,1,0}) tuple(add.1)
}
)";
for (int max_flip = 1; max_flip < 3; ++max_flip) {
for (int flip_stride = 1; flip_stride < ((max_flip > 1) ? 7 : 2);
++flip_stride) {
for (int flip_start = 0; flip_start < 7; ++flip_start) {
int64_t search_config = ConditionalCodeMotion::MakeSearchConfig(
flip_start, max_flip, flip_stride);
ConditionalCodeMotion pass(true, true, search_config);
VLOG(1) << "Testing max_flip=" << max_flip
<< "; flip_start = " << flip_start
<< "; flip_stride = " << flip_stride
<< "; search_config=" << search_config;
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
bool opt_result = pass.Run(&*module).value();
if (flip_start < 2 && max_flip > 1 && flip_stride == 1) {
ASSERT_FALSE(opt_result);
continue;
}
ASSERT_TRUE(opt_result);
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
const HloComputation* on_false = conditional->branch_computation(1);
HloInstruction* root = module->entry_computation()->root_instruction();
switch (flip_start) {
case 0:
[[fallthrough]];
case 1:
ASSERT_EQ(on_true->instruction_count(), 6);
ASSERT_EQ(on_false->instruction_count(), 6);
EXPECT_THAT(root, AllOf(op::Conditional()));
break;
case 2:
ASSERT_EQ(on_true->instruction_count(), 4);
ASSERT_EQ(on_false->instruction_count(), 4);
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::GetTupleElement(op::Conditional())),
op::Convert(op::GetTupleElement(op::Conditional()))))));
break;
case 3:
ASSERT_EQ(on_true->instruction_count(), 1);
ASSERT_EQ(on_false->instruction_count(), 1);
EXPECT_THAT(root, AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(
op::Conditional())))))));
break;
case 4:
case 5:
case 6:
ASSERT_EQ(on_true->instruction_count(), 2);
ASSERT_EQ(on_false->instruction_count(), 2);
EXPECT_THAT(root,
AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional())))),
op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional()))))))));
break;
default:
ASSERT_EQ(on_true->instruction_count(), 1);
ASSERT_EQ(on_false->instruction_count(), 1);
EXPECT_THAT(root, AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(
op::Conditional())))))));
break;
}
}
}
}
}
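
// Same as TestConfigurationFlag, but drives two conditionals through a
// string-valued search configuration with one entry per conditional.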
TEST_F(ConditionalCodeMotionTest, TestMultipleConfigurationFlags) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = (bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
pred.2 = pred[] parameter(3)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
add.1 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index, bf16[2,512,364]{2,1,0} get-first-index)
conditional.2 = (bf16[2,512,364]{2,1,0}) conditional(pred.2, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index.2 = bf16[2,512,364]{2,1,0} get-tuple-element(conditional.2), index=0
add.2 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index.2, bf16[2,512,364]{2,1,0} get-first-index.2)
ROOT result = (bf16[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(add.1, add.2)
}
)";
for (int max_flip = 1; max_flip < 3; ++max_flip) {
for (int flip_stride = 1; flip_stride < ((max_flip > 1) ? 7 : 2);
++flip_stride) {
for (int flip_start = 0; flip_start < 7; ++flip_start) {
std::stringstream config_stream;
config_stream << 0 << "," << flip_start << "," << max_flip << ","
<< flip_stride << ";";
config_stream << 1 << "," << flip_start << "," << max_flip << ","
<< flip_stride;
auto search_config = config_stream.str();
ConditionalCodeMotion pass(true, true, search_config);
VLOG(1) << "Testing max_flip=" << max_flip
<< "; flip_start = " << flip_start
<< "; flip_stride = " << flip_stride
<< "; search_config=" << search_config;
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
bool opt_result = pass.Run(&*module).value();
if (flip_start < 2 && max_flip > 1 && flip_stride == 1) {
ASSERT_FALSE(opt_result);
continue;
}
ASSERT_TRUE(opt_result);
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
const HloComputation* on_false = conditional->branch_computation(1);
HloInstruction* root = module->entry_computation()->root_instruction();
switch (flip_start) {
case 0:
[[fallthrough]];
case 1:
ASSERT_EQ(on_true->instruction_count(), 6);
ASSERT_EQ(on_false->instruction_count(), 6);
EXPECT_THAT(
root, AllOf(op::Tuple(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional()))));
break;
case 2:
ASSERT_EQ(on_true->instruction_count(), 4);
ASSERT_EQ(on_false->instruction_count(), 4);
EXPECT_THAT(
root,
AllOf(op::Tuple(
op::Add(
op::Convert(op::GetTupleElement(op::Conditional())),
op::Convert(op::GetTupleElement(op::Conditional()))),
op::Add(
op::Convert(op::GetTupleElement(op::Conditional())),
op::Convert(op::GetTupleElement(op::Conditional()))))));
break;
case 3:
ASSERT_EQ(on_true->instruction_count(), 1);
ASSERT_EQ(on_false->instruction_count(), 1);
EXPECT_THAT(
root, AllOf(op::Tuple(
op::Add(op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional())))),
op::Add(op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(
op::Conditional())))))));
break;
case 4:
case 5:
case 6:
ASSERT_EQ(on_true->instruction_count(), 2);
ASSERT_EQ(on_false->instruction_count(), 2);
EXPECT_THAT(
root,
AllOf(op::Tuple(
op::Add(op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional())))),
op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional()))))),
op::Add(op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional())))),
op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional()))))))));
break;
default:
ASSERT_EQ(on_true->instruction_count(), 1);
ASSERT_EQ(on_false->instruction_count(), 1);
EXPECT_THAT(root, AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(
op::Conditional())))))));
break;
}
}
}
}
}
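
// A shape-changing move must keep the conditional's sharding consistent:
// afterwards the conditional either carries no sharding or a tuple sharding
// matching its new shape.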
TEST_F(ConditionalCodeMotionTest, ShapeChangingMovePreservesSharding) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%on_true (arg_tuple.1: (f32[10])) -> (f32[10]) {
%arg_tuple.1 = (f32[10]{0}) parameter(0), sharding={{devices=[4]0,1,2,3}}
%get-tuple-element.1 = f32[10]{0} get-tuple-element((f32[10]{0}) %arg_tuple.1), index=0, sharding={devices=[4]0,1,2,3}
%add.1 = f32[10]{0} add(f32[10]{0} %get-tuple-element.1, f32[10]{0} %get-tuple-element.1), sharding={devices=[4]0,1,2,3}
ROOT %tuple.3 = (f32[10]{0}) tuple(f32[10]{0} %add.1), sharding={{devices=[4]0,1,2,3}}
}
%on_false (arg_tuple.2: (f32[10])) -> (f32[10]) {
%arg_tuple.2 = (f32[10]{0}) parameter(0), sharding={{devices=[4]0,1,2,3}}
%get-tuple-element.2 = f32[10]{0} get-tuple-element((f32[10]{0}) %arg_tuple.2), index=0, sharding={devices=[4]0,1,2,3}
%mul.1 = f32[10]{0} multiply(f32[10]{0} %get-tuple-element.2, f32[10]{0} %get-tuple-element.2), sharding={devices=[4]0,1,2,3}
ROOT %tuple.4 = (f32[10]{0}) tuple(f32[10]{0} %mul.1), sharding={{devices=[4]0,1,2,3}}
}
ENTRY %main (pred.1: pred[], tuple.1: (f32[10]), tuple.2: (f32[10])) -> (f32[10], f32[10]) {
%pred.1 = pred[] parameter(0), sharding={replicated}
%tuple.1 = (f32[10]{0}) parameter(1), sharding={{replicated}}
%tuple.2 = (f32[10]{0}) parameter(2), sharding={{devices=[4]0,1,2,3}}
%conditional = (f32[10]{0}) conditional(pred[] %pred.1, (f32[10]{0}) %tuple.1, (f32[10]{0}) %tuple.2), true_computation=%on_true, false_computation=%on_false, sharding={{devices=[4]0,1,2,3}}
%get-first-index = f32[10]{0} get-tuple-element((f32[10]{0}) %conditional), index=0, sharding={devices=[4]0,1,2,3}
%get-first-index.2 = f32[10]{0} get-tuple-element((f32[10]{0}) %conditional), index=0, sharding={devices=[4]0,1,2,3}
%pow.1 = f32[10]{0} power(f32[10]{0} %get-first-index, f32[10]{0} %get-first-index.2), sharding={devices=[4]0,1,2,3}
ROOT %tuple.0 = (f32[10]{0}, f32[10]{0}) tuple(f32[10]{0} %pow.1, f32[10]{0} %get-first-index.2), sharding={{devices=[4]0,1,2,3}, {devices=[4]0,1,2,3}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ConditionalCodeMotion pass(true, true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional())));
EXPECT_EQ(root->operand(0)->operand(0), root->operand(1)->operand(0));
const HloInstruction* conditional = root->operand(0)->operand(0);
EXPECT_THAT(
conditional,
AnyOf(op::NoSharding(),
op::Sharding("{{devices=[4]0,1,2,3},{devices=[4]0,1,2,3}}")));
}
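
// A convert whose result is duplicated in the branch root tuple must not be
// hoisted out.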
TEST_F(ConditionalCodeMotionTest, ConvertDuplicate) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = (bf16[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(%convert, %convert)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717)
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(%convert, %convert)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
ROOT conditional = (bf16[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
VLOG(2) << "module:\n" << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
}
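
// Mutually-cancelling convert pairs (f32 -> bf16 -> f32) in the branch roots
// must not be hoisted out.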
TEST_F(ConditionalCodeMotionTest, NestedConvert) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
%convert.2894 = f32[2,512,364]{2,1,0} convert(bf16[2,512,364]{2,1,0} %convert)
ROOT %tuple.1 = (f32[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(%convert.2894, %convert)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = f32[2,512,364]{2,1,0} convert(%convert)
ROOT %tuple.2 = (f32[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(%convert.3604, %convert)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
ROOT conditional = (f32[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
VLOG(2) << "module:\n" << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
}
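
// Converts feeding a nested conditional's branch-index operand must not be
// moved; the outer conditional remains the entry root.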
TEST_F(ConditionalCodeMotionTest, NestedConditionalDisableMoveConvert) {
absl::string_view hlo_string =
R"(
HloModule xla_computation_unknown.45
%branch_0_comp.11 (parameter.12: (u32[])) -> (s8[]) {
%parameter.12 = (u32[]) parameter(0)
%get-tuple-element.13 = u32[] get-tuple-element((u32[]) %parameter.12), index=0
%convert.15 = s8[] convert(u32[] %get-tuple-element.13)
ROOT %tuple.18 = (s8[]) tuple(s8[] %convert.15)
}
%branch_0_comp__1.19 (parameter.20: (pred[])) -> (s8[]) {
%parameter.20 = (pred[]) parameter(0)
%get-tuple-element.21 = pred[] get-tuple-element((pred[]) %parameter.20), index=0
%convert.23 = s8[] convert(pred[] %get-tuple-element.21)
ROOT %tuple.24 = (s8[]) tuple(s8[] %convert.23)
}
%branch_1_comp__1.25 (parameter.26: (pred[])) -> (s8[]) {
%parameter.26 = (pred[]) parameter(0)
%get-tuple-element.27 = pred[] get-tuple-element((pred[]) %parameter.26), index=0
%convert.29 = s8[] convert(pred[] %get-tuple-element.27)
ROOT %tuple.30 = (s8[]) tuple(s8[] %convert.29)
}
%branch_1_comp.31 (parameter.32: (u32[])) -> (s8[]) {
%parameter.32 = (u32[]) parameter(0)
%get-tuple-element.33 = u32[] get-tuple-element((u32[]) %parameter.32), index=0
%convert.35 = pred[] convert(u32[] %get-tuple-element.33)
%convert.36 = s32[] convert(pred[] %convert.35)
%constant.37 = pred[] constant(true)
%tuple.38 = (pred[]) tuple(pred[] %constant.37)
ROOT %conditional.39 = (s8[]) conditional(s32[] %convert.36, (pred[]) %tuple.38, (pred[]) %tuple.38), branch_computations={%branch_0_comp__1.19, %branch_1_comp__1.25}
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation_unknown.45 (parameter.3: u8[], parameter.4: u8[], parameter.5: u32[15,14]) -> (s8[]) {
%parameter.3 = u8[] parameter(0)
%parameter.4 = u8[] parameter(1)
%compare.7 = pred[] compare(u8[] %parameter.3, u8[] %parameter.4), direction=LT
%convert.9 = s32[] convert(pred[] %compare.7)
%parameter.5 = u32[15,14]{1,0} parameter(2)
%constant.2 = u32[] constant(0)
%reduce.1 = u32[] reduce(u32[15,14]{1,0} %parameter.5, u32[] %constant.2), dimensions={1,0}, to_apply=%scalar_add_computation.1
%tuple.10 = (u32[]) tuple(u32[] %reduce.1)
ROOT %conditional.42 = (s8[]) conditional(s32[] %convert.9, (u32[]) %tuple.10, (u32[]) %tuple.10), branch_computations={%branch_0_comp.11, %branch_1_comp.31}
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
}
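
// Entry operands that are only partially used by the branches are moved in,
// including the recomputation that feeds a nested conditional's predicate.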
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands1) {
absl::string_view hlo_string =
R"(
HloModule xla_computation_unknown.45
%branch_0_comp.11 (parameter.12: (u32[])) -> (s8[]) {
%parameter.12 = (u32[], u32[]) parameter(0)
%get-tuple-element.13 = u32[] get-tuple-element(%parameter.12), index=1
%convert.15 = s8[] convert(u32[] %get-tuple-element.13)
ROOT %tuple.18 = (s8[]) tuple(s8[] %convert.15)
}
%branch_0_comp__1.19 (parameter.20: (pred[])) -> (s8[]) {
%parameter.20 = (pred[],s8[]) parameter(0)
%get-tuple-element.21 = pred[] get-tuple-element(%parameter.20), index=0
%convert.23 = s8[] convert(pred[] %get-tuple-element.21)
ROOT %tuple.24 = (s8[]) tuple(s8[] %convert.23)
}
%branch_1_comp__1.25 (parameter.26: (pred[])) -> (s8[]) {
%parameter.26 = (pred[],s8[]) parameter(0)
%get-tuple-element.27 = s8[] get-tuple-element(%parameter.26), index=1
ROOT %tuple.30 = (s8[]) tuple(s8[] %get-tuple-element.27)
}
%branch_1_comp.31 (parameter.32: (u32[])) -> (s8[]) {
%parameter.32 = (u32[], u32[]) parameter(0)
%get-tuple-element.33 = u32[] get-tuple-element(%parameter.32), index=0
%convert.35 = pred[] convert(%get-tuple-element.33)
%convert.36 = s32[] convert(%get-tuple-element.33)
%constant.37 = s8[] constant(1)
%add.0 = s8[] add(constant.37, constant.37)
%tuple.38 = (pred[], s8[]) tuple(pred[] %convert.35, s8[] add.0)
ROOT %conditional.39 = (s8[]) conditional(%convert.36, %tuple.38, %tuple.38), branch_computations={%branch_0_comp__1.19, %branch_1_comp__1.25}
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation_unknown.45 (parameter.3: u8[], parameter.4: u8[], parameter.5: u32[15,14]) -> (s8[]) {
%parameter.3 = u8[] parameter(0)
%parameter.4 = u8[] parameter(1)
%compare.7 = pred[] compare(u8[] %parameter.3, u8[] %parameter.4), direction=LT
%convert.9 = s32[] convert(pred[] %compare.7)
%parameter.5 = u32[15,14]{1,0} parameter(2)
%constant.2 = u32[] constant(0)
%reduce.1 = u32[] reduce(u32[15,14]{1,0} %parameter.5, u32[] %constant.2), dimensions={1,0}, to_apply=%scalar_add_computation.1
%tuple.10 = (u32[], u32[]) tuple(%reduce.1, constant.2)
ROOT %conditional.42 = (s8[]) conditional(s32[] %convert.9, %tuple.10, %tuple.10), branch_computations={%branch_0_comp.11, %branch_1_comp.31}
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
VLOG(3) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 4);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 8);
HloInstruction* conditional_39 =
root->branch_computation(1)->root_instruction();
ASSERT_EQ(conditional_39->opcode(), HloOpcode::kConditional);
const HloInstruction* conditional_39_pred = conditional_39->operand(0);
EXPECT_THAT(
conditional_39_pred,
op::Convert(op::Reduce(op::GetTupleElement(), op::GetTupleElement())));
const HloInstruction* conditional_39_true =
conditional_39->branch_computation(0)->root_instruction();
EXPECT_THAT(conditional_39_true, op::Tuple(op::Convert(op::Convert(
op::GetTupleElement(op::Parameter())))));
const HloInstruction* conditional_39_false =
conditional_39->branch_computation(1)->root_instruction();
EXPECT_THAT(conditional_39_false,
op::Tuple(op::Add(op::Constant(), op::Constant())));
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands2) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[]{:T(256)}, f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[]{:T(256)} get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
tmp_5 = f32[1]{0} reshape(f32[] tmp_4)
ROOT tmp_6 = (f32[], f32[1]{0}) tuple(f32[] tmp_4, f32[1]{0} tmp_5)
}
%branch_false {
tmp_0 = (f32[]) parameter(0)
tmp_1 = f32[] get-tuple-element((f32[]) tmp_0), index=0
tmp_2 = f32[1]{0} reshape(f32[] tmp_1)
ROOT tmp_3 = (f32[], f32[1]{0}) tuple(f32[] tmp_1, f32[1]{0} tmp_2)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = ((f32[], f32[])) parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
%tuple.87 = (f32[]) tuple(f32[] %multiply.13463)
ROOT conditional.1 = (f32[], f32[1]{0}) conditional(%parameter.2, %parameter.1, %tuple.87), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 7);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 9);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(
op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement()))),
op::Reshape(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement()))))));
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands3) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[]{:T(256)}, f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[]{:T(256)} get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
ROOT tmp_5 = (f32[]) tuple(tmp_4)
}
%branch_false {
ROOT tmp_0 = (f32[]) parameter(0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = ((f32[], f32[])) parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
%tuple.87 = (f32[]) tuple(f32[] %multiply.13463)
ROOT conditional.1 = (f32[]) conditional(%parameter.2, %parameter.1, %tuple.87), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 6);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 8);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement())))));
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands4) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[]{:T(256)}, f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[]{:T(256)} get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
tmp_5 = (f32[]) tuple(tmp_4)
ROOT tmp_6 = ((f32[])) tuple(tmp_5)
}
%branch_false {
tmp_0 = (f32[]) parameter(0)
ROOT tmp_1 = ((f32[])) tuple(tmp_0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = ((f32[], f32[])) parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
%tuple.87 = (f32[]) tuple(f32[] %multiply.13463)
ROOT conditional.1 = ((f32[])) conditional(%parameter.2, %parameter.1, %tuple.87), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 7);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 9);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(op::Tuple(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement()))))));
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands5) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[]{:T(256)}, f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[]{:T(256)} get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
ROOT tmp_5 = (f32[]) tuple(tmp_4)
}
%branch_false {
tmp_0 = f32[] parameter(0)
ROOT tmp_1 = (f32[]) tuple(tmp_0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = ((f32[], f32[])) parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
ROOT conditional.1 = (f32[]) conditional(%parameter.2, %parameter.1, %multiply.13463), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 6);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 8);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement())))));
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands6) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[], f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
ROOT tmp_5 = (f32[]) tuple(tmp_4)
}
%branch_false {
tmp_0 = f32[] parameter(0)
ROOT tmp_1 = (f32[]) tuple(tmp_0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = f32[] parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%add.0 = f32[] add(parameter.1, parameter.1)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
%tuple.1 = (f32[], f32[]) tuple(add.0, add.0)
%tuple.2 = ((f32[], f32[])) tuple(%tuple.1)
ROOT conditional.1 = (f32[]) conditional(%parameter.2, %tuple.2, %multiply.13463), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 6);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 8);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement())))));
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands7) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
window.58 = bf16[1,23,768]{2,1,0} parameter(0)
ROOT collective-permute.29 = bf16[1,23,768]{2,1,0} collective-permute(window.58), channel_id=100, source_target_pairs={{0,1},{1,0}}
}
%branch_false {
ROOT window.59 = bf16[1,23,768]{2,1,0} parameter(0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = bf16[1,23,768]{2,1,0} parameter(0)
%parameter.1 = bf16[1,23,768]{2,1,0} parameter(1)
%parameter.2 = pred[] parameter(2)
add.244 = bf16[1,23,768]{2,1,0} add(parameter.0, parameter.1)
ROOT conditional.1 = bf16[1,23,768]{2,1,0} conditional(%parameter.2, %add.244, %add.244), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
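// Both branches are fed the same add result, so there is nothing
// branch-specific to sink; the pass is expected to report no change.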
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands8) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
window.58 = bf16[1,23,768]{2,1,0} parameter(0)
ROOT collective-permute.29 = bf16[1,23,768]{2,1,0} collective-permute(window.58), channel_id=100, source_target_pairs={{0,1},{1,0}}
}
%branch_false {
ROOT window.59 = bf16[1,23,768]{2,1,0} parameter(0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = bf16[1,23,768]{2,1,0} parameter(0)
%parameter.1 = bf16[1,23,768]{2,1,0} parameter(1)
%parameter.2 = pred[] parameter(2)
add.244 = bf16[1,23,768]{2,1,0} add(parameter.0, parameter.1)
ROOT conditional.1 = bf16[1,23,768]{2,1,0} conditional(%parameter.2, %parameter.0, %add.244), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
VLOG(2) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 2);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 4);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(conditional_false,
op::Add(op::GetTupleElement(), op::GetTupleElement()));
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands9) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
ROOT tmp = ((f32[], f32[])) parameter(0)
}
%branch_false {
ROOT tmp = ((f32[], f32[])) parameter(0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = f32[] parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%add.0 = f32[] add(parameter.1, parameter.1)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %parameter.1, f32[] %power.1)
%multiply.13464 = f32[] multiply(f32[] %parameter.0, f32[] %multiply.13463)
%tuple.1 = (f32[], f32[]) tuple(add.0, add.0)
%tuple.2 = ((f32[], f32[])) tuple(%tuple.1)
%tuple.3 = (f32[], f32[]) tuple(multiply.13463, multiply.13464)
%tuple.4 = ((f32[], f32[])) tuple(tuple.3)
ROOT conditional.1 = ((f32[], f32[])) conditional(%parameter.2, %tuple.2, %tuple.4), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
const HloInstruction* conditional_true =
root->branch_computation(0)->root_instruction();
EXPECT_THAT(conditional_false->shape().tuple_shapes_size(), 1);
EXPECT_THAT(conditional_false->shape().tuple_shapes(0).tuple_shapes_size(),
2);
EXPECT_THAT(conditional_true->shape().tuple_shapes_size(), 1);
EXPECT_THAT(conditional_true->shape().tuple_shapes(0).tuple_shapes_size(), 2);
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands10) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp = ((f32[], f32[])) parameter(0)
tmp1 = (f32[], f32[]) get-tuple-element(tmp), index=0
tmp2 = f32[] get-tuple-element(tmp1), index=0
tmp3 = f32[] get-tuple-element(tmp1), index=1
add = f32[] add(tmp2, tmp3)
ROOT tuple = (f32[], (f32[], f32[])) tuple(add, tmp1)
}
%branch_false {
tmp = ((f32[], f32[])) parameter(0)
tmp1 = (f32[], f32[]) get-tuple-element(tmp), index=0
tmp2 = f32[] get-tuple-element(tmp1), index=0
ROOT tuple = (f32[], (f32[], f32[])) tuple(tmp2, tmp1)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = f32[] parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%add.0 = f32[] add(parameter.1, parameter.1)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %parameter.1, f32[] %power.1)
%multiply.13464 = f32[] multiply(f32[] %parameter.0, f32[] %multiply.13463)
%tuple.1 = (f32[], f32[]) tuple(add.0, add.0)
%tuple.2 = ((f32[], f32[])) tuple(%tuple.1)
%tuple.3 = (f32[], f32[]) tuple(multiply.13463, multiply.13464)
%tuple.4 = ((f32[], f32[])) tuple(tuple.3)
ROOT conditional.1 = (f32[], (f32[], f32[])) conditional(%parameter.2, %tuple.2, %tuple.4), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
const HloInstruction* conditional_true =
root->branch_computation(0)->root_instruction();
EXPECT_THAT(conditional_false->shape().tuple_shapes_size(), 2);
EXPECT_THAT(conditional_false->shape().tuple_shapes(1).tuple_shapes_size(),
2);
EXPECT_THAT(conditional_true->shape().tuple_shapes_size(), 2);
EXPECT_THAT(conditional_true->shape().tuple_shapes(1).tuple_shapes_size(), 2);
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands11) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
region_2.494 {
Arg_.495 = (u32[], u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(Arg_.495), index=1, metadata={op_type="Less" op_name="cond_1/Less"}
bitcast-convert = s32[] bitcast-convert(get-tuple-element), metadata={op_type="Less" op_name="cond_1/Less"}
constant.172 = s32[] constant(0), metadata={op_type="Less" op_name="cond_1/Less"}
compare = pred[] compare(bitcast-convert, constant.172), direction=LT, metadata={op_type="Less" op_name="cond_1/Less"}
constant.1 = u32[] constant(0)
compare.1 = pred[] compare(get-tuple-element, constant.1), direction=EQ, metadata={op_type="Less" op_name="cond_1/Less"}
get-tuple-element.2 = u32[] get-tuple-element(Arg_.495), index=0, metadata={op_type="Less" op_name="cond_1/Less"}
constant = u32[] constant(25000), metadata={op_type="Less" op_name="cond_1/Less"}
compare.2 = pred[] compare(get-tuple-element.2, constant), direction=LT, metadata={op_type="Less" op_name="cond_1/Less"}
and = pred[] and(compare.1, compare.2), metadata={op_type="Less" op_name="cond_1/Less"}
or = pred[] or(compare, and), metadata={op_type="Less" op_name="cond_1/Less"}
ROOT tuple.1 = (pred[]) tuple(or)
}
region_3.498 {
Arg_.499 = pred[] parameter(0)
ROOT tuple.2 = (pred[]) tuple(Arg_.499)
}
ENTRY %xla_computation {
custom-call = u32[]{:T(256)} parameter(0)
bitcast-convert.31 = s32[]{:T(256)} parameter(1)
constant.202 = s32[]{:T(256)} parameter(2)
constant.21 = u32[]{:T(256)} parameter(3)
custom-call.1 = u32[]{:T(256)} parameter(4)
compare.38 = pred[]{:T(256)} compare(bitcast-convert.31, constant.202), direction=GT, metadata={op_type="GreaterEqual" op_name="GreaterEqual"}
compare.39 = pred[]{:T(256)} compare(custom-call, constant.21), direction=EQ, metadata={op_type="GreaterEqual" op_name="GreaterEqual"}
or.17 = pred[]{:T(256)} or(compare.38, compare.39), metadata={op_type="GreaterEqual" op_name="GreaterEqual"}
tuple.20 = (u32[]{:T(256)}, u32[]{:T(256)}) tuple(custom-call.1, custom-call), sharding={{maximal device=0}, {maximal device=0}}
ROOT conditional = (pred[]) conditional(or.17, tuple.20, or.17), true_computation=region_2.494, false_computation=region_3.498, metadata={op_type="If" op_name="cond_1"}
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional(op::Or(), op::Tuple(), op::Or()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_code_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_code_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd8bcab4-0896-4547-adfa-bf93fb8d2f73 | cpp | tensorflow/tensorflow | log_softmax | tensorflow/compiler/tf2tensorrt/convert/ops/log_softmax.cc | tensorflow/lite/kernels/log_softmax_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
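// Converter for the TF LogSoftmax op. Lowers it to TensorRT layers via the
// identity log_softmax(x) = x - log(sum(exp(x))) over the last dimension.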
class ConvertLogSoftmax : public OpConverterBase<ConvertLogSoftmax> {
public:
explicit ConvertLogSoftmax(const OpConverterParams *params)
: OpConverterBase<ConvertLogSoftmax>(params) {}
static constexpr std::array<InputArgSpec, 1> InputSpec() {
return std::array<InputArgSpec, 1>{
InputArgSpec::Create("logits", TrtInputArg::kTensor)};
}
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
if (!num_trt_dims && params.use_implicit_batch) {
return errors::InvalidArgument(
"TensorRT LogSoftmax cannot apply on the batch dimension");
}
return OkStatus();
}
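// Emits exp(logits), a sum reduction over the innermost dimension, the log
// of that sum, and the final elementwise subtraction from the logits.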
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &node_def = params.node_def;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
nvinfer1::IUnaryLayer *exp = params.converter->network()->addUnary(
*logits_tensor->trt_tensor(), nvinfer1::UnaryOperation::kEXP);
TFTRT_RETURN_ERROR_IF_NULLPTR(exp, node_def.name());
params.converter->SetLayerName(exp, node_def, "exp");
nvinfer1::IReduceLayer *reduced_sum =
params.converter->network()->addReduce(
*exp->getOutput(0), nvinfer1::ReduceOperation::kSUM,
(1 << (num_trt_dims - 1)) /* reduce over the last dimension */,
/*keepDimensions=*/true);
// Defensive null check, consistent with the checks on the other layers
// created in this function.
TFTRT_RETURN_ERROR_IF_NULLPTR(reduced_sum, node_def.name());
params.converter->SetLayerName(reduced_sum, node_def, "reduced_sum");
nvinfer1::IUnaryLayer *log_reduced_sum =
params.converter->network()->addUnary(*reduced_sum->getOutput(0),
nvinfer1::UnaryOperation::kLOG);
TFTRT_RETURN_ERROR_IF_NULLPTR(log_reduced_sum, node_def.name());
params.converter->SetLayerName(log_reduced_sum, node_def,
"log_reduced_sum");
nvinfer1::IElementWiseLayer *sub =
params.converter->network()->addElementWise(
*logits_tensor->trt_tensor(), *log_reduced_sum->getOutput(0),
nvinfer1::ElementWiseOperation::kSUB);
TFTRT_RETURN_ERROR_IF_NULLPTR(sub, node_def.name());
params.converter->SetLayerName(sub, node_def, "sub");
params.outputs->push_back(TRT_TensorOrWeights(sub->getOutput(0)));
return OkStatus();
}
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertLogSoftmax>(),
"LogSoftmax");
}
}
}
#endif | #include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
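// Minimal SingleOpModel wrapper that builds a one-op interpreter around the
// builtin LOG_SOFTMAX kernel.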
class LogSoftmaxOpModel : public SingleOpModel {
public:
LogSoftmaxOpModel(int batches, int size)
: batches_(batches), input_size_(size) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LOG_SOFTMAX, BuiltinOptions_LogSoftmaxOptions,
CreateLogSoftmaxOptions(builder_).Union());
BuildInterpreter({{batches_, input_size_}});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
int batches_;
int input_size_;
};
TEST(LogSoftmaxOpTest, SimpleTest) {
LogSoftmaxOpModel m(2, 5);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-4.45191431, -3.45191431, -2.45191431, -1.45191443, -0.4519144,
-0.4519144, -1.45191443, -2.45191431, -3.45191431, -4.45191431},
1e-6)));
}
TEST(LogSoftmaxOpTest, CompareWithTFmini) {
const int batch_size = 2;
const int input_size = 5;
static float input_buffer[] = {
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
};
LogSoftmaxOpModel m(batch_size, input_size);
m.SetInput(0, input_buffer, input_buffer + input_size * batch_size);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::unique_ptr<float[]> output_buffer(new float[input_size * batch_size]);
auto input_shape = RuntimeShape({batch_size, 1, 1, input_size});
SoftmaxParams params;
tflite::reference_ops::LogSoftmax(params, input_shape, input_buffer,
input_shape, output_buffer.get());
std::vector<float> expected;
expected.insert(expected.end(), output_buffer.get(),
output_buffer.get() + input_size * batch_size);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(expected, 1e-6)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/log_softmax.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/log_softmax_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
38fbeaea-f8df-4f7a-a655-4f468d1949c7 | cpp | tensorflow/tensorflow | target_util | third_party/xla/xla/service/gpu/target_util.cc | third_party/xla/xla/service/gpu/target_util_test.cc | #include "xla/service/gpu/target_util.h"
#include <functional>
#include <string>
#include <variant>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
using absl::StrCat;
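// Per-intrinsic lowering table: the NVPTX intrinsic ID plus, for AMDGPU and
// SPIR, either an intrinsic ID or a callback that emits an equivalent
// device-function call, since those targets sometimes expose the
// functionality as a device function rather than an LLVM intrinsic.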
struct TargetIntrinsics {
llvm::Intrinsic::ID nvptx_intrinsic;
std::variant<llvm::Intrinsic::ID,
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>
amdgpu_intrinsic_or_function;
std::variant<llvm::Intrinsic::ID,
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>
spir_intrinsic_or_function;
};
struct TargetIntrinsics GetIntrinsic(TargetIntrinsicID intrin) {
switch (intrin) {
case TargetIntrinsicID::kThreadIdx: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x,
llvm::Intrinsic::amdgcn_workitem_id_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(0)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kThreadIdy: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_y,
llvm::Intrinsic::amdgcn_workitem_id_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(1)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kThreadIdz: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_z,
llvm::Intrinsic::amdgcn_workitem_id_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(2)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdx: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x,
llvm::Intrinsic::amdgcn_workgroup_id_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(0)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdy: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_y,
llvm::Intrinsic::amdgcn_workgroup_id_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(1)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdz: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_z,
llvm::Intrinsic::amdgcn_workgroup_id_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(2)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBarrierId: {
return {llvm::Intrinsic::nvvm_barrier0, llvm::Intrinsic::amdgcn_s_barrier,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z22__spirv_ControlBarrierjjj",
{b_->getInt32(2), b_->getInt32(2), b_->getInt32(272)},
{U32, U32, U32}, U32,
llvm::AttrBuilder(b_->getContext())
.addAttribute(llvm::Attribute::Convergent),
b_);
}};
}
case TargetIntrinsicID::kBlockDimx: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(0)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(0)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kBlockDimy: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(1)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(1)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kBlockDimz: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(2)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(2)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kGroupBarrierId: {
return {llvm::Intrinsic::nvvm_bar_warp_sync,
llvm::Intrinsic::amdgcn_wave_barrier,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z22__spirv_ControlBarrierjjj",
{b_->getInt32(2), b_->getInt32(2), b_->getInt32(272)},
{U32, U32, U32}, U32,
llvm::AttrBuilder(b_->getContext())
.addAttribute(llvm::Attribute::Convergent),
b_);
}};
}
}
}
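// Root names for a device math function in each backend's library: libdevice
// (__nv_*) for NVPTX, OCML (__ocml_*) for AMDGPU, and mangled OpenCL builtins
// (__spirv_ocl_*) for SPIR. ObtainDeviceFunctionName appends the
// type-dependent suffix.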
struct TargetDeviceFunction {
const std::string nvptx_root;
const std::string amdgpu_root;
const std::string spir_root;
};
struct TargetDeviceFunction GetDeviceFunctionRoot(
TargetDeviceFunctionID func_id) {
switch (func_id) {
case TargetDeviceFunctionID::kAtan2: {
return {"__nv_atan2", "__ocml_atan2", "_Z17__spirv_ocl_atan2"};
}
case TargetDeviceFunctionID::kCos: {
return {"__nv_cos", "__ocml_cos", "_Z15__spirv_ocl_cos"};
}
case TargetDeviceFunctionID::kErf: {
return {"__nv_erf", "__ocml_erf", "_Z15__spirv_ocl_erf"};
}
case TargetDeviceFunctionID::kExp: {
return {"__nv_exp", "__ocml_exp", "_Z15__spirv_ocl_exp"};
}
case TargetDeviceFunctionID::kExpm1: {
return {"__nv_expm1", "__ocml_expm1", "_Z17__spirv_ocl_expm1"};
}
case TargetDeviceFunctionID::kFmod: {
return {"__nv_fmod", "__ocml_fmod", "_Z16__spirv_ocl_fmod"};
}
case TargetDeviceFunctionID::kHypot: {
return {"__nv_hypot", "__ocml_hypot", "_Z17__spirv_ocl_hypot"};
}
case TargetDeviceFunctionID::kLog: {
return {"__nv_log", "__ocml_log", "_Z15__spirv_ocl_log"};
}
case TargetDeviceFunctionID::kLog1p: {
return {"__nv_log1p", "__ocml_log1p", "_Z17__spirv_ocl_log1p"};
}
case TargetDeviceFunctionID::kPow: {
return {"__nv_pow", "__ocml_pow", "_Z15__spirv_ocl_pow"};
}
case TargetDeviceFunctionID::kRsqrt: {
return {"__nv_rsqrt", "__ocml_rsqrt", "_Z17__spirv_ocl_rsqrt"};
}
case TargetDeviceFunctionID::kSin: {
return {"__nv_sin", "__ocml_sin", "_Z15__spirv_ocl_sin"};
}
case TargetDeviceFunctionID::kSqrt: {
return {"__nv_sqrt", "__ocml_sqrt", "_Z16__spirv_ocl_sqrt"};
}
case TargetDeviceFunctionID::kTan: {
return {"__nv_tan", "__ocml_tan", "_Z15__spirv_ocl_tan"};
}
case TargetDeviceFunctionID::kTanh: {
return {"__nv_tanh", "__ocml_tanh", "_Z16__spirv_ocl_tanh"};
}
case TargetDeviceFunctionID::kCbrt: {
return {"__nv_cbrt", "__ocml_cbrt", "_Z16__spirv_ocl_cbrt"};
}
}
}
}
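// Maps an elementwise HLO opcode to the device function used to implement
// it, or NotFound when the op has no device-function lowering.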
absl::StatusOr<TargetDeviceFunctionID> GetTargetDeviceFunctionID(HloOpcode op) {
switch (op) {
case HloOpcode::kAtan2:
return TargetDeviceFunctionID::kAtan2;
case HloOpcode::kCos:
return TargetDeviceFunctionID::kCos;
case HloOpcode::kExp:
return TargetDeviceFunctionID::kExp;
case HloOpcode::kErf:
return TargetDeviceFunctionID::kErf;
case HloOpcode::kExpm1:
return TargetDeviceFunctionID::kExpm1;
case HloOpcode::kLog:
return TargetDeviceFunctionID::kLog;
case HloOpcode::kLog1p:
return TargetDeviceFunctionID::kLog1p;
case HloOpcode::kPower:
return TargetDeviceFunctionID::kPow;
case HloOpcode::kRemainder:
return TargetDeviceFunctionID::kFmod;
case HloOpcode::kRsqrt:
return TargetDeviceFunctionID::kRsqrt;
case HloOpcode::kSin:
return TargetDeviceFunctionID::kSin;
case HloOpcode::kSqrt:
return TargetDeviceFunctionID::kSqrt;
case HloOpcode::kTan:
return TargetDeviceFunctionID::kTan;
case HloOpcode::kTanh:
return TargetDeviceFunctionID::kTanh;
case HloOpcode::kCbrt:
return TargetDeviceFunctionID::kCbrt;
default:
break;
}
return NotFound("The HLO opcode %s is not mapped to a device function",
HloOpcodeString(op));
}
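// Builds the concrete function name by appending the type suffix each
// library expects: "f"/"" for f32/f64 on NVPTX, "_f32"/"_f64" on AMDGPU, and
// the mangled parameter types on SPIR (doubled for the two-argument
// functions atan2, fmod, hypot, and pow).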
std::string ObtainDeviceFunctionName(TargetDeviceFunctionID func_id,
PrimitiveType output_type,
llvm::Triple target_triple) {
struct TargetDeviceFunction gpu_root_names = GetDeviceFunctionRoot(func_id);
if (target_triple.isNVPTX()) {
if (output_type == F32) {
return StrCat(gpu_root_names.nvptx_root, "f");
} else if (output_type == F64) {
return gpu_root_names.nvptx_root;
} else {
LOG(FATAL) << "Unexpected type while getting device function name: "
<< primitive_util::LowercasePrimitiveTypeName(output_type);
}
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
if (output_type == F32) {
return StrCat(gpu_root_names.amdgpu_root, "_f32");
} else if (output_type == F64) {
return StrCat(gpu_root_names.amdgpu_root, "_f64");
} else {
LOG(FATAL) << "Unexpected type while getting device function name.";
}
} else if (target_triple.isSPIR()) {
if (output_type == F32) {
if (gpu_root_names.spir_root == "_Z17__spirv_ocl_hypot" ||
gpu_root_names.spir_root == "_Z15__spirv_ocl_pow" ||
gpu_root_names.spir_root == "_Z17__spirv_ocl_atan2" ||
gpu_root_names.spir_root == "_Z16__spirv_ocl_fmod") {
return StrCat(gpu_root_names.spir_root, "ff");
} else {
return StrCat(gpu_root_names.spir_root, "f");
}
} else if (output_type == F64) {
if (gpu_root_names.spir_root == "_Z17__spirv_ocl_hypot" ||
gpu_root_names.spir_root == "_Z15__spirv_ocl_pow" ||
gpu_root_names.spir_root == "_Z17__spirv_ocl_atan2" ||
gpu_root_names.spir_root == "_Z16__spirv_ocl_fmod") {
return StrCat(gpu_root_names.spir_root, "dd");
} else {
return StrCat(gpu_root_names.spir_root, "d");
}
} else {
LOG(FATAL) << "Unexpected type while getting device function name.";
}
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
}
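// Declares the device function in the module if needed and emits a call to
// it, applying `attributes` to the declaration and using the SPIR calling
// convention when targeting SPIR.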
llvm::CallInst* EmitDeviceFunctionCall(
const std::string& callee_name, absl::Span<llvm::Value* const> operands,
absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
const llvm::AttrBuilder& attributes, llvm::IRBuilder<>* b,
absl::string_view name) {
std::vector<llvm::Type*> ir_input_types;
llvm::Module* module = b->GetInsertBlock()->getModule();
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
for (PrimitiveType input_type : input_types) {
ir_input_types.push_back(
llvm_ir::PrimitiveTypeToIrType(input_type, module));
}
llvm::FunctionType* callee_type = llvm::FunctionType::get(
llvm_ir::PrimitiveTypeToIrType(output_type, module),
ir_input_types,
/*isVarArg=*/false);
llvm::Function* callee = llvm::dyn_cast<llvm::Function>(
b->GetInsertBlock()
->getModule()
->getOrInsertFunction(callee_name, callee_type)
.getCallee());
callee->addFnAttrs(attributes);
if (target_triple.isSPIR())
callee->setCallingConv(llvm::CallingConv::SPIR_FUNC);
// Convert via AsStringRef rather than name.data(), since a string_view is
// not guaranteed to be null-terminated.
return b->CreateCall(callee, llvm_ir::AsArrayRef(operands),
llvm_ir::AsStringRef(name));
}
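// Emits the target-specific lowering of `intrinsic_id`: a direct LLVM
// intrinsic call where the target provides one, otherwise the registered
// callback that emits an equivalent device-function call.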
llvm::CallInst* EmitCallToTargetIntrinsic(
TargetIntrinsicID intrinsic_id, absl::Span<llvm::Value* const> operands,
absl::Span<llvm::Type* const> overloaded_types, llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
struct TargetIntrinsics gpu_intrinsic_id = GetIntrinsic(intrinsic_id);
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
llvm::Intrinsic::ID llvm_intrinsic_id = llvm::Intrinsic::not_intrinsic;
if (target_triple.isNVPTX()) {
llvm_intrinsic_id = gpu_intrinsic_id.nvptx_intrinsic;
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
llvm::Intrinsic::ID* llvm_intrinsic_id_ptr =
std::get_if<llvm::Intrinsic::ID>(
&gpu_intrinsic_id.amdgpu_intrinsic_or_function);
if (llvm_intrinsic_id_ptr) {
llvm_intrinsic_id = *llvm_intrinsic_id_ptr;
} else {
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>* builder_func =
std::get_if<std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>(
&gpu_intrinsic_id.amdgpu_intrinsic_or_function);
return (*builder_func)(b);
}
} else if (target_triple.isSPIR()) {
llvm::Intrinsic::ID* llvm_intrinsic_id_ptr =
std::get_if<llvm::Intrinsic::ID>(
&gpu_intrinsic_id.spir_intrinsic_or_function);
if (llvm_intrinsic_id_ptr) {
llvm_intrinsic_id = *llvm_intrinsic_id_ptr;
} else {
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>* builder_func =
std::get_if<std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>(
&gpu_intrinsic_id.spir_intrinsic_or_function);
return (*builder_func)(b);
}
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(
module, llvm_intrinsic_id, llvm_ir::AsArrayRef(overloaded_types));
return b->CreateCall(intrinsic, llvm_ir::AsArrayRef(operands));
}
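// Marks `func` as a GPU kernel entry point in the way each target expects:
// nvvm.annotations metadata for NVPTX, the AMDGPU_KERNEL calling convention
// plus a flat work-group-size hint for AMDGPU, and SPIR_KERNEL for SPIR.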
void AnnotateFunctionAsGpuKernel(llvm::Module* module, llvm::Function* func,
llvm::IRBuilder<>* b) {
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
if (target_triple.isNVPTX()) {
llvm::LLVMContext& context = module->getContext();
llvm::NamedMDNode* nvvm_annotations_node =
module->getOrInsertNamedMetadata("nvvm.annotations");
nvvm_annotations_node->addOperand(llvm::MDNode::get(
context, {llvm::ConstantAsMetadata::get(func),
llvm::MDString::get(context, "kernel"),
llvm::ConstantAsMetadata::get(b->getInt32(1))}));
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
func->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
func->addFnAttr("amdgpu-flat-work-group-size", "1, 1024");
} else if (target_triple.isSPIR()) {
func->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
}
}
} | #include "xla/service/gpu/target_util.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class TargetUtilTest : public testing::Test {
public:
TargetUtilTest() : module_("test", ctx_), builder_(ctx_) {}
protected:
void SetUp() override {
auto fn = llvm::Function::Create(
llvm::FunctionType::get(llvm::Type::getVoidTy(ctx_), {}),
llvm::Function::LinkageTypes::ExternalLinkage, "fn", module_);
auto block = llvm::BasicBlock::Create(ctx_, "blk", fn);
builder_.SetInsertPoint(block);
}
llvm::LLVMContext ctx_;
llvm::Module module_;
llvm::IRBuilder<> builder_;
};
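// Emitting the group barrier for each supported triple should produce IR
// that passes the LLVM verifier.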
TEST_F(TargetUtilTest, NVPTXGroupBarrier) {
module_.setTargetTriple("nvptx");
EmitCallToTargetIntrinsic(TargetIntrinsicID::kGroupBarrierId,
{builder_.getInt32(-1)}, {},
&builder_);
builder_.CreateRetVoid();
EXPECT_FALSE(llvm::verifyModule(module_, &llvm::errs()));
}
TEST_F(TargetUtilTest, AMDGCNGroupBarrier) {
module_.setTargetTriple("amdgcn");
EmitCallToTargetIntrinsic(TargetIntrinsicID::kGroupBarrierId, {}, {},
&builder_);
builder_.CreateRetVoid();
EXPECT_FALSE(llvm::verifyModule(module_, &llvm::errs()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/target_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/target_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10c4bede-30e8-4734-9dc4-15868981b86b | cpp | tensorflow/tensorflow | async_collective_creator | third_party/xla/xla/service/async_collective_creator.cc | third_party/xla/xla/service/async_collective_creator_test.cc | #include "xla/service/async_collective_creator.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/frontend_attributes.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
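// Start/done instruction pair produced when a synchronous collective is
// split into its asynchronous form.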
struct ReplacedAsync {
HloInstruction* start;
HloInstruction* done;
};
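// Replaces a synchronous all-reduce with an all-reduce-start/-done pair,
// preserving the reduction computation, device list, channel id, and layout
// constraint of the original op.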
absl::StatusOr<ReplacedAsync> CreateAsyncAllReduce(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
auto* ar = Cast<HloAllReduceInstruction>(instruction);
HloInstruction* start =
computation->AddInstruction(HloInstruction::CreateAllReduceStart(
ar->shape(), ar->operands(), ar->to_apply(), ar->device_list(),
ar->constrain_layout(), ar->channel_id(),
ar->use_global_device_ids()));
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
ar->shape(), HloOpcode::kAllReduceDone, start));
return ReplacedAsync{start, done};
}
absl::StatusOr<ReplacedAsync> CreateAsyncAllGather(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
auto* ag = Cast<HloAllGatherInstruction>(instruction);
std::vector<const Shape*> operand_shapes;
operand_shapes.reserve(ag->operand_count());
for (const HloInstruction* op : ag->operands()) {
operand_shapes.push_back(&op->shape());
}
Shape shape = ShapeUtil::MakeTupleShape(
{ag->operand_count() > 1
? ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes)
: *operand_shapes[0],
ag->shape()});
HloInstruction* start =
computation->AddInstruction(HloInstruction::CreateAllGatherStart(
shape, ag->operands(), ag->all_gather_dimension(), ag->device_list(),
ag->constrain_layout(), ag->channel_id(),
ag->use_global_device_ids()));
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
ag->shape(), HloOpcode::kAllGatherDone, start));
return ReplacedAsync{start, done};
}
absl::StatusOr<ReplacedAsync> CreateAsyncCollectivePermute(
HloInstruction* instruction, absl::Span<const Shape> context_shapes) {
HloComputation* computation = instruction->parent();
auto* cp = Cast<HloCollectivePermuteInstruction>(instruction);
HloInstruction* start;
HloInstruction* operand = cp->mutable_operand(0);
if (cp->operand_count() == 1) {
start = computation->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
ShapeInference::InferCollectivePermuteStartShape(
{&operand->shape()}, context_shapes)
.value(),
operand, cp->source_target_pairs(), cp->channel_id()));
} else {
CHECK_EQ(cp->operand_count(), 4);
std::vector<const Shape*> operand_shapes;
absl::c_transform(
cp->operands(), std::back_inserter(operand_shapes),
[](const HloInstruction* operand) { return &(operand->shape()); });
start = computation->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
ShapeInference::InferCollectivePermuteStartShape(operand_shapes,
context_shapes)
.value(),
operand, cp->mutable_operand(1), cp->mutable_operand(2),
cp->mutable_operand(3), cp->source_target_pairs(),
cp->dynamic_slice_sizes_list(), cp->channel_id()));
if (HasDisjointReadWriteRegionsAttr(cp)) {
SetDisjointReadWriteRegionsAttr(start);
}
}
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
cp->shape(), HloOpcode::kCollectivePermuteDone, start));
return ReplacedAsync{start, done};
}
absl::StatusOr<ReplacedAsync> CreateAsyncStartDone(
HloInstruction* instruction, absl::Span<const Shape> context_shapes) {
HloComputation* computation = instruction->parent();
TF_ASSIGN_OR_RETURN(
HloInstruction * done,
computation->CreateAsyncInstructions(instruction, context_shapes,
HloInstruction::kMainExecutionThread,
/*replace=*/false));
HloInstruction* start = done->mutable_operand(0);
return ReplacedAsync{start, done};
}
int64_t GetShapeSize(const Shape& shape) {
int64_t size_in_bytes = 0;
if (shape.IsTuple()) {
for (int64_t i = 0; i < shape.tuple_shapes_size(); ++i) {
size_in_bytes += GetShapeSize(shape.tuple_shapes(i));
}
return size_in_bytes;
}
return ShapeUtil::ByteSizeOfElements(shape);
}
}
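// Returns the collectives in `computation` that the config elects to
// convert; all-reduces must additionally meet the configured size threshold.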
std::vector<HloInstruction*> AsyncCollectiveCreator::MatchCollectives(
HloComputation* computation) {
std::vector<HloInstruction*> supported_collectives;
for (HloInstruction* instruction : computation->instructions()) {
const HloOpcode op = instruction->opcode();
if ((op == HloOpcode::kAllReduce &&
config_.convert_all_reduce(instruction) &&
GetShapeSize(instruction->shape()) >=
config_.all_reduce_min_threshold_in_bytes) ||
(op == HloOpcode::kAllGather &&
config_.convert_all_gather(instruction)) ||
(op == HloOpcode::kCollectiveBroadcast &&
config_.convert_collective_broadcast(instruction)) ||
(op == HloOpcode::kCollectivePermute &&
config_.convert_collective_permute(instruction)) ||
(op == HloOpcode::kAllToAll &&
config_.convert_all_to_all(instruction)) ||
(op == HloOpcode::kReduceScatter &&
config_.convert_reduce_scatter(instruction))) {
supported_collectives.push_back(instruction);
}
}
return supported_collectives;
}
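// Rewrites each matched collective into its start/done pair, copying
// metadata, backend config, and control dependencies onto the new
// instructions, and splicing them into the schedule when the computation is
// scheduled.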
absl::StatusOr<bool> AsyncCollectiveCreator::ReplaceCollectives(
HloComputation* computation,
std::vector<HloInstruction*>& supported_collectives) {
bool changed = false;
HloModule* module = computation->parent();
absl::flat_hash_map<HloInstruction*, ReplacedAsync> replaced_pairs;
const bool should_update_schedule =
module->has_schedule() &&
module->schedule().is_computation_scheduled(computation);
for (HloInstruction* instruction : supported_collectives) {
absl::StatusOr<ReplacedAsync> async_pair;
switch (instruction->opcode()) {
case HloOpcode::kAllReduce:
async_pair = CreateAsyncAllReduce(instruction);
break;
case HloOpcode::kAllGather:
async_pair = CreateAsyncAllGather(instruction);
break;
case HloOpcode::kCollectivePermute:
async_pair = CreateAsyncCollectivePermute(
instruction, config_.get_context_shapes(instruction));
break;
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kAllToAll:
case HloOpcode::kReduceScatter:
async_pair = CreateAsyncStartDone(
instruction, config_.get_context_shapes(instruction));
break;
default:
return Internal("Unexpected opcode %s",
HloOpcodeString(instruction->opcode()));
}
TF_RETURN_IF_ERROR(async_pair.status());
async_pair->start->set_metadata(instruction->metadata());
async_pair->start->CopyBackendConfigFrom(instruction);
if (should_update_schedule) {
replaced_pairs[instruction] = *async_pair;
}
TF_RETURN_IF_ERROR(
instruction->CopyAllControlDepsTo(async_pair->start, async_pair->done));
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
computation->ReplaceInstruction(instruction, async_pair->done),
"replacing ", instruction->ToShortString());
changed = true;
}
if (should_update_schedule) {
std::vector<HloInstruction*> new_sequence;
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
new_sequence.reserve(sequence.size() + replaced_pairs.size());
for (HloInstruction* instr : sequence.instructions()) {
auto it = replaced_pairs.find(instr);
if (it != replaced_pairs.end()) {
new_sequence.push_back(it->second.start);
new_sequence.push_back(it->second.done);
continue;
}
new_sequence.push_back(instr);
}
module->schedule().set_sequence(computation, new_sequence);
}
return changed;
}
absl::StatusOr<bool> AsyncCollectiveCreator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
int64_t collectives_replaced = 0;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
std::vector<HloInstruction*> supported_collectives =
MatchCollectives(computation);
if (supported_collectives.empty()) {
continue;
}
TF_ASSIGN_OR_RETURN(bool comp_changed,
ReplaceCollectives(computation, supported_collectives));
collectives_replaced += supported_collectives.size();
changed |= comp_changed;
}
VLOG(1) << "Replaced " << collectives_replaced
<< " sync collectives with async versions.";
return changed;
}
} | #include "xla/service/async_collective_creator.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::NotNull;
using ::testing::SizeIs;
using AsyncAllReduceCreatorTest = HloTestBase;
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllReduce) {
constexpr absl::string_view hlo_string = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[1024] parameter(0)
ROOT ar = f32[1024] all-reduce(p0), to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_reduce = HloPredicateTrue;
config.all_reduce_min_threshold_in_bytes = 4096;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAllReduceDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAllReduceStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllGather) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[1] parameter(0)
ROOT ag = f32[8] all-gather(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_gather = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAllGatherDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAllGatherStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectivePermute) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
%p0 = bf16[8]{0} parameter(0)
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} p0), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleInPlaceCollectivePermute) {
std::string hlo_string = std::string(R"(
HloModule module
ENTRY %module_spmd () -> f32[4,4,128] {
%constant.8 = u32[] constant(0)
%constant.5 = u32[] constant(2)
%tuple.1 = (u32[], u32[], u32[]) tuple(u32[] %constant.8, u32[] %constant.8, u32[] %constant.8)
%tuple = (u32[], u32[], u32[]) tuple(u32[] %constant.5, u32[] %constant.8, u32[] %constant.8)
%custom-call = f32[4,4,128]{2,1,0:T(4,128)} custom-call(), custom_call_target="SomeCustomCall"
ROOT %collective-permute = f32[4,4,128]{2,1,0:T(4,128)} collective-permute(f32[4,4,128]{2,1,0:T(4,128)} %custom-call, f32[4,4,128]{2,1,0:T(4,128)} %custom-call, (u32[], u32[], u32[]) %tuple, (u32[], u32[], u32[]) %tuple.1), channel_id=958, source_target_pairs={{0,4},{4,0},{1,5},{5,1},{2,6},{6,2},{3,7},{7,3}}, slice_sizes={{2,4,128}}
}
)");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 7);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectivePermuteScheduled) {
constexpr absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY entry {
%p0 = bf16[8]{0} parameter(0)
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} p0), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
const int64_t original_instr_sequence_size =
hlo_module->schedule().sequence(hlo_module->entry_computation()).size();
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
EXPECT_EQ(
hlo_module->schedule().sequence(hlo_module->entry_computation()).size(),
original_instr_sequence_size + 1);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectiveBroadcast) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT cb = f32[8,16] collective-broadcast(p0), replica_groups={{7,0,1,2,3,4,5,6}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_broadcast = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kCollectiveBroadcast);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllToAll) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT ata = f32[8,16] all-to-all(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_to_all = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
XLA_VLOG_LINES(0, hlo_module->ToString());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kAllToAll);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleReduceScatter) {
constexpr absl::string_view hlo_string = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT ata = f32[1,16] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_reduce_scatter = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
XLA_VLOG_LINES(0, hlo_module->ToString());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kReduceScatter);
}
TEST_F(AsyncAllReduceCreatorTest, ControlPredecessor) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[1] parameter(0)
ag = f32[8] all-gather(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, control-predecessors={p0}
p1 = f32[1] parameter(1), control-predecessors={ag}
ROOT sum = add(ag, ag)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_gather = HloPredicateTrue;
TF_ASSERT_OK(
RunHloPass(AsyncCollectiveCreator(config), hlo_module.get()).status());
SCOPED_TRACE(hlo_module->ToString());
HloInstruction* start;
HloInstruction* done;
ASSERT_THAT(
hlo_module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::Op(),
m::Op(&done)
.WithOpcode(HloOpcode::kAllGatherDone)
.WithOperand(0, m::Op(&start).WithOpcode(
HloOpcode::kAllGatherStart)))));
EXPECT_EQ(start->control_successors().size(), 0);
ASSERT_EQ(start->control_predecessors().size(), 1);
EXPECT_THAT(start->control_predecessors()[0], GmockMatch(m::Parameter(0)));
EXPECT_EQ(done->control_predecessors().size(), 0);
ASSERT_EQ(done->control_successors().size(), 1);
EXPECT_THAT(done->control_successors()[0], GmockMatch(m::Parameter(1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/async_collective_creator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/async_collective_creator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0977269c-291b-49e7-ae5b-3ecff7f99329 | cpp | google/arolla | accessor_helpers | arolla/io/accessor_helpers.h | arolla/io/accessor_helpers_test.cc | #ifndef AROLLA_IO_ACCESSOR_HELPERS_H_
#define AROLLA_IO_ACCESSOR_HELPERS_H_
#include <cstddef>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
namespace arolla::accessor_helpers_impl {
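// Converts a flat variadic pack of the form (name0, accessor0, name1,
// accessor1, ...) into a tuple of std::pair<std::string, Accessor> entries,
// pairing each even-indexed name with the accessor that follows it.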
template <class... NameAccessors>
class VariadicPackToNestedTupleImpl {
private:
template <size_t I>
using AccessorType =
std::remove_cv_t<std::remove_reference_t<typename std::tuple_element<
I * 2 + 1, std::tuple<NameAccessors...>>::type>>;
template <size_t I>
AccessorType<I> Accessor() const {
return std::get<I * 2 + 1>(name_accessors_);
}
template <size_t... Is>
using NestedTupleType =
std::tuple<std::pair<std::string, AccessorType<Is>>...>;
template <size_t... Is>
NestedTupleType<Is...> MakeNestedTupleImpl(std::index_sequence<Is...>) const {
return std::make_tuple(std::make_pair(Name<Is>(), Accessor<Is>())...);
}
template <size_t I>
std::string Name() const {
return std::string(std::get<I * 2>(name_accessors_));
}
public:
static_assert(
sizeof...(NameAccessors) % 2 == 0,
"NameAccessors must be formed as name, accessor, name, accessor, ...");
static constexpr size_t kAccessorCount = sizeof...(NameAccessors) / 2;
explicit VariadicPackToNestedTupleImpl(
std::tuple<NameAccessors...> name_accessors)
: name_accessors_(name_accessors) {}
auto MakeNestedTuple() const
-> decltype(MakeNestedTupleImpl(
std::make_index_sequence<
VariadicPackToNestedTupleImpl::kAccessorCount>())) {
return MakeNestedTupleImpl(
std::make_index_sequence<
VariadicPackToNestedTupleImpl::kAccessorCount>());
}
private:
std::tuple<NameAccessors...> name_accessors_;
};
template <class... NameAccessors>
auto ConvertNameAccessorsPackToNestedTuple(NameAccessors... name_accessors)
-> decltype(VariadicPackToNestedTupleImpl<NameAccessors...>(
std::make_tuple(name_accessors...))
.MakeNestedTuple()) {
return VariadicPackToNestedTupleImpl<NameAccessors...>(
std::forward_as_tuple(name_accessors...))
.MakeNestedTuple();
}
}
#endif | #include "arolla/io/accessor_helpers.h"
#include <string>
#include <tuple>
#include <utility>
#include "gtest/gtest.h"
namespace arolla::accessor_helpers_impl {
namespace {
struct TestStruct {
int a;
double b;
};
struct GetAConstRef {
const int& operator()(const TestStruct& s) const { return s.a; }
};
struct GetBValue {
double operator()(const TestStruct& s) const { return s.b; }
};
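// Covers the supported accessor shapes: plain functor objects, lambdas, and a
// mix of both, each time checking that names and accessor results survive the
// conversion to the nested tuple.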
TEST(InputLoaderTest, ConvertNameAccessorsPackToNestedTuple) {
{
std::tuple<std::pair<std::string, GetAConstRef>,
std::pair<std::string, GetBValue>>
t = ConvertNameAccessorsPackToNestedTuple("a", GetAConstRef{},
"b", GetBValue{});
EXPECT_EQ(std::get<0>(std::get<0>(t)), "a");
EXPECT_EQ(std::get<0>(std::get<1>(t)), "b");
EXPECT_EQ(std::get<1>(std::get<0>(t))(TestStruct{5, 3.5}), 5);
EXPECT_EQ(std::get<1>(std::get<1>(t))(TestStruct{5, 3.5}), 3.5);
}
{
auto t = ConvertNameAccessorsPackToNestedTuple(
"a", [](const TestStruct& s) { return s.a; },
"b", [](const TestStruct& s) { return s.b; });
EXPECT_EQ(std::get<0>(std::get<0>(t)), "a");
EXPECT_EQ(std::get<0>(std::get<1>(t)), "b");
EXPECT_EQ(std::get<1>(std::get<0>(t))(TestStruct{5, 3.5}), 5);
EXPECT_EQ(std::get<1>(std::get<1>(t))(TestStruct{5, 3.5}), 3.5);
}
{
auto t = ConvertNameAccessorsPackToNestedTuple(
"a", GetAConstRef{},
"b", [](const TestStruct& s) { return s.b; });
EXPECT_EQ(std::get<0>(std::get<0>(t)), "a");
EXPECT_EQ(std::get<0>(std::get<1>(t)), "b");
EXPECT_EQ(std::get<1>(std::get<0>(t))(TestStruct{5, 3.5}), 5);
EXPECT_EQ(std::get<1>(std::get<1>(t))(TestStruct{5, 3.5}), 3.5);
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/accessor_helpers.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/accessor_helpers_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0f8193a5-8939-4a01-a117-059df31d3aab | cpp | google/quiche | quic_received_packet_manager | quiche/quic/core/quic_received_packet_manager.cc | quiche/quic/core/quic_received_packet_manager_test.cc | #include "quiche/quic/core/quic_received_packet_manager.h"
#include <algorithm>
#include <limits>
#include <utility>
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_config.h"
#include "quiche/quic/core/quic_connection_stats.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
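// kMaxPacketsAfterNewMissing bounds how many packets may arrive above a new
// gap before it stops counting as "new" (see HasNewMissingPackets);
// kShortAckDecimationDelay is the min-RTT fraction installed when the kAKD3
// connection option is negotiated.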
const size_t kMaxPacketsAfterNewMissing = 4;
const float kShortAckDecimationDelay = 0.125;
}
QuicReceivedPacketManager::QuicReceivedPacketManager()
: QuicReceivedPacketManager(nullptr) {}
QuicReceivedPacketManager::QuicReceivedPacketManager(QuicConnectionStats* stats)
: ack_frame_updated_(false),
max_ack_ranges_(0),
time_largest_observed_(QuicTime::Zero()),
save_timestamps_(false),
save_timestamps_for_in_order_packets_(false),
stats_(stats),
num_retransmittable_packets_received_since_last_ack_sent_(0),
min_received_before_ack_decimation_(kMinReceivedBeforeAckDecimation),
ack_frequency_(kDefaultRetransmittablePacketsBeforeAck),
ack_decimation_delay_(GetQuicFlag(quic_ack_decimation_delay)),
unlimited_ack_decimation_(false),
one_immediate_ack_(false),
ignore_order_(false),
local_max_ack_delay_(
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs())),
ack_timeout_(QuicTime::Zero()),
time_of_previous_received_packet_(QuicTime::Zero()),
was_last_packet_missing_(false),
last_ack_frequency_frame_sequence_number_(-1) {}
QuicReceivedPacketManager::~QuicReceivedPacketManager() {}
void QuicReceivedPacketManager::SetFromConfig(const QuicConfig& config,
Perspective perspective) {
if (config.HasClientSentConnectionOption(kAKD3, perspective)) {
ack_decimation_delay_ = kShortAckDecimationDelay;
}
if (config.HasClientSentConnectionOption(kAKDU, perspective)) {
unlimited_ack_decimation_ = true;
}
if (config.HasClientSentConnectionOption(k1ACK, perspective)) {
one_immediate_ack_ = true;
}
}
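// Records receipt of a packet: updates reordering statistics, the largest
// observed packet and its receipt time, the ack frame's packet ranges and
// (when enabled) receive timestamps, and ECN counters for marked packets.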
void QuicReceivedPacketManager::RecordPacketReceived(
const QuicPacketHeader& header, QuicTime receipt_time,
const QuicEcnCodepoint ecn) {
const QuicPacketNumber packet_number = header.packet_number;
QUICHE_DCHECK(IsAwaitingPacket(packet_number))
<< " packet_number:" << packet_number;
was_last_packet_missing_ = IsMissing(packet_number);
if (!ack_frame_updated_) {
ack_frame_.received_packet_times.clear();
}
ack_frame_updated_ = true;
bool packet_reordered = false;
if (LargestAcked(ack_frame_).IsInitialized() &&
LargestAcked(ack_frame_) > packet_number) {
packet_reordered = true;
++stats_->packets_reordered;
stats_->max_sequence_reordering =
std::max(stats_->max_sequence_reordering,
LargestAcked(ack_frame_) - packet_number);
int64_t reordering_time_us =
(receipt_time - time_largest_observed_).ToMicroseconds();
stats_->max_time_reordering_us =
std::max(stats_->max_time_reordering_us, reordering_time_us);
}
if (!LargestAcked(ack_frame_).IsInitialized() ||
packet_number > LargestAcked(ack_frame_)) {
ack_frame_.largest_acked = packet_number;
time_largest_observed_ = receipt_time;
}
ack_frame_.packets.Add(packet_number);
MaybeTrimAckRanges();
if (save_timestamps_) {
if (save_timestamps_for_in_order_packets_ && packet_reordered) {
QUIC_DLOG(WARNING) << "Not saving receive timestamp for packet "
<< packet_number;
} else if (!ack_frame_.received_packet_times.empty() &&
ack_frame_.received_packet_times.back().second > receipt_time) {
QUIC_LOG(WARNING)
<< "Receive time went backwards from: "
<< ack_frame_.received_packet_times.back().second.ToDebuggingValue()
<< " to " << receipt_time.ToDebuggingValue();
} else {
ack_frame_.received_packet_times.push_back(
std::make_pair(packet_number, receipt_time));
}
}
if (ecn != ECN_NOT_ECT) {
if (!ack_frame_.ecn_counters.has_value()) {
ack_frame_.ecn_counters = QuicEcnCounts();
}
switch (ecn) {
case ECN_NOT_ECT:
QUICHE_NOTREACHED();
break;
case ECN_ECT0:
ack_frame_.ecn_counters->ect0++;
break;
case ECN_ECT1:
ack_frame_.ecn_counters->ect1++;
break;
case ECN_CE:
ack_frame_.ecn_counters->ce++;
break;
}
}
if (least_received_packet_number_.IsInitialized()) {
least_received_packet_number_ =
std::min(least_received_packet_number_, packet_number);
} else {
least_received_packet_number_ = packet_number;
}
}
void QuicReceivedPacketManager::MaybeTrimAckRanges() {
while (max_ack_ranges_ > 0 &&
ack_frame_.packets.NumIntervals() > max_ack_ranges_) {
ack_frame_.packets.RemoveSmallestInterval();
}
}
bool QuicReceivedPacketManager::IsMissing(QuicPacketNumber packet_number) {
return LargestAcked(ack_frame_).IsInitialized() &&
packet_number < LargestAcked(ack_frame_) &&
!ack_frame_.packets.Contains(packet_number);
}
bool QuicReceivedPacketManager::IsAwaitingPacket(
QuicPacketNumber packet_number) const {
return quic::IsAwaitingPacket(ack_frame_, packet_number,
peer_least_packet_awaiting_ack_);
}
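// Builds the ack frame to be sent now: computes ack_delay_time relative to
// when the largest packet was observed, trims the ack ranges down to
// max_ack_ranges_ (with a bug check against a runaway loop), and drops
// receive timestamps that fall more than 255 packets behind the largest
// acked.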
const QuicFrame QuicReceivedPacketManager::GetUpdatedAckFrame(
QuicTime approximate_now) {
if (time_largest_observed_ == QuicTime::Zero()) {
ack_frame_.ack_delay_time = QuicTime::Delta::Infinite();
} else {
ack_frame_.ack_delay_time = approximate_now < time_largest_observed_
? QuicTime::Delta::Zero()
: approximate_now - time_largest_observed_;
}
const size_t initial_ack_ranges = ack_frame_.packets.NumIntervals();
uint64_t num_iterations = 0;
while (max_ack_ranges_ > 0 &&
ack_frame_.packets.NumIntervals() > max_ack_ranges_) {
num_iterations++;
QUIC_BUG_IF(quic_rpm_too_many_ack_ranges, (num_iterations % 100000) == 0)
<< "Too many ack ranges to remove, possibly a dead loop. "
"initial_ack_ranges:"
<< initial_ack_ranges << " max_ack_ranges:" << max_ack_ranges_
<< ", current_ack_ranges:" << ack_frame_.packets.NumIntervals()
<< " num_iterations:" << num_iterations;
ack_frame_.packets.RemoveSmallestInterval();
}
for (auto it = ack_frame_.received_packet_times.begin();
it != ack_frame_.received_packet_times.end();) {
if (LargestAcked(ack_frame_) - it->first >=
std::numeric_limits<uint8_t>::max()) {
it = ack_frame_.received_packet_times.erase(it);
} else {
++it;
}
}
#if QUIC_FRAME_DEBUG
QuicFrame frame = QuicFrame(&ack_frame_);
frame.delete_forbidden = true;
return frame;
#else
return QuicFrame(&ack_frame_);
#endif
}
void QuicReceivedPacketManager::DontWaitForPacketsBefore(
QuicPacketNumber least_unacked) {
if (!least_unacked.IsInitialized()) {
return;
}
QUICHE_DCHECK(!peer_least_packet_awaiting_ack_.IsInitialized() ||
peer_least_packet_awaiting_ack_ <= least_unacked);
if (!peer_least_packet_awaiting_ack_.IsInitialized() ||
least_unacked > peer_least_packet_awaiting_ack_) {
peer_least_packet_awaiting_ack_ = least_unacked;
bool packets_updated = ack_frame_.packets.RemoveUpTo(least_unacked);
if (packets_updated) {
ack_frame_updated_ = true;
}
}
QUICHE_DCHECK(ack_frame_.packets.Empty() ||
!peer_least_packet_awaiting_ack_.IsInitialized() ||
ack_frame_.packets.Min() >= peer_least_packet_awaiting_ack_);
}
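// Returns the delayed-ack timeout: the locally configured maximum until ack
// decimation kicks in (or whenever an ACK_FREQUENCY frame is in effect), and
// afterwards a fraction of min RTT, floored at the alarm granularity.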
QuicTime::Delta QuicReceivedPacketManager::GetMaxAckDelay(
QuicPacketNumber last_received_packet_number,
const RttStats& rtt_stats) const {
if (AckFrequencyFrameReceived() ||
last_received_packet_number < PeerFirstSendingPacketNumber() +
min_received_before_ack_decimation_) {
return local_max_ack_delay_;
}
QuicTime::Delta ack_delay = std::min(
local_max_ack_delay_, rtt_stats.min_rtt() * ack_decimation_delay_);
return std::max(ack_delay, kAlarmGranularity);
}
void QuicReceivedPacketManager::MaybeUpdateAckFrequency(
QuicPacketNumber last_received_packet_number) {
if (AckFrequencyFrameReceived()) {
return;
}
if (last_received_packet_number <
PeerFirstSendingPacketNumber() + min_received_before_ack_decimation_) {
return;
}
ack_frequency_ = unlimited_ack_decimation_
? std::numeric_limits<size_t>::max()
: kMaxRetransmittablePacketsBeforeAck;
}
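// Decides whether and when an ack should fire for the packet just received:
// immediately for packets that fill an already-reported gap or open a new one
// (unless ordering is ignored), immediately after ack_frequency_
// retransmittable packets, and otherwise after the max ack delay measured
// from the packet's receipt time.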
void QuicReceivedPacketManager::MaybeUpdateAckTimeout(
bool should_last_packet_instigate_acks,
QuicPacketNumber last_received_packet_number,
QuicTime last_packet_receipt_time, QuicTime now,
const RttStats* rtt_stats) {
if (!ack_frame_updated_) {
return;
}
if (!ignore_order_ && was_last_packet_missing_ &&
last_sent_largest_acked_.IsInitialized() &&
last_received_packet_number < last_sent_largest_acked_) {
ack_timeout_ = now;
return;
}
if (!should_last_packet_instigate_acks) {
return;
}
++num_retransmittable_packets_received_since_last_ack_sent_;
MaybeUpdateAckFrequency(last_received_packet_number);
if (num_retransmittable_packets_received_since_last_ack_sent_ >=
ack_frequency_) {
ack_timeout_ = now;
return;
}
if (!ignore_order_ && HasNewMissingPackets()) {
ack_timeout_ = now;
return;
}
const QuicTime updated_ack_time = std::max(
now, std::min(last_packet_receipt_time, now) +
GetMaxAckDelay(last_received_packet_number, *rtt_stats));
if (!ack_timeout_.IsInitialized() || ack_timeout_ > updated_ack_time) {
ack_timeout_ = updated_ack_time;
}
}
void QuicReceivedPacketManager::ResetAckStates() {
ack_frame_updated_ = false;
ack_timeout_ = QuicTime::Zero();
num_retransmittable_packets_received_since_last_ack_sent_ = 0;
last_sent_largest_acked_ = LargestAcked(ack_frame_);
}
bool QuicReceivedPacketManager::HasMissingPackets() const {
if (ack_frame_.packets.Empty()) {
return false;
}
if (ack_frame_.packets.NumIntervals() > 1) {
return true;
}
return peer_least_packet_awaiting_ack_.IsInitialized() &&
ack_frame_.packets.Min() > peer_least_packet_awaiting_ack_;
}
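// A gap only counts as "new" while the interval above it is still short:
// exactly one packet in one-immediate-ack mode, at most
// kMaxPacketsAfterNewMissing packets otherwise.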
bool QuicReceivedPacketManager::HasNewMissingPackets() const {
if (one_immediate_ack_) {
return HasMissingPackets() && ack_frame_.packets.LastIntervalLength() == 1;
}
return HasMissingPackets() &&
ack_frame_.packets.LastIntervalLength() <= kMaxPacketsAfterNewMissing;
}
bool QuicReceivedPacketManager::ack_frame_updated() const {
return ack_frame_updated_;
}
QuicPacketNumber QuicReceivedPacketManager::GetLargestObserved() const {
return LargestAcked(ack_frame_);
}
QuicPacketNumber QuicReceivedPacketManager::PeerFirstSendingPacketNumber()
const {
if (!least_received_packet_number_.IsInitialized()) {
QUIC_BUG(quic_bug_10849_1) << "No packets have been received yet";
return QuicPacketNumber(1);
}
return least_received_packet_number_;
}
bool QuicReceivedPacketManager::IsAckFrameEmpty() const {
return ack_frame_.packets.Empty();
}
void QuicReceivedPacketManager::OnAckFrequencyFrame(
const QuicAckFrequencyFrame& frame) {
int64_t new_sequence_number = frame.sequence_number;
if (new_sequence_number <= last_ack_frequency_frame_sequence_number_) {
return;
}
last_ack_frequency_frame_sequence_number_ = new_sequence_number;
ack_frequency_ = frame.packet_tolerance;
local_max_ack_delay_ = frame.max_ack_delay;
ignore_order_ = frame.ignore_order;
}
} | #include "quiche/quic/core/quic_received_packet_manager.h"
#include <algorithm>
#include <cstddef>
#include <ostream>
#include <vector>
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_connection_stats.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
namespace quic {
namespace test {
class QuicReceivedPacketManagerPeer {
public:
static void SetOneImmediateAck(QuicReceivedPacketManager* manager,
bool one_immediate_ack) {
manager->one_immediate_ack_ = one_immediate_ack;
}
static void SetAckDecimationDelay(QuicReceivedPacketManager* manager,
float ack_decimation_delay) {
manager->ack_decimation_delay_ = ack_decimation_delay;
}
};
namespace {
const bool kInstigateAck = true;
const QuicTime::Delta kMinRttMs = QuicTime::Delta::FromMilliseconds(40);
const QuicTime::Delta kDelayedAckTime =
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
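// The fixture drives the manager with a mock clock and a 40ms min RTT.
// CheckAckTimeout additionally resets ack state once the expected deadline
// has passed, standing in for an ack actually being sent.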
class QuicReceivedPacketManagerTest : public QuicTest {
protected:
QuicReceivedPacketManagerTest() : received_manager_(&stats_) {
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
rtt_stats_.UpdateRtt(kMinRttMs, QuicTime::Delta::Zero(), QuicTime::Zero());
received_manager_.set_save_timestamps(true, false);
}
void RecordPacketReceipt(uint64_t packet_number) {
RecordPacketReceipt(packet_number, QuicTime::Zero());
}
void RecordPacketReceipt(uint64_t packet_number, QuicTime receipt_time) {
RecordPacketReceipt(packet_number, receipt_time, ECN_NOT_ECT);
}
void RecordPacketReceipt(uint64_t packet_number, QuicTime receipt_time,
QuicEcnCodepoint ecn_codepoint) {
QuicPacketHeader header;
header.packet_number = QuicPacketNumber(packet_number);
received_manager_.RecordPacketReceived(header, receipt_time, ecn_codepoint);
}
bool HasPendingAck() {
return received_manager_.ack_timeout().IsInitialized();
}
void MaybeUpdateAckTimeout(bool should_last_packet_instigate_acks,
uint64_t last_received_packet_number) {
received_manager_.MaybeUpdateAckTimeout(
should_last_packet_instigate_acks,
QuicPacketNumber(last_received_packet_number),
clock_.ApproximateNow(),
clock_.ApproximateNow(), &rtt_stats_);
}
void CheckAckTimeout(QuicTime time) {
QUICHE_DCHECK(HasPendingAck());
QUICHE_DCHECK_EQ(received_manager_.ack_timeout(), time);
if (time <= clock_.ApproximateNow()) {
received_manager_.ResetAckStates();
QUICHE_DCHECK(!HasPendingAck());
}
}
MockClock clock_;
RttStats rtt_stats_;
QuicConnectionStats stats_;
QuicReceivedPacketManager received_manager_;
};
TEST_F(QuicReceivedPacketManagerTest, DontWaitForPacketsBefore) {
QuicPacketHeader header;
header.packet_number = QuicPacketNumber(2u);
received_manager_.RecordPacketReceived(header, QuicTime::Zero(), ECN_NOT_ECT);
header.packet_number = QuicPacketNumber(7u);
received_manager_.RecordPacketReceived(header, QuicTime::Zero(), ECN_NOT_ECT);
EXPECT_TRUE(received_manager_.IsAwaitingPacket(QuicPacketNumber(3u)));
EXPECT_TRUE(received_manager_.IsAwaitingPacket(QuicPacketNumber(6u)));
received_manager_.DontWaitForPacketsBefore(QuicPacketNumber(4));
EXPECT_FALSE(received_manager_.IsAwaitingPacket(QuicPacketNumber(3u)));
EXPECT_TRUE(received_manager_.IsAwaitingPacket(QuicPacketNumber(6u)));
}
TEST_F(QuicReceivedPacketManagerTest, GetUpdatedAckFrame) {
QuicPacketHeader header;
header.packet_number = QuicPacketNumber(2u);
QuicTime two_ms = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(2);
EXPECT_FALSE(received_manager_.ack_frame_updated());
received_manager_.RecordPacketReceived(header, two_ms, ECN_NOT_ECT);
EXPECT_TRUE(received_manager_.ack_frame_updated());
QuicFrame ack = received_manager_.GetUpdatedAckFrame(QuicTime::Zero());
received_manager_.ResetAckStates();
EXPECT_FALSE(received_manager_.ack_frame_updated());
EXPECT_EQ(QuicTime::Delta::Zero(), ack.ack_frame->ack_delay_time);
EXPECT_EQ(1u, ack.ack_frame->received_packet_times.size());
QuicTime four_ms = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(4);
ack = received_manager_.GetUpdatedAckFrame(four_ms);
received_manager_.ResetAckStates();
EXPECT_FALSE(received_manager_.ack_frame_updated());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(2),
ack.ack_frame->ack_delay_time);
EXPECT_EQ(1u, ack.ack_frame->received_packet_times.size());
header.packet_number = QuicPacketNumber(999u);
received_manager_.RecordPacketReceived(header, two_ms, ECN_NOT_ECT);
header.packet_number = QuicPacketNumber(4u);
received_manager_.RecordPacketReceived(header, two_ms, ECN_NOT_ECT);
header.packet_number = QuicPacketNumber(1000u);
received_manager_.RecordPacketReceived(header, two_ms, ECN_NOT_ECT);
EXPECT_TRUE(received_manager_.ack_frame_updated());
ack = received_manager_.GetUpdatedAckFrame(two_ms);
received_manager_.ResetAckStates();
EXPECT_FALSE(received_manager_.ack_frame_updated());
EXPECT_EQ(2u, ack.ack_frame->received_packet_times.size());
}
TEST_F(QuicReceivedPacketManagerTest, UpdateReceivedConnectionStats) {
EXPECT_FALSE(received_manager_.ack_frame_updated());
RecordPacketReceipt(1);
EXPECT_TRUE(received_manager_.ack_frame_updated());
RecordPacketReceipt(6);
RecordPacketReceipt(2,
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1));
EXPECT_EQ(4u, stats_.max_sequence_reordering);
EXPECT_EQ(1000, stats_.max_time_reordering_us);
EXPECT_EQ(1u, stats_.packets_reordered);
}
TEST_F(QuicReceivedPacketManagerTest, LimitAckRanges) {
received_manager_.set_max_ack_ranges(10);
EXPECT_FALSE(received_manager_.ack_frame_updated());
for (int i = 0; i < 100; ++i) {
RecordPacketReceipt(1 + 2 * i);
EXPECT_TRUE(received_manager_.ack_frame_updated());
received_manager_.GetUpdatedAckFrame(QuicTime::Zero());
EXPECT_GE(10u, received_manager_.ack_frame().packets.NumIntervals());
EXPECT_EQ(QuicPacketNumber(1u + 2 * i),
received_manager_.ack_frame().packets.Max());
for (int j = 0; j < std::min(10, i + 1); ++j) {
ASSERT_GE(i, j);
EXPECT_TRUE(received_manager_.ack_frame().packets.Contains(
QuicPacketNumber(1 + (i - j) * 2)));
if (i > j) {
EXPECT_FALSE(received_manager_.ack_frame().packets.Contains(
QuicPacketNumber((i - j) * 2)));
}
}
}
}
TEST_F(QuicReceivedPacketManagerTest, TrimAckRangesEarly) {
const size_t kMaxAckRanges = 10;
received_manager_.set_max_ack_ranges(kMaxAckRanges);
for (size_t i = 0; i < kMaxAckRanges + 10; ++i) {
RecordPacketReceipt(1 + 2 * i);
if (i < kMaxAckRanges) {
EXPECT_EQ(i + 1, received_manager_.ack_frame().packets.NumIntervals());
} else {
EXPECT_EQ(kMaxAckRanges,
received_manager_.ack_frame().packets.NumIntervals());
}
}
}
TEST_F(QuicReceivedPacketManagerTest, IgnoreOutOfOrderTimestamps) {
EXPECT_FALSE(received_manager_.ack_frame_updated());
RecordPacketReceipt(1, QuicTime::Zero());
EXPECT_TRUE(received_manager_.ack_frame_updated());
EXPECT_EQ(1u, received_manager_.ack_frame().received_packet_times.size());
RecordPacketReceipt(2,
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1));
EXPECT_EQ(2u, received_manager_.ack_frame().received_packet_times.size());
RecordPacketReceipt(3, QuicTime::Zero());
EXPECT_EQ(2u, received_manager_.ack_frame().received_packet_times.size());
}
TEST_F(QuicReceivedPacketManagerTest, IgnoreOutOfOrderPackets) {
received_manager_.set_save_timestamps(true, true);
EXPECT_FALSE(received_manager_.ack_frame_updated());
RecordPacketReceipt(1, QuicTime::Zero());
EXPECT_TRUE(received_manager_.ack_frame_updated());
EXPECT_EQ(1u, received_manager_.ack_frame().received_packet_times.size());
RecordPacketReceipt(4,
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1));
EXPECT_EQ(2u, received_manager_.ack_frame().received_packet_times.size());
RecordPacketReceipt(3,
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(3));
EXPECT_EQ(2u, received_manager_.ack_frame().received_packet_times.size());
}
TEST_F(QuicReceivedPacketManagerTest, HasMissingPackets) {
EXPECT_QUIC_BUG(received_manager_.PeerFirstSendingPacketNumber(),
"No packets have been received yet");
RecordPacketReceipt(4, QuicTime::Zero());
EXPECT_EQ(QuicPacketNumber(4),
received_manager_.PeerFirstSendingPacketNumber());
EXPECT_FALSE(received_manager_.HasMissingPackets());
RecordPacketReceipt(3, QuicTime::Zero());
EXPECT_FALSE(received_manager_.HasMissingPackets());
EXPECT_EQ(QuicPacketNumber(3),
received_manager_.PeerFirstSendingPacketNumber());
RecordPacketReceipt(1, QuicTime::Zero());
EXPECT_EQ(QuicPacketNumber(1),
received_manager_.PeerFirstSendingPacketNumber());
EXPECT_TRUE(received_manager_.HasMissingPackets());
RecordPacketReceipt(2, QuicTime::Zero());
EXPECT_EQ(QuicPacketNumber(1),
received_manager_.PeerFirstSendingPacketNumber());
EXPECT_FALSE(received_manager_.HasMissingPackets());
}
TEST_F(QuicReceivedPacketManagerTest, OutOfOrderReceiptCausesAckSent) {
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(3, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 3);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
RecordPacketReceipt(5, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 5);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(6, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 6);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(2, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 2);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(1, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 1);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(7, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 7);
CheckAckTimeout(clock_.ApproximateNow());
}
TEST_F(QuicReceivedPacketManagerTest, OutOfOrderReceiptCausesAckSent1Ack) {
QuicReceivedPacketManagerPeer::SetOneImmediateAck(&received_manager_, true);
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(3, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 3);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
RecordPacketReceipt(5, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 5);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(6, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 6);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
RecordPacketReceipt(2, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 2);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(1, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 1);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(7, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 7);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
TEST_F(QuicReceivedPacketManagerTest, OutOfOrderAckReceiptCausesNoAck) {
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(2, clock_.ApproximateNow());
MaybeUpdateAckTimeout(!kInstigateAck, 2);
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(1, clock_.ApproximateNow());
MaybeUpdateAckTimeout(!kInstigateAck, 1);
EXPECT_FALSE(HasPendingAck());
}
TEST_F(QuicReceivedPacketManagerTest, AckReceiptCausesAckSend) {
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(1, clock_.ApproximateNow());
MaybeUpdateAckTimeout(!kInstigateAck, 1);
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(2, clock_.ApproximateNow());
MaybeUpdateAckTimeout(!kInstigateAck, 2);
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(3, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 3);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
clock_.AdvanceTime(kDelayedAckTime);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(4, clock_.ApproximateNow());
MaybeUpdateAckTimeout(!kInstigateAck, 4);
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(5, clock_.ApproximateNow());
MaybeUpdateAckTimeout(!kInstigateAck, 5);
EXPECT_FALSE(HasPendingAck());
}
TEST_F(QuicReceivedPacketManagerTest, AckSentEveryNthPacket) {
EXPECT_FALSE(HasPendingAck());
received_manager_.set_ack_frequency(3);
for (size_t i = 1; i <= 39; ++i) {
RecordPacketReceipt(i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, i);
if (i % 3 == 0) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
}
}
TEST_F(QuicReceivedPacketManagerTest, AckDecimationReducesAcks) {
EXPECT_FALSE(HasPendingAck());
received_manager_.set_min_received_before_ack_decimation(10);
for (size_t i = 1; i <= 29; ++i) {
RecordPacketReceipt(i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, i);
if (i <= 10) {
if (i % 2 == 0) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
continue;
}
if (i == 20) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + kMinRttMs * 0.25);
}
}
RecordPacketReceipt(30, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 30);
CheckAckTimeout(clock_.ApproximateNow());
}
TEST_F(QuicReceivedPacketManagerTest, SendDelayedAckDecimation) {
EXPECT_FALSE(HasPendingAck());
QuicTime ack_time = clock_.ApproximateNow() + kMinRttMs * 0.25;
uint64_t kFirstDecimatedPacket = 101;
for (uint64_t i = 1; i < kFirstDecimatedPacket; ++i) {
RecordPacketReceipt(i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, i);
if (i % 2 == 0) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
}
RecordPacketReceipt(kFirstDecimatedPacket, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket);
CheckAckTimeout(ack_time);
for (uint64_t i = 1; i < 10; ++i) {
RecordPacketReceipt(kFirstDecimatedPacket + i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket + i);
}
CheckAckTimeout(clock_.ApproximateNow());
}
TEST_F(QuicReceivedPacketManagerTest, SendDelayedAckDecimationMin1ms) {
EXPECT_FALSE(HasPendingAck());
rtt_stats_.UpdateRtt(kAlarmGranularity, QuicTime::Delta::Zero(),
clock_.ApproximateNow());
QuicTime ack_time = clock_.ApproximateNow() + kAlarmGranularity;
uint64_t kFirstDecimatedPacket = 101;
for (uint64_t i = 1; i < kFirstDecimatedPacket; ++i) {
RecordPacketReceipt(i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, i);
if (i % 2 == 0) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
}
RecordPacketReceipt(kFirstDecimatedPacket, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket);
CheckAckTimeout(ack_time);
for (uint64_t i = 1; i < 10; ++i) {
RecordPacketReceipt(kFirstDecimatedPacket + i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket + i);
}
CheckAckTimeout(clock_.ApproximateNow());
}
TEST_F(QuicReceivedPacketManagerTest,
SendDelayedAckDecimationUnlimitedAggregation) {
EXPECT_FALSE(HasPendingAck());
QuicConfig config;
QuicTagVector connection_options;
connection_options.push_back(kAKDU);
config.SetConnectionOptionsToSend(connection_options);
received_manager_.SetFromConfig(config, Perspective::IS_CLIENT);
QuicTime ack_time = clock_.ApproximateNow() + kMinRttMs * 0.25;
uint64_t kFirstDecimatedPacket = 101;
for (uint64_t i = 1; i < kFirstDecimatedPacket; ++i) {
RecordPacketReceipt(i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, i);
if (i % 2 == 0) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
}
RecordPacketReceipt(kFirstDecimatedPacket, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket);
CheckAckTimeout(ack_time);
for (int i = 1; i <= 18; ++i) {
RecordPacketReceipt(kFirstDecimatedPacket + i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket + i);
}
CheckAckTimeout(ack_time);
}
TEST_F(QuicReceivedPacketManagerTest, SendDelayedAckDecimationEighthRtt) {
EXPECT_FALSE(HasPendingAck());
QuicReceivedPacketManagerPeer::SetAckDecimationDelay(&received_manager_,
0.125);
QuicTime ack_time = clock_.ApproximateNow() + kMinRttMs * 0.125;
uint64_t kFirstDecimatedPacket = 101;
for (uint64_t i = 1; i < kFirstDecimatedPacket; ++i) {
RecordPacketReceipt(i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, i);
if (i % 2 == 0) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
}
RecordPacketReceipt(kFirstDecimatedPacket, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket);
CheckAckTimeout(ack_time);
for (uint64_t i = 1; i < 10; ++i) {
RecordPacketReceipt(kFirstDecimatedPacket + i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket + i);
}
CheckAckTimeout(clock_.ApproximateNow());
}
TEST_F(QuicReceivedPacketManagerTest,
UpdateMaxAckDelayAndAckFrequencyFromAckFrequencyFrame) {
EXPECT_FALSE(HasPendingAck());
QuicAckFrequencyFrame frame;
frame.max_ack_delay = QuicTime::Delta::FromMilliseconds(10);
frame.packet_tolerance = 5;
received_manager_.OnAckFrequencyFrame(frame);
for (int i = 1; i <= 50; ++i) {
RecordPacketReceipt(i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, i);
if (i % frame.packet_tolerance == 0) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + frame.max_ack_delay);
}
}
}
TEST_F(QuicReceivedPacketManagerTest,
DisableOutOfOrderAckByIgnoreOrderFromAckFrequencyFrame) {
EXPECT_FALSE(HasPendingAck());
QuicAckFrequencyFrame frame;
frame.max_ack_delay = kDelayedAckTime;
frame.packet_tolerance = 2;
frame.ignore_order = true;
received_manager_.OnAckFrequencyFrame(frame);
RecordPacketReceipt(4, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 4);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
RecordPacketReceipt(5, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 5);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(3, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 3);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
RecordPacketReceipt(2, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 2);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(1, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 1);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
TEST_F(QuicReceivedPacketManagerTest,
DisableMissingPaketsAckByIgnoreOrderFromAckFrequencyFrame) {
EXPECT_FALSE(HasPendingAck());
QuicConfig config;
config.SetConnectionOptionsToSend({kAFFE});
received_manager_.SetFromConfig(config, Perspective::IS_CLIENT);
QuicAckFrequencyFrame frame;
frame.max_ack_delay = kDelayedAckTime;
frame.packet_tolerance = 2;
frame.ignore_order = true;
received_manager_.OnAckFrequencyFrame(frame);
RecordPacketReceipt(1, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 1);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
RecordPacketReceipt(2, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 2);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(4, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 4);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
RecordPacketReceipt(5, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 5);
CheckAckTimeout(clock_.ApproximateNow());
RecordPacketReceipt(7, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 7);
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
TEST_F(QuicReceivedPacketManagerTest,
AckDecimationDisabledWhenAckFrequencyFrameIsReceived) {
EXPECT_FALSE(HasPendingAck());
QuicAckFrequencyFrame frame;
frame.max_ack_delay = kDelayedAckTime;
frame.packet_tolerance = 3;
frame.ignore_order = true;
received_manager_.OnAckFrequencyFrame(frame);
uint64_t kFirstDecimatedPacket = 101;
uint64_t FiftyPacketsAfterAckDecimation = kFirstDecimatedPacket + 50;
for (uint64_t i = 1; i < FiftyPacketsAfterAckDecimation; ++i) {
RecordPacketReceipt(i, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, i);
if (i % 3 == 0) {
CheckAckTimeout(clock_.ApproximateNow());
} else {
CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
}
}
TEST_F(QuicReceivedPacketManagerTest, UpdateAckTimeoutOnPacketReceiptTime) {
EXPECT_FALSE(HasPendingAck());
QuicTime packet_receipt_time3 = clock_.ApproximateNow();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
RecordPacketReceipt(3, packet_receipt_time3);
received_manager_.MaybeUpdateAckTimeout(
kInstigateAck, QuicPacketNumber(3),
packet_receipt_time3,
clock_.ApproximateNow(), &rtt_stats_);
CheckAckTimeout(packet_receipt_time3 + kDelayedAckTime);
RecordPacketReceipt(4, clock_.ApproximateNow());
MaybeUpdateAckTimeout(kInstigateAck, 4);
CheckAckTimeout(clock_.ApproximateNow());
}
TEST_F(QuicReceivedPacketManagerTest,
UpdateAckTimeoutOnPacketReceiptTimeLongerQueuingTime) {
EXPECT_FALSE(HasPendingAck());
QuicTime packet_receipt_time3 = clock_.ApproximateNow();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
RecordPacketReceipt(3, packet_receipt_time3);
received_manager_.MaybeUpdateAckTimeout(
kInstigateAck, QuicPacketNumber(3),
packet_receipt_time3,
clock_.ApproximateNow(), &rtt_stats_);
CheckAckTimeout(clock_.ApproximateNow());
}
TEST_F(QuicReceivedPacketManagerTest, CountEcnPackets) {
EXPECT_FALSE(HasPendingAck());
RecordPacketReceipt(3, QuicTime::Zero(), ECN_NOT_ECT);
RecordPacketReceipt(4, QuicTime::Zero(), ECN_ECT0);
RecordPacketReceipt(5, QuicTime::Zero(), ECN_ECT1);
RecordPacketReceipt(6, QuicTime::Zero(), ECN_CE);
QuicFrame ack = received_manager_.GetUpdatedAckFrame(QuicTime::Zero());
EXPECT_TRUE(ack.ack_frame->ecn_counters.has_value());
EXPECT_EQ(ack.ack_frame->ecn_counters->ect0, 1);
EXPECT_EQ(ack.ack_frame->ecn_counters->ect1, 1);
EXPECT_EQ(ack.ack_frame->ecn_counters->ce, 1);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_received_packet_manager.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_received_packet_manager_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
800feb57-d016-4a87-a837-c3d1bbe0754c | cpp | tensorflow/tensorflow | reduction_base | third_party/xla/xla/service/gpu/fusions/reduction_base.cc | third_party/xla/xla/service/gpu/fusions/reduction_base_test.cc | #include "xla/service/gpu/fusions/reduction_base.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/container/node_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/AffineExpr.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/union_find.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
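// When the reduced dimension is a small divisor of the warp size, several
// rows can be reduced within a single warp; otherwise one row per warp.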
int RowReductionGetRowsPerWarp(int reduced_dimension_size) {
if (WarpSize() % reduced_dimension_size != 0 ||
reduced_dimension_size >= WarpSize()) {
return 1;
}
return WarpSize() / reduced_dimension_size;
}
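// Vector width for the legacy emitter: only row reductions on CUDA devices
// ever vectorize (by 2), and pre-Volta parts additionally require narrow
// inputs and a minor reduced dimension divisible by the tiling-times-threads
// product.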
int GetVectorSize(const HloFusionAnalysis& analysis,
const ReductionDimensions& reduction_dimensions,
int num_threads, Vector3 reduction_tiling) {
int64_t minor_dim = reduction_dimensions.dimensions.back();
if (minor_dim % 2 != 0) {
return 1;
}
if (num_threads * 2 > minor_dim) {
return 1;
}
if (MayPreventVectorization(analysis.fusion())) {
return 1;
}
if (reduction_dimensions.is_row_reduction) {
constexpr int kRowMinorReduced =
ReductionDimensions::kRowMinorReducedDimension;
const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(
&analysis.device_info().gpu_compute_capability());
if (cuda_cc == nullptr) return 1;
if (cuda_cc->IsAtLeast(se::CudaComputeCapability::VOLTA)) return 2;
if (cuda_cc->IsAtLeast(se::CudaComputeCapability::PASCAL_)) {
return analysis.input_output_info().smallest_input_dtype_bits <= 32 &&
reduction_dimensions.dimensions[kRowMinorReduced] %
(reduction_tiling[kRowMinorReduced] *
num_threads) ==
0
? 2
: 1;
}
return 1;
}
return 1;
}
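// Vector width for the MLIR emitter: 1 for odd or small minor dimensions and
// for complex or 64-bit inputs, 2 for 32-bit inputs, and up to 4 for narrower
// types when the minor dimension is wide enough and divisible by 4.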
int GetVectorSizeForMlir(const HloFusionAnalysis& analysis, int64_t minor_dim,
int num_threads) {
if (minor_dim % 2 != 0) {
return 1;
}
if (num_threads * 2 > minor_dim) {
return 1;
}
for (HloInstructionAdaptor hero : analysis.fusion_heroes()) {
for (HloInstructionAdaptor operand : hero.GetOperands()) {
if (primitive_util::IsComplexType(operand.shape().element_type())) {
return 1;
}
}
}
if (analysis.input_output_info().smallest_input_dtype_bits >= 64) {
return 1;
}
if (analysis.input_output_info().smallest_input_dtype_bits >= 32) {
return 2;
}
if (num_threads * 4 > minor_dim) {
return 2;
}
return minor_dim % 4 == 0 ? 4 : 2;
}
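// Partitions the fusion roots into disjoint groups via union-find: roots are
// merged whenever they are reachable from a common instruction, except that
// broadcasted constants or scalars feeding a reduction hero only bind to the
// first reduction root they reach; all non-reduction roots share one group.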
ReductionGroups GroupDisjointReductions(const HloFusionAnalysis& analysis,
bool for_mlir) {
const int num_fusion_outputs = analysis.fusion_root_count();
CHECK_NE(0, num_fusion_outputs);
if (num_fusion_outputs == 1) {
return {{{&analysis.fusion_root(0).instruction()}}, {0}, {true}};
}
absl::node_hash_map<HloInstructionAdaptor, UnionFind<HloInstructionAdaptor>>
disjoint_sets;
UnionFind<HloInstructionAdaptor>* first_non_reduction_root = nullptr;
absl::node_hash_map<HloInstructionAdaptor,
absl::flat_hash_set<HloInstructionAdaptor>>
reachable_outputs;
absl::flat_hash_set<HloInstructionAdaptor> roots_with_reduction;
absl::flat_hash_map<const HloInstruction*, int> root_indices;
const auto& roots = analysis.fusion().GetRoots();
ReductionGroups result;
result.group_id_per_root.resize(roots.size());
result.is_reduction_root.reserve(roots.size());
for (auto [root, hero] : llvm::zip(roots, analysis.fusion_heroes())) {
int index = root_indices.size();
root_indices[&root.instruction()] = index;
auto [it, inserted] = disjoint_sets.try_emplace(root, root);
CHECK(inserted) << "Duplicate root " << root.ToString();
reachable_outputs[root].insert(root);
result.is_reduction_root.push_back(
IsRealReductionHero(root.instruction(), hero.instruction()));
if (result.is_reduction_root.back()) {
roots_with_reduction.insert(root);
} else if (first_non_reduction_root != nullptr) {
first_non_reduction_root->Merge(&it->second);
} else {
first_non_reduction_root = &it->second;
}
}
absl::flat_hash_set<HloInstructionAdaptor> instructions;
for (const HloInstruction* operand : analysis.fusion().GetParameters()) {
instructions.insert(HloInstructionAdaptor{*operand, &analysis.fusion()});
}
auto visit = [&](absl::Span<const HloInstructionAdaptor> roots) {
HloBfsConsumersFirstTraversal(
roots, analysis.fusion(), [&](HloInstructionAdaptor consumer) {
auto& consumer_reachable = reachable_outputs[consumer];
for (auto producer : consumer.GetOperands()) {
reachable_outputs[producer].insert(consumer_reachable.begin(),
consumer_reachable.end());
}
instructions.insert(consumer);
return TraversalResult::kAdvance;
});
};
if (for_mlir) {
for (auto root : roots) {
visit({root});
}
} else {
visit(roots);
}
for (auto instr : instructions) {
const auto& reachable = reachable_outputs[instr];
std::vector<HloInstructionAdaptor> reached_output_ids;
bool added_to_reduce = false;
for (auto output : roots) {
bool has_real_hero = roots_with_reduction.contains(output);
if (has_real_hero &&
(hlo_query::IsBroadcastedConstantOrScalar(instr.instruction()))) {
if (added_to_reduce) {
VLOG(3) << "Skip broadcasted constant or scalar " << instr.ToString();
continue;
}
}
if (reachable.contains(output)) {
VLOG(3) << "Reaching " << output.ToString() << " from "
<< instr.ToString();
reached_output_ids.push_back(output);
if (has_real_hero) {
added_to_reduce = true;
}
}
}
auto& first_reached_output = disjoint_sets.at(reached_output_ids.front());
for (size_t j = 1; j < reached_output_ids.size(); ++j) {
first_reached_output.Merge(&disjoint_sets.at(reached_output_ids[j]));
}
}
ConstHloInstructionMap<std::vector<const HloInstruction*>> group_map;
for (auto root : roots) {
group_map[&disjoint_sets.at(root).Get().instruction()].push_back(
&root.instruction());
}
result.grouped_roots.reserve(group_map.size());
absl::c_for_each(group_map, [&](auto& it) {
for (auto* root : it.second) {
result.group_id_per_root[root_indices[root]] =
result.grouped_roots.size();
}
result.grouped_roots.emplace_back(std::move(it.second));
});
return result;
}
void AddGroupIdConstraint(IndexingMap& map, int64_t root_index,
const ReductionGroups& groups) {
int group_index = groups.group_id_per_root[root_index];
map.AddConstraint(
mlir::getAffineDimExpr(KernelFusionInterface::kIndexingMapBlockIdxDims[1],
map.GetMLIRContext()),
{group_index, group_index});
}
}
} | #include "xla/service/gpu/fusions/reduction_base.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::SizeIs;
using MlirReductionBaseTest = HloTestBase;
TEST_F(MlirReductionBaseTest, TwoGroups) {
auto module = ParseAndReturnVerifiedModule(R"(
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[2] parameter(0)
%p1 = f32[2] parameter(1)
%c0 = f32[] constant(-inf)
%r0 = f32[] reduce(%p0, %c0), dimensions={0}, to_apply=add
%c1 = f32[] constant(inf)
%r1 = f32[] reduce(%p1, %c1), dimensions={0}, to_apply=add
ROOT %tuple = (f32[], f32[]) tuple(%r0, %r1)
}
ENTRY entry {
%p0 = f32[2] parameter(0)
%p1 = f32[2] parameter(1)
ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto device_info = TestGpuDeviceInfo::CudaOrRocmDeviceInfo();
auto analysis = HloFusionAnalysis::Create(*root, device_info);
auto reduction_groups = GroupDisjointReductions(analysis, true);
EXPECT_THAT(reduction_groups.grouped_roots,
ElementsAre(ElementsAre(&analysis.fusion_root(0).instruction()),
ElementsAre(&analysis.fusion_root(1).instruction())));
}
TEST_F(MlirReductionBaseTest, OneGroup) {
auto module = ParseAndReturnVerifiedModule(R"(
%add {
%p0 = c128[] parameter(0)
%p1 = c128[] parameter(1)
ROOT %add.35 = c128[] add(c128[] %p0, c128[] %p1)
}
%fusion {
%p0 = c128[1,2] parameter(0)
%c0 = c128[] constant((0, 0))
%reduce = c128[] reduce(%p0, %c0), dimensions={0,1}, to_apply=%add
%real = f64[] real(c128[] %reduce)
%imag = f64[] imag(c128[] %reduce)
%negate = f64[] negate(f64[] %imag)
ROOT %tuple.29 = (f64[], f64[]) tuple(f64[] %real, f64[] %negate)
}
ENTRY entry {
%p0 = c128[1,2] parameter(0)
ROOT %fusion = (f64[], f64[]) fusion(%p0), kind=kInput, calls=fusion
})")
.value();
auto device_info = TestGpuDeviceInfo::CudaOrRocmDeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(*root, device_info);
auto reduction_groups = GroupDisjointReductions(analysis, true);
EXPECT_THAT(reduction_groups.grouped_roots, SizeIs(1));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/reduction_base.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/reduction_base_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
15dff040-9d15-4863-8bdf-21b9008d68d1 | cpp | google/quiche | balsa_frame | quiche/balsa/balsa_frame.cc | quiche/balsa/balsa_frame_test.cc | #include "quiche/balsa/balsa_frame.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"
#include "quiche/balsa/balsa_enums.h"
#include "quiche/balsa/balsa_headers.h"
#include "quiche/balsa/balsa_visitor_interface.h"
#include "quiche/balsa/header_properties.h"
#include "quiche/common/platform/api/quiche_logging.h"
#define CHAR_LT(a, b) \
(static_cast<unsigned char>(a) < static_cast<unsigned char>(b))
#define CHAR_LE(a, b) \
(static_cast<unsigned char>(a) <= static_cast<unsigned char>(b))
#define CHAR_GT(a, b) \
(static_cast<unsigned char>(a) > static_cast<unsigned char>(b))
#define CHAR_GE(a, b) \
(static_cast<unsigned char>(a) >= static_cast<unsigned char>(b))
#define QUICHE_DCHECK_CHAR_GE(a, b) \
QUICHE_DCHECK_GE(static_cast<unsigned char>(a), static_cast<unsigned char>(b))
namespace quiche {
namespace {
using FirstLineValidationOption =
HttpValidationPolicy::FirstLineValidationOption;
constexpr size_t kContinueStatusCode = 100;
constexpr size_t kSwitchingProtocolsStatusCode = 101;
constexpr absl::string_view kChunked = "chunked";
constexpr absl::string_view kContentLength = "content-length";
constexpr absl::string_view kIdentity = "identity";
constexpr absl::string_view kTransferEncoding = "transfer-encoding";
bool IsInterimResponse(size_t response_code) {
return response_code >= 100 && response_code < 200;
}
bool IsObsTextChar(char c) { return static_cast<uint8_t>(c) >= 0x80; }
}
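// Returns the framer to its initial state so the same object can parse a
// subsequent message; attached header and trailer objects are cleared in
// place rather than replaced.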
void BalsaFrame::Reset() {
last_char_was_slash_r_ = false;
saw_non_newline_char_ = false;
start_was_space_ = true;
chunk_length_character_extracted_ = false;
allow_reading_until_close_for_request_ = false;
chunk_length_remaining_ = 0;
content_length_remaining_ = 0;
last_slash_n_idx_ = 0;
term_chars_ = 0;
parse_state_ = BalsaFrameEnums::READING_HEADER_AND_FIRSTLINE;
last_error_ = BalsaFrameEnums::BALSA_NO_ERROR;
lines_.clear();
if (continue_headers_ != nullptr) {
continue_headers_->Clear();
}
if (headers_ != nullptr) {
headers_->Clear();
}
trailer_lines_.clear();
start_of_trailer_line_ = 0;
trailer_length_ = 0;
if (trailers_ != nullptr) {
trailers_->Clear();
}
is_valid_target_uri_ = true;
}
namespace {
inline char* ParseOneIsland(char* current, char* begin, char* end,
size_t* first_whitespace, size_t* first_nonwhite) {
*first_whitespace = current - begin;
while (current < end && CHAR_LE(*current, ' ')) {
++current;
}
*first_nonwhite = current - begin;
while (current < end && CHAR_GT(*current, ' ')) {
++current;
}
return current;
}
}
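// Splits the first line into three whitespace-delimited "islands"
// (method/target/version for requests, version/code/phrase for responses),
// recording island boundaries as indices into |headers|, and parses the
// numeric status code for responses. Returns false only on fatal errors.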
bool ParseHTTPFirstLine(char* begin, char* end, bool is_request,
BalsaHeaders* headers,
BalsaFrameEnums::ErrorCode* error_code,
FirstLineValidationOption whitespace_option) {
while (begin < end && (end[-1] == '\n' || end[-1] == '\r')) {
--end;
}
if (whitespace_option != FirstLineValidationOption::NONE) {
constexpr absl::string_view kBadWhitespace = "\r\t";
char* pos = std::find_first_of(begin, end, kBadWhitespace.begin(),
kBadWhitespace.end());
if (pos != end) {
if (whitespace_option == FirstLineValidationOption::REJECT) {
*error_code = static_cast<BalsaFrameEnums::ErrorCode>(
BalsaFrameEnums::INVALID_WS_IN_STATUS_LINE +
static_cast<int>(is_request));
return false;
}
QUICHE_DCHECK(whitespace_option == FirstLineValidationOption::SANITIZE);
std::replace_if(
pos, end, [](char c) { return c == '\r' || c == '\t'; }, ' ');
}
}
char* current = ParseOneIsland(begin, begin, end, &headers->whitespace_1_idx_,
&headers->non_whitespace_1_idx_);
current = ParseOneIsland(current, begin, end, &headers->whitespace_2_idx_,
&headers->non_whitespace_2_idx_);
current = ParseOneIsland(current, begin, end, &headers->whitespace_3_idx_,
&headers->non_whitespace_3_idx_);
const char* last = end;
while (current <= last && CHAR_LE(*last, ' ')) {
--last;
}
headers->whitespace_4_idx_ = last - begin + 1;
QUICHE_DCHECK(begin == end || static_cast<unsigned char>(*begin) > ' ');
QUICHE_DCHECK_EQ(0u, headers->whitespace_1_idx_);
QUICHE_DCHECK_EQ(0u, headers->non_whitespace_1_idx_);
QUICHE_DCHECK(begin == end ||
headers->non_whitespace_1_idx_ < headers->whitespace_2_idx_);
if (headers->non_whitespace_2_idx_ == headers->whitespace_3_idx_) {
*error_code = static_cast<BalsaFrameEnums::ErrorCode>(
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_RESPONSE_VERSION +
static_cast<int>(is_request));
if (!is_request) {
return false;
}
}
if (headers->whitespace_3_idx_ == headers->non_whitespace_3_idx_) {
if (*error_code == BalsaFrameEnums::BALSA_NO_ERROR) {
*error_code = static_cast<BalsaFrameEnums::ErrorCode>(
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_RESPONSE_STATUSCODE +
static_cast<int>(is_request));
}
}
if (!is_request) {
headers->parsed_response_code_ = 0;
if (headers->non_whitespace_2_idx_ < headers->whitespace_3_idx_) {
if (!absl::SimpleAtoi(
absl::string_view(begin + headers->non_whitespace_2_idx_,
headers->non_whitespace_3_idx_ -
headers->non_whitespace_2_idx_),
&headers->parsed_response_code_)) {
*error_code = BalsaFrameEnums::FAILED_CONVERTING_STATUS_CODE_TO_INT;
return false;
}
}
}
return true;
}
namespace {
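// Validates the request-target against the forms of RFC 9112: asterisk-form
// ("*") is only valid for OPTIONS; authority-form for CONNECT must end in a
// valid port, with bracketed IPv6 literals accounted for; everything else
// must be origin-form (starting with '/') or absolute-form (containing
// "://").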
bool IsValidTargetUri(absl::string_view method, absl::string_view target_uri) {
if (target_uri.empty()) {
QUICHE_CODE_COUNT(invalid_target_uri_empty);
return false;
}
if (target_uri == "*") {
if (method == "OPTIONS") {
return true;
}
QUICHE_CODE_COUNT(invalid_target_uri_asterisk_not_options);
return false;
}
if (method == "CONNECT") {
size_t index = target_uri.find_last_of(':');
if (index == absl::string_view::npos || index == 0) {
QUICHE_CODE_COUNT(invalid_target_uri_connect_missing_port);
return false;
}
if (target_uri[0] == '[' && target_uri[index - 1] != ']') {
QUICHE_CODE_COUNT(invalid_target_uri_connect_bad_v6_literal);
return false;
}
int port;
if (!absl::SimpleAtoi(target_uri.substr(index + 1), &port) || port < 0 ||
port > 65535) {
QUICHE_CODE_COUNT(invalid_target_uri_connect_bad_port);
return false;
}
return true;
}
  if (target_uri[0] == '/' || absl::StrContains(target_uri, "://")) {
return true;
}
QUICHE_CODE_COUNT(invalid_target_uri_bad_path);
return false;
}
}  // namespace
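// Parses the buffered first line, reports it to the visitor, and for
// requests validates the target URI. A request line whose third island is
// empty (no HTTP version, as in an HTTP/0.9-style simple request) completes
// the message immediately.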
void BalsaFrame::ProcessFirstLine(char* begin, char* end) {
BalsaFrameEnums::ErrorCode previous_error = last_error_;
if (!ParseHTTPFirstLine(
begin, end, is_request_, headers_, &last_error_,
http_validation_policy().sanitize_cr_tab_in_first_line)) {
parse_state_ = BalsaFrameEnums::ERROR;
HandleError(last_error_);
return;
}
if (previous_error != last_error_) {
HandleWarning(last_error_);
}
const absl::string_view line_input(
begin + headers_->non_whitespace_1_idx_,
headers_->whitespace_4_idx_ - headers_->non_whitespace_1_idx_);
const absl::string_view part1(
begin + headers_->non_whitespace_1_idx_,
headers_->whitespace_2_idx_ - headers_->non_whitespace_1_idx_);
const absl::string_view part2(
begin + headers_->non_whitespace_2_idx_,
headers_->whitespace_3_idx_ - headers_->non_whitespace_2_idx_);
const absl::string_view part3(
begin + headers_->non_whitespace_3_idx_,
headers_->whitespace_4_idx_ - headers_->non_whitespace_3_idx_);
if (is_request_) {
is_valid_target_uri_ = IsValidTargetUri(part1, part2);
if (http_validation_policy().disallow_invalid_target_uris &&
!is_valid_target_uri_) {
parse_state_ = BalsaFrameEnums::ERROR;
last_error_ = BalsaFrameEnums::INVALID_TARGET_URI;
HandleError(last_error_);
return;
}
visitor_->OnRequestFirstLineInput(line_input, part1, part2, part3);
if (part3.empty()) {
parse_state_ = BalsaFrameEnums::MESSAGE_FULLY_READ;
}
return;
}
visitor_->OnResponseFirstLineInput(line_input, part1, part2, part3);
}
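// With |current| pointing at the colon of a header line, trims whitespace
// around the separator: key_end_idx is pulled back past any whitespace
// preceding the colon, and value_begin_idx is pushed forward past any
// whitespace following it.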
void BalsaFrame::CleanUpKeyValueWhitespace(
const char* stream_begin, const char* line_begin, const char* current,
const char* line_end, HeaderLineDescription* current_header_line) {
const char* colon_loc = current;
QUICHE_DCHECK_LT(colon_loc, line_end);
QUICHE_DCHECK_EQ(':', *colon_loc);
QUICHE_DCHECK_EQ(':', *current);
QUICHE_DCHECK_CHAR_GE(' ', *line_end)
<< "\"" << std::string(line_begin, line_end) << "\"";
--current;
while (current > line_begin && CHAR_LE(*current, ' ')) {
--current;
}
current += static_cast<int>(current != colon_loc);
current_header_line->key_end_idx = current - stream_begin;
current = colon_loc;
QUICHE_DCHECK_EQ(':', *current);
++current;
while (current < line_end && CHAR_LE(*current, ' ')) {
++current;
}
current_header_line->value_begin_idx = current - stream_begin;
QUICHE_DCHECK_GE(current_header_line->key_end_idx,
current_header_line->first_char_idx);
QUICHE_DCHECK_GE(current_header_line->value_begin_idx,
current_header_line->key_end_idx);
QUICHE_DCHECK_GE(current_header_line->last_char_idx,
current_header_line->value_begin_idx);
}
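// Walks every header (or trailer) line, folding legal continuation lines
// into their predecessor, locating the separating colon, and rejecting
// invalid field-name characters per the validation policy. Appends one
// HeaderLineDescription per logical line. Returns false after reporting an
// error through the visitor.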
bool BalsaFrame::FindColonsAndParseIntoKeyValue(const Lines& lines,
bool is_trailer,
BalsaHeaders* headers) {
QUICHE_DCHECK(!lines.empty());
const char* stream_begin = headers->OriginalHeaderStreamBegin();
const Lines::size_type lines_size_m1 = lines.size() - 1;
int first_header_idx = (is_trailer ? 0 : 1);
const char* current = stream_begin + lines[first_header_idx].first;
for (Lines::size_type i = first_header_idx; i < lines_size_m1;) {
const char* line_begin = stream_begin + lines[i].first;
for (++i; i < lines_size_m1; ++i) {
const char c = *(stream_begin + lines[i].first);
if (CHAR_GT(c, ' ')) {
break;
}
if ((c != ' ' && c != '\t') ||
http_validation_policy().disallow_header_continuation_lines) {
HandleError(is_trailer ? BalsaFrameEnums::INVALID_TRAILER_FORMAT
: BalsaFrameEnums::INVALID_HEADER_FORMAT);
return false;
}
}
const char* line_end = stream_begin + lines[i - 1].second;
QUICHE_DCHECK_LT(line_begin - stream_begin, line_end - stream_begin);
--line_end;
QUICHE_DCHECK_EQ('\n', *line_end)
<< "\"" << std::string(line_begin, line_end) << "\"";
while (CHAR_LE(*line_end, ' ') && line_end > line_begin) {
--line_end;
}
++line_end;
QUICHE_DCHECK_CHAR_GE(' ', *line_end);
QUICHE_DCHECK_LT(line_begin, line_end);
headers->header_lines_.push_back(HeaderLineDescription(
line_begin - stream_begin, line_end - stream_begin,
line_end - stream_begin, line_end - stream_begin, 0));
if (current >= line_end) {
if (http_validation_policy().require_header_colon) {
HandleError(is_trailer ? BalsaFrameEnums::TRAILER_MISSING_COLON
: BalsaFrameEnums::HEADER_MISSING_COLON);
return false;
}
HandleWarning(is_trailer ? BalsaFrameEnums::TRAILER_MISSING_COLON
: BalsaFrameEnums::HEADER_MISSING_COLON);
continue;
}
if (current < line_begin) {
current = line_begin;
}
for (; current < line_end; ++current) {
const char c = *current;
if (c == ':') {
break;
}
if (http_validation_policy().disallow_double_quote_in_header_name) {
if (header_properties::IsInvalidHeaderKeyChar(c)) {
HandleError(is_trailer
? BalsaFrameEnums::INVALID_TRAILER_NAME_CHARACTER
: BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER);
return false;
}
} else if (header_properties::IsInvalidHeaderKeyCharAllowDoubleQuote(c)) {
HandleError(is_trailer
? BalsaFrameEnums::INVALID_TRAILER_NAME_CHARACTER
: BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER);
return false;
}
if (http_validation_policy().disallow_obs_text_in_field_names &&
IsObsTextChar(c)) {
HandleError(is_trailer
? BalsaFrameEnums::INVALID_TRAILER_NAME_CHARACTER
: BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER);
return false;
}
}
if (current == line_end) {
if (http_validation_policy().require_header_colon) {
HandleError(is_trailer ? BalsaFrameEnums::TRAILER_MISSING_COLON
: BalsaFrameEnums::HEADER_MISSING_COLON);
return false;
}
HandleWarning(is_trailer ? BalsaFrameEnums::TRAILER_MISSING_COLON
: BalsaFrameEnums::HEADER_MISSING_COLON);
continue;
}
QUICHE_DCHECK_EQ(*current, ':');
QUICHE_DCHECK_LE(current - stream_begin, line_end - stream_begin);
QUICHE_DCHECK_LE(stream_begin - stream_begin, current - stream_begin);
HeaderLineDescription& current_header_line = headers->header_lines_.back();
current_header_line.key_end_idx = current - stream_begin;
current_header_line.value_begin_idx = current_header_line.key_end_idx;
if (current < line_end) {
++current_header_line.key_end_idx;
CleanUpKeyValueWhitespace(stream_begin, line_begin, current, line_end,
                                &current_header_line);
}
}
return true;
}
void BalsaFrame::HandleWarning(BalsaFrameEnums::ErrorCode error_code) {
last_error_ = error_code;
visitor_->HandleWarning(last_error_);
}
void BalsaFrame::HandleError(BalsaFrameEnums::ErrorCode error_code) {
last_error_ = error_code;
parse_state_ = BalsaFrameEnums::ERROR;
visitor_->HandleError(last_error_);
}
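// Parses a Content-Length value as an unsigned decimal integer, rejecting
// any non-digit character and detecting size_t overflow before it occurs.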
BalsaHeadersEnums::ContentLengthStatus BalsaFrame::ProcessContentLengthLine(
HeaderLines::size_type line_idx, size_t* length) {
const HeaderLineDescription& header_line = headers_->header_lines_[line_idx];
const char* stream_begin = headers_->OriginalHeaderStreamBegin();
const char* line_end = stream_begin + header_line.last_char_idx;
const char* value_begin = (stream_begin + header_line.value_begin_idx);
if (value_begin >= line_end) {
QUICHE_DVLOG(1) << "invalid content-length -- no non-whitespace value data";
return BalsaHeadersEnums::INVALID_CONTENT_LENGTH;
}
*length = 0;
while (value_begin < line_end) {
if (*value_begin < '0' || *value_begin > '9') {
QUICHE_DVLOG(1)
<< "invalid content-length - non numeric character detected";
return BalsaHeadersEnums::INVALID_CONTENT_LENGTH;
}
const size_t kMaxDiv10 = std::numeric_limits<size_t>::max() / 10;
size_t length_x_10 = *length * 10;
const size_t c = *value_begin - '0';
if (*length > kMaxDiv10 ||
(std::numeric_limits<size_t>::max() - length_x_10) < c) {
QUICHE_DVLOG(1) << "content-length overflow";
return BalsaHeadersEnums::CONTENT_LENGTH_OVERFLOW;
}
*length = length_x_10 + c;
++value_begin;
}
QUICHE_DVLOG(1) << "content_length parsed: " << *length;
return BalsaHeadersEnums::VALID_CONTENT_LENGTH;
}
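// Interprets a Transfer-Encoding value: "chunked" and "identity" are
// recognized case-insensitively; any other value is an error when the
// validation policy asks for Transfer-Encoding validation, and is otherwise
// ignored.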
void BalsaFrame::ProcessTransferEncodingLine(HeaderLines::size_type line_idx) {
const HeaderLineDescription& header_line = headers_->header_lines_[line_idx];
const char* stream_begin = headers_->OriginalHeaderStreamBegin();
const absl::string_view transfer_encoding(
stream_begin + header_line.value_begin_idx,
header_line.last_char_idx - header_line.value_begin_idx);
if (absl::EqualsIgnoreCase(transfer_encoding, kChunked)) {
headers_->transfer_encoding_is_chunked_ = true;
return;
}
if (absl::EqualsIgnoreCase(transfer_encoding, kIdentity)) {
headers_->transfer_encoding_is_chunked_ = false;
return;
}
if (http_validation_policy().validate_transfer_encoding) {
HandleError(BalsaFrameEnums::UNKNOWN_TRANSFER_ENCODING);
}
}
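// Scans the raw header bytes for characters that are never legal in headers
// and, when the policy disallows it, for a lone CR not followed by LF.
// Returns true if anything invalid was seen.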
bool BalsaFrame::CheckHeaderLinesForInvalidChars(const Lines& lines,
const BalsaHeaders* headers) {
const char* stream_begin =
headers->OriginalHeaderStreamBegin() + lines.front().first;
const char* stream_end =
headers->OriginalHeaderStreamBegin() + lines.back().second;
bool found_invalid = false;
for (const char* c = stream_begin; c < stream_end; c++) {
if (header_properties::IsInvalidHeaderChar(*c)) {
found_invalid = true;
}
if (*c == '\r' &&
http_validation_policy().disallow_lone_cr_in_request_headers &&
c + 1 < stream_end && *(c + 1) != '\n') {
found_invalid = true;
}
}
return found_invalid;
}
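// Parses all header (or trailer) lines into key/value descriptions and, for
// headers, applies the message-framing fields: duplicate Content-Length
// values are cross-checked, Transfer-Encoding is recorded, and chunked
// encoding overrides any Content-Length.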
void BalsaFrame::ProcessHeaderLines(const Lines& lines, bool is_trailer,
BalsaHeaders* headers) {
QUICHE_DCHECK(!lines.empty());
QUICHE_DVLOG(1) << "******@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@**********\n";
if (invalid_chars_error_enabled() &&
CheckHeaderLinesForInvalidChars(lines, headers)) {
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER);
return;
}
if (lines.size() <= (is_trailer ? 1 : 2)) {
return;
}
HeaderLines::size_type content_length_idx = 0;
HeaderLines::size_type transfer_encoding_idx = 0;
const char* stream_begin = headers->OriginalHeaderStreamBegin();
if (!FindColonsAndParseIntoKeyValue(lines, is_trailer, headers)) {
return;
}
const HeaderLines::size_type lines_size = headers->header_lines_.size();
for (HeaderLines::size_type i = 0; i < lines_size; ++i) {
const HeaderLineDescription& line = headers->header_lines_[i];
const absl::string_view key(stream_begin + line.first_char_idx,
line.key_end_idx - line.first_char_idx);
QUICHE_DVLOG(2) << "[" << i << "]: " << key << " key_len: " << key.length();
if (key.empty() || key[0] == ' ') {
parse_state_ = BalsaFrameEnums::ERROR;
HandleError(is_trailer ? BalsaFrameEnums::INVALID_TRAILER_FORMAT
: BalsaFrameEnums::INVALID_HEADER_FORMAT);
return;
}
if (is_trailer) {
continue;
}
if (absl::EqualsIgnoreCase(key, kContentLength)) {
size_t length = 0;
BalsaHeadersEnums::ContentLengthStatus content_length_status =
ProcessContentLengthLine(i, &length);
if (content_length_idx == 0) {
content_length_idx = i + 1;
headers->content_length_status_ = content_length_status;
headers->content_length_ = length;
content_length_remaining_ = length;
continue;
}
if ((headers->content_length_status_ != content_length_status) ||
((headers->content_length_status_ ==
BalsaHeadersEnums::VALID_CONTENT_LENGTH) &&
(http_validation_policy().disallow_multiple_content_length ||
length != headers->content_length_))) {
HandleError(BalsaFrameEnums::MULTIPLE_CONTENT_LENGTH_KEYS);
return;
}
continue;
}
if (absl::EqualsIgnoreCase(key, kTransferEncoding)) {
if (http_validation_policy().validate_transfer_encoding &&
transfer_encoding_idx != 0) {
HandleError(BalsaFrameEnums::MULTIPLE_TRANSFER_ENCODING_KEYS);
return;
}
transfer_encoding_idx = i + 1;
}
}
if (!is_trailer) {
if (http_validation_policy().validate_transfer_encoding &&
http_validation_policy()
.disallow_transfer_encoding_with_content_length &&
content_length_idx != 0 && transfer_encoding_idx != 0) {
HandleError(BalsaFrameEnums::BOTH_TRANSFER_ENCODING_AND_CONTENT_LENGTH);
return;
}
if (headers->transfer_encoding_is_chunked_) {
headers->content_length_ = 0;
headers->content_length_status_ = BalsaHeadersEnums::NO_CONTENT_LENGTH;
content_length_remaining_ = 0;
}
if (transfer_encoding_idx != 0) {
ProcessTransferEncodingLine(transfer_encoding_idx - 1);
}
}
}
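// Chooses what follows the header block: nothing for HEAD responses and
// bodyless status codes, chunk framing when Transfer-Encoding is chunked, a
// counted body for a valid Content-Length, and otherwise read-until-close or
// an error, depending on the request method and validation policy.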
void BalsaFrame::AssignParseStateAfterHeadersHaveBeenParsed() {
parse_state_ = BalsaFrameEnums::MESSAGE_FULLY_READ;
int response_code = headers_->parsed_response_code_;
if (!is_request_ && (request_was_head_ ||
!BalsaHeaders::ResponseCanHaveBody(response_code))) {
return;
}
if (headers_->transfer_encoding_is_chunked_) {
parse_state_ = BalsaFrameEnums::READING_CHUNK_LENGTH;
return;
}
switch (headers_->content_length_status_) {
case BalsaHeadersEnums::VALID_CONTENT_LENGTH:
if (headers_->content_length_ == 0) {
parse_state_ = BalsaFrameEnums::MESSAGE_FULLY_READ;
} else {
parse_state_ = BalsaFrameEnums::READING_CONTENT;
}
break;
case BalsaHeadersEnums::CONTENT_LENGTH_OVERFLOW:
case BalsaHeadersEnums::INVALID_CONTENT_LENGTH:
HandleError(BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH);
break;
case BalsaHeadersEnums::NO_CONTENT_LENGTH:
if (is_request_) {
const absl::string_view method = headers_->request_method();
if ((method != "POST" && method != "PUT") ||
!http_validation_policy().require_content_length_if_body_required) {
parse_state_ = BalsaFrameEnums::MESSAGE_FULLY_READ;
break;
} else if (!allow_reading_until_close_for_request_) {
HandleError(BalsaFrameEnums::REQUIRED_BODY_BUT_NO_CONTENT_LENGTH);
break;
}
}
parse_state_ = BalsaFrameEnums::READING_UNTIL_CLOSE;
HandleWarning(BalsaFrameEnums::MAYBE_BODY_BUT_NO_CONTENT_LENGTH);
break;
default:
QUICHE_LOG(FATAL) << "Saw a content_length_status: "
<< headers_->content_length_status_
<< " which is unknown.";
}
}
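// Consumes bytes of the header block, buffering complete lines in |headers_|
// until the blank line ending the block is found. At that point the first
// line and header lines are parsed, interim (1xx) responses and 100-continue
// headers are dispatched and the framer reset to await the final headers,
// and the post-header parse state is selected. Returns the number of bytes
// consumed.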
size_t BalsaFrame::ProcessHeaders(const char* message_start,
size_t message_length) {
const char* const original_message_start = message_start;
const char* const message_end = message_start + message_length;
const char* message_current = message_start;
const char* checkpoint = message_start;
if (message_length == 0) {
return message_current - original_message_start;
}
while (message_current < message_end) {
size_t base_idx = headers_->GetReadableBytesFromHeaderStream();
if (!saw_non_newline_char_) {
do {
const char c = *message_current;
if (c != '\r' && c != '\n') {
if (CHAR_LE(c, ' ')) {
HandleError(BalsaFrameEnums::NO_REQUEST_LINE_IN_REQUEST);
return message_current - original_message_start;
}
break;
}
++message_current;
if (message_current == message_end) {
return message_current - original_message_start;
}
} while (true);
saw_non_newline_char_ = true;
message_start = message_current;
checkpoint = message_current;
}
while (message_current < message_end) {
if (*message_current != '\n') {
++message_current;
continue;
}
const size_t relative_idx = message_current - message_start;
const size_t message_current_idx = 1 + base_idx + relative_idx;
lines_.push_back(std::make_pair(last_slash_n_idx_, message_current_idx));
if (lines_.size() == 1) {
headers_->WriteFromFramer(checkpoint, 1 + message_current - checkpoint);
checkpoint = message_current + 1;
char* begin = headers_->OriginalHeaderStreamBegin();
QUICHE_DVLOG(1) << "First line "
<< std::string(begin, lines_[0].second);
QUICHE_DVLOG(1) << "is_request_: " << is_request_;
ProcessFirstLine(begin, begin + lines_[0].second);
if (parse_state_ == BalsaFrameEnums::MESSAGE_FULLY_READ) {
break;
}
if (parse_state_ == BalsaFrameEnums::ERROR) {
return message_current - original_message_start;
}
}
const size_t chars_since_last_slash_n =
(message_current_idx - last_slash_n_idx_);
last_slash_n_idx_ = message_current_idx;
if (chars_since_last_slash_n > 2) {
++message_current;
continue;
}
if ((chars_since_last_slash_n == 1) ||
(((message_current > message_start) &&
(*(message_current - 1) == '\r')) ||
(last_char_was_slash_r_))) {
break;
}
++message_current;
}
if (message_current == message_end) {
continue;
}
++message_current;
QUICHE_DCHECK(message_current >= message_start);
if (message_current > message_start) {
headers_->WriteFromFramer(checkpoint, message_current - checkpoint);
}
if (headers_->GetReadableBytesFromHeaderStream() > max_header_length_) {
HandleHeadersTooLongError();
return message_current - original_message_start;
}
headers_->DoneWritingFromFramer();
visitor_->OnHeaderInput(headers_->GetReadablePtrFromHeaderStream());
    ProcessHeaderLines(lines_, /*is_trailer=*/false, headers_);
if (parse_state_ == BalsaFrameEnums::ERROR) {
return message_current - original_message_start;
}
if (use_interim_headers_callback_ &&
IsInterimResponse(headers_->parsed_response_code()) &&
headers_->parsed_response_code() != kSwitchingProtocolsStatusCode) {
visitor_->OnInterimHeaders(
std::make_unique<BalsaHeaders>(std::move(*headers_)));
Reset();
checkpoint = message_start = message_current;
continue;
}
if (continue_headers_ != nullptr &&
headers_->parsed_response_code_ == kContinueStatusCode) {
BalsaHeaders saved_continue_headers = std::move(*headers_);
Reset();
*continue_headers_ = std::move(saved_continue_headers);
visitor_->ContinueHeaderDone();
checkpoint = message_start = message_current;
continue;
}
AssignParseStateAfterHeadersHaveBeenParsed();
if (parse_state_ == BalsaFrameEnums::ERROR) {
return message_current - original_message_start;
}
visitor_->ProcessHeaders(*headers_);
visitor_->HeaderDone();
if (parse_state_ == BalsaFrameEnums::MESSAGE_FULLY_READ) {
visitor_->MessageDone();
}
return message_current - original_message_start;
}
last_char_was_slash_r_ = (*(message_end - 1) == '\r');
QUICHE_DCHECK(message_current >= message_start);
if (message_current > message_start) {
headers_->WriteFromFramer(checkpoint, message_current - checkpoint);
}
return message_current - original_message_start;
}
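// Reports how many body bytes the caller may move out-of-band (e.g. via
// splice()) without confusing the parser: the remainder of the current
// chunk, the remaining Content-Length, or effectively unlimited when reading
// until close.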
size_t BalsaFrame::BytesSafeToSplice() const {
switch (parse_state_) {
case BalsaFrameEnums::READING_CHUNK_DATA:
return chunk_length_remaining_;
case BalsaFrameEnums::READING_UNTIL_CLOSE:
return std::numeric_limits<size_t>::max();
case BalsaFrameEnums::READING_CONTENT:
return content_length_remaining_;
default:
return 0;
}
}
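// The counterpart to BytesSafeToSplice(): tells the parser that
// |bytes_spliced| body bytes were transferred out-of-band, advancing the
// chunk or Content-Length accounting exactly as if those bytes had passed
// through ProcessInput().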
void BalsaFrame::BytesSpliced(size_t bytes_spliced) {
switch (parse_state_) {
case BalsaFrameEnums::READING_CHUNK_DATA:
if (chunk_length_remaining_ < bytes_spliced) {
HandleError(BalsaFrameEnums::
CALLED_BYTES_SPLICED_AND_EXCEEDED_SAFE_SPLICE_AMOUNT);
return;
}
chunk_length_remaining_ -= bytes_spliced;
if (chunk_length_remaining_ == 0) {
parse_state_ = BalsaFrameEnums::READING_CHUNK_TERM;
}
return;
case BalsaFrameEnums::READING_UNTIL_CLOSE:
return;
case BalsaFrameEnums::READING_CONTENT:
if (content_length_remaining_ < bytes_spliced) {
HandleError(BalsaFrameEnums::
CALLED_BYTES_SPLICED_AND_EXCEEDED_SAFE_SPLICE_AMOUNT);
return;
}
content_length_remaining_ -= bytes_spliced;
if (content_length_remaining_ == 0) {
parse_state_ = BalsaFrameEnums::MESSAGE_FULLY_READ;
visitor_->MessageDone();
}
return;
default:
HandleError(BalsaFrameEnums::CALLED_BYTES_SPLICED_WHEN_UNSAFE_TO_DO_SO);
return;
}
}
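// The main entry point: runs the body-parsing state machine over the next
// |size| bytes, invoking visitor callbacks as message elements are
// recognized, and returns how many bytes were consumed (possibly fewer than
// |size| on error or message completion). An illustrative calling loop
// (a sketch using the accessors exercised by the tests below; not a
// complete program):
//
//   BalsaHeaders headers;
//   BalsaFrame frame;
//   frame.set_is_request(true);
//   frame.set_balsa_headers(&headers);
//   while (len > 0 && !frame.MessageFullyRead() && !frame.Error()) {
//     const size_t consumed = frame.ProcessInput(data, len);
//     data += consumed;
//     len -= consumed;
//   }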
size_t BalsaFrame::ProcessInput(const char* input, size_t size) {
const char* current = input;
const char* on_entry = current;
const char* end = current + size;
QUICHE_DCHECK(headers_ != nullptr);
if (headers_ == nullptr) {
return 0;
}
if (parse_state_ == BalsaFrameEnums::READING_HEADER_AND_FIRSTLINE) {
const size_t header_length = headers_->GetReadableBytesFromHeaderStream();
if (header_length > max_header_length_ ||
(header_length == max_header_length_ && size > 0)) {
HandleHeadersTooLongError();
return current - input;
}
const size_t bytes_to_process =
std::min(max_header_length_ - header_length, size);
current += ProcessHeaders(input, bytes_to_process);
if (parse_state_ == BalsaFrameEnums::READING_HEADER_AND_FIRSTLINE) {
const size_t header_length_after =
headers_->GetReadableBytesFromHeaderStream();
if (header_length_after >= max_header_length_) {
HandleHeadersTooLongError();
}
}
return current - input;
}
if (parse_state_ == BalsaFrameEnums::MESSAGE_FULLY_READ ||
parse_state_ == BalsaFrameEnums::ERROR) {
return current - input;
}
QUICHE_DCHECK_LE(current, end);
if (current == end) {
return current - input;
}
while (true) {
switch (parse_state_) {
case BalsaFrameEnums::READING_CHUNK_LENGTH:
QUICHE_DCHECK_LE(current, end);
while (true) {
if (current == end) {
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
return current - input;
}
const char c = *current;
++current;
static const signed char kBad = -1;
static const signed char kDelimiter = -2;
signed char addition = kBad;
switch (c) {
case '0': addition = 0; break;
case '1': addition = 1; break;
case '2': addition = 2; break;
case '3': addition = 3; break;
case '4': addition = 4; break;
case '5': addition = 5; break;
case '6': addition = 6; break;
case '7': addition = 7; break;
case '8': addition = 8; break;
case '9': addition = 9; break;
case 'a': addition = 0xA; break;
case 'b': addition = 0xB; break;
case 'c': addition = 0xC; break;
case 'd': addition = 0xD; break;
case 'e': addition = 0xE; break;
case 'f': addition = 0xF; break;
case 'A': addition = 0xA; break;
case 'B': addition = 0xB; break;
case 'C': addition = 0xC; break;
case 'D': addition = 0xD; break;
case 'E': addition = 0xE; break;
case 'F': addition = 0xF; break;
case '\t':
case '\n':
case '\r':
case ' ':
case ';':
addition = kDelimiter;
break;
default:
break;
}
if (addition >= 0) {
chunk_length_character_extracted_ = true;
size_t length_x_16 = chunk_length_remaining_ * 16;
const size_t kMaxDiv16 = std::numeric_limits<size_t>::max() / 16;
if ((chunk_length_remaining_ > kMaxDiv16) ||
(std::numeric_limits<size_t>::max() - length_x_16) <
static_cast<size_t>(addition)) {
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
HandleError(BalsaFrameEnums::CHUNK_LENGTH_OVERFLOW);
return current - input;
}
chunk_length_remaining_ = length_x_16 + addition;
continue;
}
if (!chunk_length_character_extracted_ || addition == kBad) {
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
HandleError(BalsaFrameEnums::INVALID_CHUNK_LENGTH);
return current - input;
}
break;
}
--current;
parse_state_ = BalsaFrameEnums::READING_CHUNK_EXTENSION;
last_char_was_slash_r_ = false;
visitor_->OnChunkLength(chunk_length_remaining_);
continue;
case BalsaFrameEnums::READING_CHUNK_EXTENSION: {
const char* extensions_start = current;
size_t extensions_length = 0;
QUICHE_DCHECK_LE(current, end);
while (true) {
if (current == end) {
visitor_->OnChunkExtensionInput(
absl::string_view(extensions_start, extensions_length));
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
return current - input;
}
const char c = *current;
if (http_validation_policy_.disallow_lone_cr_in_chunk_extension) {
const bool cr_followed_by_non_lf =
c == '\r' && current + 1 < end && *(current + 1) != '\n';
const bool previous_cr_followed_by_non_lf =
last_char_was_slash_r_ && current == input && c != '\n';
if (cr_followed_by_non_lf || previous_cr_followed_by_non_lf) {
HandleError(BalsaFrameEnums::INVALID_CHUNK_EXTENSION);
return current - input;
}
if (current + 1 == end) {
last_char_was_slash_r_ = c == '\r';
}
}
if (c == '\r' || c == '\n') {
extensions_length = (extensions_start == current)
? 0
: current - extensions_start - 1;
}
++current;
if (c == '\n') {
break;
}
}
chunk_length_character_extracted_ = false;
visitor_->OnChunkExtensionInput(
absl::string_view(extensions_start, extensions_length));
if (chunk_length_remaining_ != 0) {
parse_state_ = BalsaFrameEnums::READING_CHUNK_DATA;
continue;
}
HeaderFramingFound('\n');
parse_state_ = BalsaFrameEnums::READING_LAST_CHUNK_TERM;
continue;
}
case BalsaFrameEnums::READING_CHUNK_DATA:
while (current < end) {
if (chunk_length_remaining_ == 0) {
break;
}
size_t bytes_remaining = end - current;
size_t consumed_bytes = (chunk_length_remaining_ < bytes_remaining)
? chunk_length_remaining_
: bytes_remaining;
const char* tmp_current = current + consumed_bytes;
visitor_->OnRawBodyInput(
absl::string_view(on_entry, tmp_current - on_entry));
visitor_->OnBodyChunkInput(
absl::string_view(current, consumed_bytes));
on_entry = current = tmp_current;
chunk_length_remaining_ -= consumed_bytes;
}
if (chunk_length_remaining_ == 0) {
parse_state_ = BalsaFrameEnums::READING_CHUNK_TERM;
continue;
}
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
return current - input;
case BalsaFrameEnums::READING_CHUNK_TERM:
QUICHE_DCHECK_LE(current, end);
while (true) {
if (current == end) {
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
return current - input;
}
const char c = *current;
++current;
if (c == '\n') {
break;
}
}
parse_state_ = BalsaFrameEnums::READING_CHUNK_LENGTH;
continue;
case BalsaFrameEnums::READING_LAST_CHUNK_TERM:
QUICHE_DCHECK_LE(current, end);
while (true) {
if (current == end) {
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
return current - input;
}
const char c = *current;
if (HeaderFramingFound(c) != 0) {
++current;
parse_state_ = BalsaFrameEnums::MESSAGE_FULLY_READ;
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
visitor_->MessageDone();
return current - input;
}
if (!HeaderFramingMayBeFound()) {
break;
}
++current;
}
parse_state_ = BalsaFrameEnums::READING_TRAILER;
visitor_->OnRawBodyInput(
absl::string_view(on_entry, current - on_entry));
on_entry = current;
continue;
case BalsaFrameEnums::READING_TRAILER:
while (current < end) {
const char c = *current;
++current;
++trailer_length_;
if (trailers_ != nullptr) {
if (trailer_length_ > max_header_length_) {
--current;
HandleError(BalsaFrameEnums::TRAILER_TOO_LONG);
return current - input;
}
if (LineFramingFound(c)) {
trailer_lines_.push_back(
std::make_pair(start_of_trailer_line_, trailer_length_));
start_of_trailer_line_ = trailer_length_;
}
}
if (HeaderFramingFound(c) != 0) {
parse_state_ = BalsaFrameEnums::MESSAGE_FULLY_READ;
if (trailers_ != nullptr) {
trailers_->WriteFromFramer(on_entry, current - on_entry);
trailers_->DoneWritingFromFramer();
            ProcessHeaderLines(trailer_lines_, /*is_trailer=*/true,
                               trailers_.get());
if (parse_state_ == BalsaFrameEnums::ERROR) {
return current - input;
}
visitor_->OnTrailers(std::move(trailers_));
trailers_ = std::make_unique<BalsaHeaders>();
}
visitor_->OnTrailerInput(
absl::string_view(on_entry, current - on_entry));
visitor_->MessageDone();
return current - input;
}
}
if (trailers_ != nullptr) {
trailers_->WriteFromFramer(on_entry, current - on_entry);
}
visitor_->OnTrailerInput(
absl::string_view(on_entry, current - on_entry));
return current - input;
case BalsaFrameEnums::READING_UNTIL_CLOSE: {
const size_t bytes_remaining = end - current;
if (bytes_remaining > 0) {
visitor_->OnRawBodyInput(absl::string_view(current, bytes_remaining));
visitor_->OnBodyChunkInput(
absl::string_view(current, bytes_remaining));
current += bytes_remaining;
}
return current - input;
}
case BalsaFrameEnums::READING_CONTENT:
while ((content_length_remaining_ != 0u) && current < end) {
const size_t bytes_remaining = end - current;
const size_t consumed_bytes =
(content_length_remaining_ < bytes_remaining)
? content_length_remaining_
: bytes_remaining;
visitor_->OnRawBodyInput(absl::string_view(current, consumed_bytes));
visitor_->OnBodyChunkInput(
absl::string_view(current, consumed_bytes));
current += consumed_bytes;
content_length_remaining_ -= consumed_bytes;
}
if (content_length_remaining_ == 0) {
parse_state_ = BalsaFrameEnums::MESSAGE_FULLY_READ;
visitor_->MessageDone();
}
return current - input;
default:
QUICHE_LOG(FATAL) << "Unknown state: " << parse_state_
<< " memory corruption?!";
}
}
}
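// On header overflow, optionally force-terminates the truncated header
// buffer with "\r\n\r\n" so the headers gathered so far can still be parsed
// for diagnostics, then reports HEADERS_TOO_LONG.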
void BalsaFrame::HandleHeadersTooLongError() {
if (parse_truncated_headers_even_when_headers_too_long_) {
const size_t len = headers_->GetReadableBytesFromHeaderStream();
const char* stream_begin = headers_->OriginalHeaderStreamBegin();
if (last_slash_n_idx_ < len && stream_begin[last_slash_n_idx_] != '\r') {
static const absl::string_view kTwoLineEnds = "\r\n\r\n";
headers_->WriteFromFramer(kTwoLineEnds.data(), kTwoLineEnds.size());
lines_.push_back(std::make_pair(last_slash_n_idx_, len + 2));
lines_.push_back(std::make_pair(len + 2, len + 4));
}
ProcessHeaderLines(lines_, false, headers_);
}
HandleError(BalsaFrameEnums::HEADERS_TOO_LONG);
}
const int32_t BalsaFrame::kValidTerm1;
const int32_t BalsaFrame::kValidTerm1Mask;
const int32_t BalsaFrame::kValidTerm2;
const int32_t BalsaFrame::kValidTerm2Mask;
}  // namespace quiche
#undef CHAR_LT
#undef CHAR_LE
#undef CHAR_GT
#undef CHAR_GE
#undef QUICHE_DCHECK_CHAR_GE

#include "quiche/balsa/balsa_frame.h"
#include <stdlib.h>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "quiche/balsa/balsa_enums.h"
#include "quiche/balsa/balsa_headers.h"
#include "quiche/balsa/balsa_visitor_interface.h"
#include "quiche/balsa/http_validation_policy.h"
#include "quiche/balsa/noop_balsa_visitor.h"
#include "quiche/balsa/simple_buffer.h"
#include "quiche/common/platform/api/quiche_command_line_flags.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_flags.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::InSequence;
using ::testing::IsEmpty;
using ::testing::Mock;
using ::testing::NiceMock;
using ::testing::Pointee;
using ::testing::Property;
using ::testing::Range;
using ::testing::StrEq;
using ::testing::StrictMock;
DEFINE_QUICHE_COMMAND_LINE_FLAG(
    std::string, randseed, "",
    "This is the seed for the pseudo-random number generator used when "
    "generating random messages for unittests.");
namespace quiche::test {
using RandomEngine = std::mt19937;
class BalsaFrameTestPeer {
public:
static int32_t HeaderFramingFound(BalsaFrame* balsa_frame, char c) {
return balsa_frame->HeaderFramingFound(c);
}
static void FindColonsAndParseIntoKeyValue(BalsaFrame* balsa_frame,
const BalsaFrame::Lines& lines,
bool is_trailer,
BalsaHeaders* headers) {
balsa_frame->FindColonsAndParseIntoKeyValue(lines, is_trailer, headers);
}
};
class BalsaHeadersTestPeer {
public:
static void WriteFromFramer(BalsaHeaders* headers, const char* ptr,
size_t size) {
headers->WriteFromFramer(ptr, size);
}
};
namespace {
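// Makes the randomized-message tests reproducible: uses the --randseed flag
// when one is supplied, otherwise falls back to gtest's per-run random seed,
// which is logged so a failing run can be replayed.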
class TestSeed {
public:
TestSeed() : test_seed_(0), user_supplied_seed_(false) {}
void Initialize(const std::string& seed_flag) {
if (!seed_flag.empty()) {
ASSERT_TRUE(absl::SimpleAtoi(seed_flag, &test_seed_));
user_supplied_seed_ = true;
}
}
int GetSeed() const {
int seed =
(user_supplied_seed_ ? test_seed_
: testing::UnitTest::GetInstance()->random_seed());
QUICHE_LOG(INFO) << "**** The current seed is " << seed << " ****";
return seed;
}
private:
int test_seed_;
bool user_supplied_seed_;
};
static bool RandomBool(RandomEngine& rng) { return rng() % 2 != 0; }
std::string EscapeString(absl::string_view message) {
return absl::StrReplaceAll(
message, {{"\n", "\\\\n\n"}, {"\\r", "\\\\r"}, {"\\t", "\\\\t"}});
}
char random_lws(RandomEngine& rng) {
if (RandomBool(rng)) {
return '\t';
}
return ' ';
}
const char* random_line_term(RandomEngine& rng) {
if (RandomBool(rng)) {
return "\r\n";
}
return "\n";
}
void AppendRandomWhitespace(RandomEngine& rng, std::stringstream* s) {
for (int i = 0; i < 1000 && RandomBool(rng); ++i) {
*s << random_lws(rng);
}
}
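// Builds a first line from three tokens interleaved with four whitespace
// strings (leading, two separators, trailing), terminated by |line_ending|.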
std::string CreateFirstLine(const char* tokens[3], const char* whitespace[4],
const char* line_ending) {
QUICHE_CHECK(tokens != nullptr);
QUICHE_CHECK(whitespace != nullptr);
QUICHE_CHECK(line_ending != nullptr);
QUICHE_CHECK(std::string(line_ending) == "\n" ||
std::string(line_ending) == "\r\n")
<< "line_ending: " << EscapeString(line_ending);
SimpleBuffer firstline_buffer;
firstline_buffer.WriteString(whitespace[0]);
for (int i = 0; i < 3; ++i) {
firstline_buffer.WriteString(tokens[i]);
firstline_buffer.WriteString(whitespace[i + 1]);
}
firstline_buffer.WriteString(line_ending);
return std::string(firstline_buffer.GetReadableRegion());
}
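// Assembles a complete HTTP message: the first line, each header rendered as
// "key<colon>value<line_ending>", a blank line, and the body. When a key
// consists only of whitespace, the leading whitespace of |colon| is skipped
// (presumably so whitespace-only keys do not gain extra padding before the
// colon).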
std::string CreateMessage(const char* firstline,
const std::pair<std::string, std::string>* headers,
size_t headers_len, const char* colon,
const char* line_ending, const char* body) {
SimpleBuffer request_buffer;
request_buffer.WriteString(firstline);
if (headers_len > 0) {
QUICHE_CHECK(headers != nullptr);
QUICHE_CHECK(colon != nullptr);
}
QUICHE_CHECK(line_ending != nullptr);
QUICHE_CHECK(std::string(line_ending) == "\n" ||
std::string(line_ending) == "\r\n")
<< "line_ending: " << EscapeString(line_ending);
QUICHE_CHECK(body != nullptr);
for (size_t i = 0; i < headers_len; ++i) {
bool only_whitespace_in_key = true;
{
const char* tmp_key = headers[i].first.c_str();
while (*tmp_key != '\0') {
if (*tmp_key > ' ') {
only_whitespace_in_key = false;
break;
}
++tmp_key;
}
}
const char* tmp_colon = colon;
if (only_whitespace_in_key) {
while (*tmp_colon != ':') {
++tmp_colon;
}
}
request_buffer.WriteString(headers[i].first);
request_buffer.WriteString(tmp_colon);
request_buffer.WriteString(headers[i].second);
request_buffer.WriteString(line_ending);
}
request_buffer.WriteString(line_ending);
request_buffer.WriteString(body);
return std::string(request_buffer.GetReadableRegion());
}
void VerifyRequestFirstLine(const char* tokens[3],
const BalsaHeaders& headers) {
EXPECT_EQ(tokens[0], headers.request_method());
EXPECT_EQ(tokens[1], headers.request_uri());
EXPECT_EQ(0u, headers.parsed_response_code());
EXPECT_EQ(tokens[2], headers.request_version());
}
void VerifyResponseFirstLine(const char* tokens[3],
size_t expected_response_code,
const BalsaHeaders& headers) {
EXPECT_EQ(tokens[0], headers.response_version());
EXPECT_EQ(tokens[1], headers.response_code());
EXPECT_EQ(expected_response_code, headers.parsed_response_code());
EXPECT_EQ(tokens[2], headers.response_reason_phrase());
}
void VerifyHeaderLines(
const std::pair<std::string, std::string>* expected_headers,
size_t headers_len, const BalsaHeaders& headers) {
BalsaHeaders::const_header_lines_iterator it = headers.lines().begin();
for (size_t i = 0; it != headers.lines().end(); ++it, ++i) {
ASSERT_GT(headers_len, i);
std::string actual_key;
std::string actual_value;
if (!it->first.empty()) {
actual_key = std::string(it->first);
}
if (!it->second.empty()) {
actual_value = std::string(it->second);
}
EXPECT_THAT(actual_key, StrEq(expected_headers[i].first));
EXPECT_THAT(actual_value, StrEq(expected_headers[i].second));
}
EXPECT_TRUE(headers.lines().end() == it);
}
void FirstLineParsedCorrectlyHelper(const char* tokens[3],
size_t expected_response_code,
bool is_request, const char* whitespace) {
BalsaHeaders headers;
BalsaFrame framer;
framer.set_is_request(is_request);
framer.set_balsa_headers(&headers);
const char* tmp_tokens[3] = {tokens[0], tokens[1], tokens[2]};
const char* tmp_whitespace[4] = {"", whitespace, whitespace, ""};
for (int j = 2; j >= 0; --j) {
framer.Reset();
std::string firstline = CreateFirstLine(tmp_tokens, tmp_whitespace, "\n");
std::string message =
CreateMessage(firstline.c_str(), nullptr, 0, nullptr, "\n", "");
SCOPED_TRACE(absl::StrFormat("input: \n%s", EscapeString(message)));
EXPECT_GE(message.size(),
framer.ProcessInput(message.data(), message.size()));
if (is_request || j >= 1) {
EXPECT_FALSE(framer.Error());
if (is_request) {
EXPECT_TRUE(framer.MessageFullyRead());
}
if (j == 0) {
expected_response_code = 0;
}
if (is_request) {
VerifyRequestFirstLine(tmp_tokens, *framer.headers());
} else {
VerifyResponseFirstLine(tmp_tokens, expected_response_code,
*framer.headers());
}
} else {
EXPECT_TRUE(framer.Error());
}
tmp_tokens[j] = "";
tmp_whitespace[j] = "";
}
}
TEST(HTTPBalsaFrame, ParseStateToString) {
EXPECT_STREQ("ERROR",
BalsaFrameEnums::ParseStateToString(BalsaFrameEnums::ERROR));
EXPECT_STREQ("READING_HEADER_AND_FIRSTLINE",
BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_HEADER_AND_FIRSTLINE));
EXPECT_STREQ("READING_CHUNK_LENGTH",
BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_CHUNK_LENGTH));
EXPECT_STREQ("READING_CHUNK_EXTENSION",
BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_CHUNK_EXTENSION));
EXPECT_STREQ("READING_CHUNK_DATA", BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_CHUNK_DATA));
EXPECT_STREQ("READING_CHUNK_TERM", BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_CHUNK_TERM));
EXPECT_STREQ("READING_LAST_CHUNK_TERM",
BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_LAST_CHUNK_TERM));
EXPECT_STREQ("READING_TRAILER", BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_TRAILER));
EXPECT_STREQ("READING_UNTIL_CLOSE",
BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_UNTIL_CLOSE));
EXPECT_STREQ("READING_CONTENT", BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::READING_CONTENT));
EXPECT_STREQ("MESSAGE_FULLY_READ", BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::MESSAGE_FULLY_READ));
EXPECT_STREQ("UNKNOWN_STATE", BalsaFrameEnums::ParseStateToString(
BalsaFrameEnums::NUM_STATES));
EXPECT_STREQ("UNKNOWN_STATE",
BalsaFrameEnums::ParseStateToString(
static_cast<BalsaFrameEnums::ParseState>(-1)));
for (int i = 0; i < BalsaFrameEnums::NUM_STATES; ++i) {
EXPECT_STRNE("UNKNOWN_STATE",
BalsaFrameEnums::ParseStateToString(
static_cast<BalsaFrameEnums::ParseState>(i)));
}
}
TEST(HTTPBalsaFrame, ErrorCodeToString) {
EXPECT_STREQ("NO_STATUS_LINE_IN_RESPONSE",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::NO_STATUS_LINE_IN_RESPONSE));
EXPECT_STREQ("NO_REQUEST_LINE_IN_REQUEST",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::NO_REQUEST_LINE_IN_REQUEST));
EXPECT_STREQ("FAILED_TO_FIND_WS_AFTER_RESPONSE_VERSION",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_RESPONSE_VERSION));
EXPECT_STREQ("FAILED_TO_FIND_WS_AFTER_REQUEST_METHOD",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_METHOD));
EXPECT_STREQ(
"FAILED_TO_FIND_WS_AFTER_RESPONSE_STATUSCODE",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_RESPONSE_STATUSCODE));
EXPECT_STREQ(
"FAILED_TO_FIND_WS_AFTER_REQUEST_REQUEST_URI",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_REQUEST_URI));
EXPECT_STREQ(
"FAILED_TO_FIND_NL_AFTER_RESPONSE_REASON_PHRASE",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::FAILED_TO_FIND_NL_AFTER_RESPONSE_REASON_PHRASE));
EXPECT_STREQ(
"FAILED_TO_FIND_NL_AFTER_REQUEST_HTTP_VERSION",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::FAILED_TO_FIND_NL_AFTER_REQUEST_HTTP_VERSION));
EXPECT_STREQ("FAILED_CONVERTING_STATUS_CODE_TO_INT",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::FAILED_CONVERTING_STATUS_CODE_TO_INT));
EXPECT_STREQ("HEADERS_TOO_LONG", BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::HEADERS_TOO_LONG));
EXPECT_STREQ("UNPARSABLE_CONTENT_LENGTH",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH));
EXPECT_STREQ("MAYBE_BODY_BUT_NO_CONTENT_LENGTH",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::MAYBE_BODY_BUT_NO_CONTENT_LENGTH));
EXPECT_STREQ("HEADER_MISSING_COLON",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::HEADER_MISSING_COLON));
EXPECT_STREQ("INVALID_CHUNK_LENGTH",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::INVALID_CHUNK_LENGTH));
EXPECT_STREQ("CHUNK_LENGTH_OVERFLOW",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::CHUNK_LENGTH_OVERFLOW));
EXPECT_STREQ("CALLED_BYTES_SPLICED_WHEN_UNSAFE_TO_DO_SO",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::CALLED_BYTES_SPLICED_WHEN_UNSAFE_TO_DO_SO));
EXPECT_STREQ("CALLED_BYTES_SPLICED_AND_EXCEEDED_SAFE_SPLICE_AMOUNT",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::
CALLED_BYTES_SPLICED_AND_EXCEEDED_SAFE_SPLICE_AMOUNT));
EXPECT_STREQ("MULTIPLE_CONTENT_LENGTH_KEYS",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::MULTIPLE_CONTENT_LENGTH_KEYS));
EXPECT_STREQ("MULTIPLE_TRANSFER_ENCODING_KEYS",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::MULTIPLE_TRANSFER_ENCODING_KEYS));
EXPECT_STREQ("INVALID_HEADER_FORMAT",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::INVALID_HEADER_FORMAT));
EXPECT_STREQ("INVALID_TRAILER_FORMAT",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::INVALID_TRAILER_FORMAT));
EXPECT_STREQ("TRAILER_TOO_LONG", BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::TRAILER_TOO_LONG));
EXPECT_STREQ("TRAILER_MISSING_COLON",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::TRAILER_MISSING_COLON));
EXPECT_STREQ("INTERNAL_LOGIC_ERROR",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::INTERNAL_LOGIC_ERROR));
EXPECT_STREQ("INVALID_HEADER_CHARACTER",
BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::INVALID_HEADER_CHARACTER));
EXPECT_STREQ("UNKNOWN_ERROR", BalsaFrameEnums::ErrorCodeToString(
BalsaFrameEnums::NUM_ERROR_CODES));
EXPECT_STREQ("UNKNOWN_ERROR",
BalsaFrameEnums::ErrorCodeToString(
static_cast<BalsaFrameEnums::ErrorCode>(-1)));
for (int i = 0; i < BalsaFrameEnums::NUM_ERROR_CODES; ++i) {
EXPECT_STRNE("UNKNOWN_ERROR",
BalsaFrameEnums::ErrorCodeToString(
static_cast<BalsaFrameEnums::ErrorCode>(i)));
}
}
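// A copyable value-type snapshot of header key/value pairs, letting mock
// expectations compare header contents by value instead of holding pointers
// into a BalsaHeaders that may be reused.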
class FakeHeaders {
public:
struct KeyValuePair {
KeyValuePair(const std::string& key, const std::string& value)
: key(key), value(value) {}
KeyValuePair() {}
std::string key;
std::string value;
};
typedef std::vector<KeyValuePair> KeyValuePairs;
KeyValuePairs key_value_pairs_;
bool operator==(const FakeHeaders& other) const {
if (key_value_pairs_.size() != other.key_value_pairs_.size()) {
return false;
}
for (KeyValuePairs::size_type i = 0; i < key_value_pairs_.size(); ++i) {
if (key_value_pairs_[i].key != other.key_value_pairs_[i].key) {
return false;
}
if (key_value_pairs_[i].value != other.key_value_pairs_[i].value) {
return false;
}
}
return true;
}
void AddKeyValue(const std::string& key, const std::string& value) {
key_value_pairs_.push_back(KeyValuePair(key, value));
}
};
class BalsaVisitorMock : public BalsaVisitorInterface {
public:
~BalsaVisitorMock() override = default;
void ProcessHeaders(const BalsaHeaders& headers) override {
FakeHeaders fake_headers;
GenerateFakeHeaders(headers, &fake_headers);
ProcessHeaders(fake_headers);
}
void OnTrailers(std::unique_ptr<BalsaHeaders> trailers) override {
FakeHeaders fake_trailers;
GenerateFakeHeaders(*trailers, &fake_trailers);
OnTrailers(fake_trailers);
}
MOCK_METHOD(void, OnRawBodyInput, (absl::string_view input), (override));
MOCK_METHOD(void, OnBodyChunkInput, (absl::string_view input), (override));
MOCK_METHOD(void, OnHeaderInput, (absl::string_view input), (override));
MOCK_METHOD(void, OnTrailerInput, (absl::string_view input), (override));
MOCK_METHOD(void, ProcessHeaders, (const FakeHeaders& headers));
MOCK_METHOD(void, OnTrailers, (const FakeHeaders& trailers));
MOCK_METHOD(void, OnRequestFirstLineInput,
(absl::string_view line_input, absl::string_view method_input,
absl::string_view request_uri, absl::string_view version_input),
(override));
MOCK_METHOD(void, OnResponseFirstLineInput,
(absl::string_view line_input, absl::string_view version_input,
absl::string_view status_input, absl::string_view reason_input),
(override));
MOCK_METHOD(void, OnChunkLength, (size_t length), (override));
MOCK_METHOD(void, OnChunkExtensionInput, (absl::string_view input),
(override));
MOCK_METHOD(void, OnInterimHeaders, (std::unique_ptr<BalsaHeaders> headers),
(override));
MOCK_METHOD(void, ContinueHeaderDone, (), (override));
MOCK_METHOD(void, HeaderDone, (), (override));
MOCK_METHOD(void, MessageDone, (), (override));
MOCK_METHOD(void, HandleError, (BalsaFrameEnums::ErrorCode error_code),
(override));
MOCK_METHOD(void, HandleWarning, (BalsaFrameEnums::ErrorCode error_code),
(override));
private:
static void GenerateFakeHeaders(const BalsaHeaders& headers,
FakeHeaders* fake_headers) {
for (const auto& line : headers.lines()) {
fake_headers->AddKeyValue(std::string(line.first),
std::string(line.second));
}
}
};
class HTTPBalsaFrameTest : public QuicheTest {
protected:
void SetUp() override {
balsa_frame_.set_balsa_headers(&headers_);
balsa_frame_.set_balsa_visitor(&visitor_mock_);
balsa_frame_.set_is_request(true);
balsa_frame_.EnableTrailers();
}
void VerifyFirstLineParsing(const std::string& firstline,
BalsaFrameEnums::ErrorCode error_code) {
balsa_frame_.ProcessInput(firstline.data(), firstline.size());
EXPECT_EQ(error_code, balsa_frame_.ErrorCode());
}
BalsaHeaders headers_;
BalsaFrame balsa_frame_;
NiceMock<BalsaVisitorMock> visitor_mock_;
};
TEST_F(HTTPBalsaFrameTest, TestHeaderFramingFound) {
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, ' '));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\r'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\r'));
EXPECT_EQ(BalsaFrame::kValidTerm1,
BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\t'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\r'));
EXPECT_EQ(BalsaFrame::kValidTerm1,
BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, 'a'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\r'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
EXPECT_EQ(BalsaFrame::kValidTerm2,
BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '1'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
EXPECT_EQ(BalsaFrame::kValidTerm2,
BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, ':'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\r'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\r'));
EXPECT_EQ(0, BalsaFrameTestPeer::HeaderFramingFound(&balsa_frame_, '\n'));
}
TEST_F(HTTPBalsaFrameTest, MissingColonInTrailer) {
const absl::string_view trailer = "kv\r\n\r\n";
BalsaFrame::Lines lines;
lines.push_back({0, 4});
lines.push_back({4, trailer.length()});
BalsaHeaders trailers;
BalsaHeadersTestPeer::WriteFromFramer(&trailers, trailer.data(),
trailer.length());
BalsaFrameTestPeer::FindColonsAndParseIntoKeyValue(
      &balsa_frame_, lines, /*is_trailer=*/true, &trailers);
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::TRAILER_MISSING_COLON, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, FindColonsAndParseIntoKeyValueInTrailer) {
const absl::string_view trailer_line1 = "Fraction: 0.23\r\n";
const absl::string_view trailer_line2 = "Some:junk \r\n";
const absl::string_view trailer_line3 = "\r\n";
const std::string trailer =
absl::StrCat(trailer_line1, trailer_line2, trailer_line3);
BalsaFrame::Lines lines;
lines.push_back({0, trailer_line1.length()});
lines.push_back({trailer_line1.length(),
trailer_line1.length() + trailer_line2.length()});
lines.push_back(
{trailer_line1.length() + trailer_line2.length(), trailer.length()});
BalsaHeaders trailers;
BalsaHeadersTestPeer::WriteFromFramer(&trailers, trailer.data(),
trailer.length());
BalsaFrameTestPeer::FindColonsAndParseIntoKeyValue(
      &balsa_frame_, lines, /*is_trailer=*/true, &trailers);
EXPECT_FALSE(balsa_frame_.Error());
absl::string_view fraction = trailers.GetHeader("Fraction");
EXPECT_EQ("0.23", fraction);
absl::string_view some = trailers.GetHeader("Some");
EXPECT_EQ("junk", some);
}
TEST_F(HTTPBalsaFrameTest, InvalidTrailer) {
const absl::string_view trailer_line1 = "Fraction : 0.23\r\n";
const absl::string_view trailer_line2 = "Some\t :junk \r\n";
const absl::string_view trailer_line3 = "\r\n";
const std::string trailer =
absl::StrCat(trailer_line1, trailer_line2, trailer_line3);
BalsaFrame::Lines lines;
lines.push_back({0, trailer_line1.length()});
lines.push_back({trailer_line1.length(),
trailer_line1.length() + trailer_line2.length()});
lines.push_back(
{trailer_line1.length() + trailer_line2.length(), trailer.length()});
BalsaHeaders trailers;
BalsaHeadersTestPeer::WriteFromFramer(&trailers, trailer.data(),
trailer.length());
BalsaFrameTestPeer::FindColonsAndParseIntoKeyValue(
      &balsa_frame_, lines, /*is_trailer=*/true, &trailers);
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_TRAILER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, OneCharacterFirstLineParsedAsExpected) {
VerifyFirstLineParsing(
"a\r\n\r\n", BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_METHOD);
}
TEST_F(HTTPBalsaFrameTest,
OneCharacterFirstLineWithWhitespaceParsedAsExpected) {
VerifyFirstLineParsing(
"a \r\n\r\n", BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_METHOD);
}
TEST_F(HTTPBalsaFrameTest, WhitespaceOnlyFirstLineIsNotACompleteHeader) {
VerifyFirstLineParsing(" \n\n", BalsaFrameEnums::NO_REQUEST_LINE_IN_REQUEST);
}
TEST(HTTPBalsaFrame, RequestFirstLineParsedCorrectly) {
const char* request_tokens[3] = {"GET", "/jjsdjrqk", "HTTP/1.0"};
FirstLineParsedCorrectlyHelper(request_tokens, 0, true, " ");
FirstLineParsedCorrectlyHelper(request_tokens, 0, true, "\t");
FirstLineParsedCorrectlyHelper(request_tokens, 0, true, "\t ");
FirstLineParsedCorrectlyHelper(request_tokens, 0, true, " \t");
FirstLineParsedCorrectlyHelper(request_tokens, 0, true, " \t \t ");
}
TEST(HTTPBalsaFrame, RequestLineSanitizedProperly) {
SCOPED_TRACE("Testing that the request line is properly sanitized.");
using enum HttpValidationPolicy::FirstLineValidationOption;
using FirstLineValidationOption =
HttpValidationPolicy::FirstLineValidationOption;
struct TestCase {
const absl::string_view input;
const absl::string_view parsed;
FirstLineValidationOption option;
BalsaFrameEnums::ErrorCode expected_error;
};
const std::vector<TestCase> cases = {
{"GET / HTTP/1.1\r\n", "GET / HTTP/1.1", NONE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET / HTTP/1.1\r\n", "GET / HTTP/1.1", SANITIZE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET / HTTP/1.1\r\n", "GET / HTTP/1.1", REJECT,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET /\rHTTP/1.1\r\n", "GET /\rHTTP/1.1", NONE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET /\rHTTP/1.1\r\n", "GET / HTTP/1.1", SANITIZE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET /\rHTTP/1.1\r\n", "", REJECT,
BalsaFrameEnums::INVALID_WS_IN_REQUEST_LINE},
{"GET \t/ HTTP/1.1\r\n", "GET \t/ HTTP/1.1", NONE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET \t/ HTTP/1.1\r\n", "GET / HTTP/1.1", SANITIZE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET \t/ HTTP/1.1\r\n", "", REJECT,
BalsaFrameEnums::INVALID_WS_IN_REQUEST_LINE},
{"GET \t/\rHTTP/1.1 \r\n", "GET \t/\rHTTP/1.1", NONE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET \t/\rHTTP/1.1 \r\n", "GET / HTTP/1.1", SANITIZE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"GET \t/\rHTTP/1.1 \r\n", "", REJECT,
BalsaFrameEnums::INVALID_WS_IN_REQUEST_LINE},
};
const absl::string_view kHeaderLineAndEnding = "Foo: bar\r\n\r\n";
for (auto& [firstline, parsed, ws_option, expected_error] : cases) {
SCOPED_TRACE(
absl::StrCat("Input: ", absl::CEscape(firstline),
" Expected output: ", absl::CEscape(parsed),
" whitespace option: ", static_cast<int>(ws_option)));
const std::string input = absl::StrCat(firstline, kHeaderLineAndEnding);
BalsaHeaders headers;
BalsaFrame framer;
HttpValidationPolicy policy;
policy.sanitize_cr_tab_in_first_line = ws_option;
framer.set_http_validation_policy(policy);
framer.set_is_request(true);
framer.set_balsa_headers(&headers);
framer.ProcessInput(input.data(), input.size());
EXPECT_EQ(headers.first_line(), parsed);
EXPECT_EQ(framer.ErrorCode(), expected_error);
}
}
TEST_F(HTTPBalsaFrameTest, NonnumericResponseCode) {
balsa_frame_.set_is_request(false);
VerifyFirstLineParsing("HTTP/1.1 0x3 Digits only\r\n\r\n",
BalsaFrameEnums::FAILED_CONVERTING_STATUS_CODE_TO_INT);
EXPECT_EQ("HTTP/1.1 0x3 Digits only", headers_.first_line());
}
TEST_F(HTTPBalsaFrameTest, NegativeResponseCode) {
balsa_frame_.set_is_request(false);
VerifyFirstLineParsing("HTTP/1.1 -11 No sign allowed\r\n\r\n",
BalsaFrameEnums::FAILED_CONVERTING_STATUS_CODE_TO_INT);
EXPECT_EQ("HTTP/1.1 -11 No sign allowed", headers_.first_line());
}
TEST_F(HTTPBalsaFrameTest, WithoutTrailingWhitespace) {
balsa_frame_.set_is_request(false);
VerifyFirstLineParsing(
"HTTP/1.1 101\r\n\r\n",
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_RESPONSE_STATUSCODE);
EXPECT_EQ("HTTP/1.1 101", headers_.first_line());
}
TEST_F(HTTPBalsaFrameTest, TrailingWhitespace) {
balsa_frame_.set_is_request(false);
std::string firstline = "HTTP/1.1 101 \r\n\r\n";
balsa_frame_.ProcessInput(firstline.data(), firstline.size());
EXPECT_EQ("HTTP/1.1 101 ", headers_.first_line());
}
TEST(HTTPBalsaFrame, ResponseFirstLineParsedCorrectly) {
const char* response_tokens[3] = {"HTTP/1.1", "200", "A reason\tphrase"};
FirstLineParsedCorrectlyHelper(response_tokens, 200, false, " ");
FirstLineParsedCorrectlyHelper(response_tokens, 200, false, "\t");
FirstLineParsedCorrectlyHelper(response_tokens, 200, false, "\t ");
FirstLineParsedCorrectlyHelper(response_tokens, 200, false, " \t");
FirstLineParsedCorrectlyHelper(response_tokens, 200, false, " \t \t ");
response_tokens[1] = "312";
FirstLineParsedCorrectlyHelper(response_tokens, 312, false, " ");
FirstLineParsedCorrectlyHelper(response_tokens, 312, false, "\t");
FirstLineParsedCorrectlyHelper(response_tokens, 312, false, "\t ");
FirstLineParsedCorrectlyHelper(response_tokens, 312, false, " \t");
FirstLineParsedCorrectlyHelper(response_tokens, 312, false, " \t \t ");
response_tokens[1] = "4242";
FirstLineParsedCorrectlyHelper(response_tokens, 4242, false, " ");
FirstLineParsedCorrectlyHelper(response_tokens, 4242, false, "\t");
FirstLineParsedCorrectlyHelper(response_tokens, 4242, false, "\t ");
FirstLineParsedCorrectlyHelper(response_tokens, 4242, false, " \t");
FirstLineParsedCorrectlyHelper(response_tokens, 4242, false, " \t \t ");
}
TEST(HTTPBalsaFrame, StatusLineSanitizedProperly) {
SCOPED_TRACE("Testing that the status line is properly sanitized.");
using enum HttpValidationPolicy::FirstLineValidationOption;
using FirstLineValidationOption =
HttpValidationPolicy::FirstLineValidationOption;
struct TestCase {
const absl::string_view input;
const absl::string_view parsed;
FirstLineValidationOption option;
BalsaFrameEnums::ErrorCode expected_error;
};
const std::vector<TestCase> cases = {
{"HTTP/1.1 200 OK\r\n", "HTTP/1.1 200 OK", NONE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 200 OK\r\n", "HTTP/1.1 200 OK", SANITIZE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 200 OK\r\n", "HTTP/1.1 200 OK", REJECT,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 200\rOK\r\n", "HTTP/1.1 200\rOK", NONE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 200\rOK\r\n", "HTTP/1.1 200 OK", SANITIZE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 200\rOK\r\n", "", REJECT,
BalsaFrameEnums::INVALID_WS_IN_STATUS_LINE},
{"HTTP/1.1 \t200 OK\r\n", "HTTP/1.1 \t200 OK", NONE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 \t200 OK\r\n", "HTTP/1.1 200 OK", SANITIZE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 \t200 OK\r\n", "", REJECT,
BalsaFrameEnums::INVALID_WS_IN_STATUS_LINE},
{"HTTP/1.1 \t200\rOK \r\n", "HTTP/1.1 \t200\rOK", NONE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 \t200\rOK \r\n", "HTTP/1.1 200 OK", SANITIZE,
BalsaFrameEnums::BALSA_NO_ERROR},
{"HTTP/1.1 \t200\rOK \r\n", "", REJECT,
BalsaFrameEnums::INVALID_WS_IN_STATUS_LINE},
};
const absl::string_view kHeaderLineAndEnding =
"Foo: bar\r\nContent-Length: 0\r\n\r\n";
for (auto& [firstline, parsed, ws_option, expected_error] : cases) {
SCOPED_TRACE(
absl::StrCat("Input: ", absl::CEscape(firstline),
" Expected output: ", absl::CEscape(parsed),
" whitespace option: ", static_cast<int>(ws_option)));
const std::string input = absl::StrCat(firstline, kHeaderLineAndEnding);
BalsaHeaders headers;
BalsaFrame framer;
HttpValidationPolicy policy;
policy.sanitize_cr_tab_in_first_line = ws_option;
framer.set_http_validation_policy(policy);
framer.set_is_request(false);
framer.set_balsa_headers(&headers);
framer.ProcessInput(input.data(), input.size());
EXPECT_EQ(headers.first_line(), parsed);
EXPECT_EQ(framer.ErrorCode(), expected_error);
}
}
void HeaderLineTestHelper(const char* firstline, bool is_request,
const std::pair<std::string, std::string>* headers,
size_t headers_len, const char* colon,
const char* line_ending) {
BalsaHeaders balsa_headers;
BalsaFrame framer;
framer.set_is_request(is_request);
framer.set_balsa_headers(&balsa_headers);
std::string message =
CreateMessage(firstline, headers, headers_len, colon, line_ending, "");
SCOPED_TRACE(EscapeString(message));
size_t bytes_consumed = framer.ProcessInput(message.data(), message.size());
EXPECT_EQ(message.size(), bytes_consumed);
VerifyHeaderLines(headers, headers_len, *framer.headers());
}
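// Request header parsing should tolerate unusual input: colons inside
// values, bare CRs within values, folded continuation lines, and empty keys
// or values.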
TEST(HTTPBalsaFrame, RequestLinesParsedProperly) {
SCOPED_TRACE("Testing that lines are properly parsed.");
const char firstline[] = "GET / HTTP/1.1\r\n";
const std::pair<std::string, std::string> headers[] = {
std::pair<std::string, std::string>("foo", "bar"),
std::pair<std::string, std::string>("duck", "water"),
std::pair<std::string, std::string>("goose", "neck"),
std::pair<std::string, std::string>("key_is_fine",
"value:includes:colons"),
std::pair<std::string, std::string>("trucks",
"along\rvalue\rincluding\rslash\rrs"),
std::pair<std::string, std::string>("monster", "truck"),
std::pair<std::string, std::string>("another_key", ":colons in value"),
std::pair<std::string, std::string>("another_key", "colons in value:"),
std::pair<std::string, std::string>("another_key",
"value includes\r\n continuation"),
std::pair<std::string, std::string>("key_without_continuations",
"multiple\n in\r\n the\n value"),
std::pair<std::string, std::string>("key_without_value",
""),
std::pair<std::string, std::string>("",
"value without key"),
std::pair<std::string, std::string>("", ""),
std::pair<std::string, std::string>("normal_key", "normal_value"),
};
const size_t headers_len = ABSL_ARRAYSIZE(headers);
HeaderLineTestHelper(firstline, true, headers, headers_len, ":", "\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ": ", "\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ": ", "\r\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ":\t", "\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ":\t", "\r\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ":\t ", "\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ":\t ", "\r\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ":\t\t", "\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ":\t\t", "\r\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ":\t \t", "\n");
HeaderLineTestHelper(firstline, true, headers, headers_len, ":\t \t", "\r\n");
}
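// With disallow_lone_cr_in_request_headers set, a bare CR in the request
// line or in a header value must be flagged as INVALID_HEADER_CHARACTER.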
TEST(HTTPBalsaFrame, CarriageReturnIllegalInHeaders) {
HttpValidationPolicy policy{.disallow_lone_cr_in_request_headers = true};
BalsaHeaders balsa_headers;
BalsaFrame framer;
framer.set_is_request(true);
framer.set_balsa_headers(&balsa_headers);
framer.set_http_validation_policy(policy);
framer.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const std::pair<std::string, std::string> headers[] = {
std::pair<std::string, std::string>("foo", "bar"),
std::pair<std::string, std::string>("trucks", "value-has-solo-\r-in it"),
};
std::string message =
CreateMessage("GET / \rHTTP/1.1\r\n", headers, 2, ":", "\r\n", "");
framer.ProcessInput(message.data(), message.size());
EXPECT_EQ(framer.ErrorCode(), BalsaFrameEnums::INVALID_HEADER_CHARACTER);
}
TEST(HTTPBalsaFrame, CarriageReturnIllegalInFirstLineOnInputBoundary) {
HttpValidationPolicy policy{.disallow_lone_cr_in_request_headers = true};
BalsaHeaders balsa_headers;
BalsaFrame framer;
framer.set_is_request(true);
framer.set_balsa_headers(&balsa_headers);
framer.set_http_validation_policy(policy);
framer.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
constexpr absl::string_view message1("GET / \r");
constexpr absl::string_view message2("HTTP/1.1\r\n\r\n");
EXPECT_EQ(message1.size(),
framer.ProcessInput(message1.data(), message1.size()));
EXPECT_EQ(message2.size(),
framer.ProcessInput(message2.data(), message2.size()));
EXPECT_EQ(framer.ErrorCode(), BalsaFrameEnums::INVALID_HEADER_CHARACTER);
}
TEST(HTTPBalsaFrame, CarriageReturnIllegalInHeaderValueOnInputBoundary) {
HttpValidationPolicy policy{.disallow_lone_cr_in_request_headers = true};
BalsaHeaders balsa_headers;
BalsaFrame framer;
framer.set_is_request(true);
framer.set_balsa_headers(&balsa_headers);
framer.set_http_validation_policy(policy);
framer.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
constexpr absl::string_view message1("GET / HTTP/1.1\r\nfoo: b\r");
constexpr absl::string_view message2("ar\r\n\r\n");
EXPECT_EQ(message1.size(),
framer.ProcessInput(message1.data(), message1.size()));
EXPECT_EQ(message2.size(),
framer.ProcessInput(message2.data(), message2.size()));
EXPECT_EQ(framer.ErrorCode(), BalsaFrameEnums::INVALID_HEADER_CHARACTER);
}
TEST(HTTPBalsaFrame, CarriageReturnIllegalInHeaderKey) {
BalsaHeaders balsa_headers;
BalsaFrame framer;
framer.set_is_request(true);
framer.set_balsa_headers(&balsa_headers);
framer.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const std::pair<std::string, std::string> headers[] = {
std::pair<std::string, std::string>("tru\rcks", "along"),
};
std::string message =
CreateMessage("GET / HTTP/1.1\r\n", headers, 1, ":", "\r\n", "");
framer.ProcessInput(message.data(), message.size());
EXPECT_EQ(framer.ErrorCode(), BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER);
}
TEST(HTTPBalsaFrame, ResponseLinesParsedProperly) {
SCOPED_TRACE("ResponseLineParsedProperly");
const char firstline[] = "HTTP/1.0 200 A reason\tphrase\r\n";
const std::pair<std::string, std::string> headers[] = {
std::pair<std::string, std::string>("foo", "bar"),
std::pair<std::string, std::string>("duck", "water"),
std::pair<std::string, std::string>("goose", "neck"),
std::pair<std::string, std::string>("key_is_fine",
"value:includes:colons"),
std::pair<std::string, std::string>("trucks",
"along\rvalue\rincluding\rslash\rrs"),
std::pair<std::string, std::string>("monster", "truck"),
std::pair<std::string, std::string>("another_key", ":colons in value"),
std::pair<std::string, std::string>("another_key", "colons in value:"),
std::pair<std::string, std::string>("another_key",
"value includes\r\n continuation"),
std::pair<std::string, std::string>("key_includes_no_continuations",
"multiple\n in\r\n the\n value"),
std::pair<std::string, std::string>("key_without_value",
""),
std::pair<std::string, std::string>("",
"value without key"),
std::pair<std::string, std::string>("", ""),
std::pair<std::string, std::string>("normal_key", "normal_value"),
};
const size_t headers_len = ABSL_ARRAYSIZE(headers);
HeaderLineTestHelper(firstline, false, headers, headers_len, ":", "\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ": ", "\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ": ", "\r\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ":\t", "\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ":\t", "\r\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ":\t ", "\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ":\t ", "\r\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ":\t\t", "\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ":\t\t", "\r\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ":\t \t", "\n");
HeaderLineTestHelper(firstline, false, headers, headers_len, ":\t \t",
"\r\n");
}
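// Runs `message` through a fresh BalsaFrame and checks both the terminal
// error flag and the specific error code against expectations.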
void WhitespaceHeaderTestHelper(
const std::string& message, bool is_request,
BalsaFrameEnums::ErrorCode expected_error_code) {
BalsaHeaders balsa_headers;
BalsaFrame framer;
framer.set_is_request(is_request);
framer.set_balsa_headers(&balsa_headers);
SCOPED_TRACE(EscapeString(message));
size_t bytes_consumed = framer.ProcessInput(message.data(), message.size());
EXPECT_EQ(message.size(), bytes_consumed);
  if (expected_error_code == BalsaFrameEnums::BALSA_NO_ERROR) {
    EXPECT_FALSE(framer.Error());
  } else {
    EXPECT_TRUE(framer.Error());
  }
EXPECT_EQ(expected_error_code, framer.ErrorCode());
}
TEST(HTTPBalsaFrame, WhitespaceInRequestsProcessedProperly) {
  SCOPED_TRACE(
      "Test that a request whose header block contains a line of only "
      "spaces generates an error.");
WhitespaceHeaderTestHelper(
"GET / HTTP/1.1\r\n"
" \r\n"
"\r\n",
true, BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER);
WhitespaceHeaderTestHelper(
"GET / HTTP/1.1\r\n"
" \r\n"
"test: test\r\n"
"\r\n",
true, BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER);
SCOPED_TRACE("Test proper handling for line continuation in requests.");
WhitespaceHeaderTestHelper(
"GET / HTTP/1.1\r\n"
"test: test\r\n"
" continued\r\n"
"\r\n",
true, BalsaFrameEnums::BALSA_NO_ERROR);
WhitespaceHeaderTestHelper(
"GET / HTTP/1.1\r\n"
"test: test\r\n"
" \r\n"
"\r\n",
true, BalsaFrameEnums::BALSA_NO_ERROR);
SCOPED_TRACE(
"Test a confusing and ambiguous case: is it a line continuation or a new "
"header field?");
WhitespaceHeaderTestHelper(
"GET / HTTP/1.1\r\n"
"test: test\r\n"
" confusing:continued\r\n"
"\r\n",
true, BalsaFrameEnums::BALSA_NO_ERROR);
}
TEST(HTTPBalsaFrame, WhitespaceInResponsesProcessedProperly) {
  SCOPED_TRACE(
      "Test that a response whose header block contains a line of only "
      "spaces generates an error.");
WhitespaceHeaderTestHelper(
"HTTP/1.0 200 Reason\r\n"
" \r\nContent-Length: 0\r\n"
"\r\n",
false, BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER);
SCOPED_TRACE("Test proper handling for line continuation in responses.");
WhitespaceHeaderTestHelper(
"HTTP/1.0 200 Reason\r\n"
"test: test\r\n"
" continued\r\n"
"Content-Length: 0\r\n"
"\r\n",
false, BalsaFrameEnums::BALSA_NO_ERROR);
WhitespaceHeaderTestHelper(
"HTTP/1.0 200 Reason\r\n"
"test: test\r\n"
" \r\n"
"Content-Length: 0\r\n"
"\r\n",
false, BalsaFrameEnums::BALSA_NO_ERROR);
SCOPED_TRACE(
"Test a confusing and ambiguous case: is it a line continuation or a new "
"header field?");
WhitespaceHeaderTestHelper(
"HTTP/1.0 200 Reason\r\n"
"test: test\r\n"
" confusing:continued\r\n"
"Content-Length: 0\r\n"
"\r\n",
false, BalsaFrameEnums::BALSA_NO_ERROR);
}
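// The TEST_F cases below verify visitor callback ordering: for a trivial
// request the framer must emit the first-line callback, ProcessHeaders,
// HeaderDone, and MessageDone in sequence.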
TEST_F(HTTPBalsaFrameTest, VisitorInvokedProperlyForTrivialRequest) {
std::string message = "GET /foobar HTTP/1.0\r\n\n";
FakeHeaders fake_headers;
{
InSequence s;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("GET /foobar HTTP/1.0", "GET",
"/foobar", "HTTP/1.0"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
}
TEST_F(HTTPBalsaFrameTest, VisitorInvokedProperlyForRequestWithBlankLines) {
std::string message = "\n\n\r\n\nGET /foobar HTTP/1.0\r\n\n";
FakeHeaders fake_headers;
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("GET /foobar HTTP/1.0", "GET",
"/foobar", "HTTP/1.0"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput("GET /foobar HTTP/1.0\r\n\n"));
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForRequestWithSplitBlankLines) {
std::string blanks =
"\n"
"\n"
"\r\n"
"\n";
std::string header_input = "GET /foobar HTTP/1.0\r\n\n";
FakeHeaders fake_headers;
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("GET /foobar HTTP/1.0", "GET",
"/foobar", "HTTP/1.0"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput("GET /foobar HTTP/1.0\r\n\n"));
ASSERT_EQ(blanks.size(),
balsa_frame_.ProcessInput(blanks.data(), blanks.size()));
ASSERT_EQ(header_input.size(), balsa_frame_.ProcessInput(
header_input.data(), header_input.size()));
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForRequestWithZeroContentLength) {
std::string message =
"PUT /search?q=fo HTTP/1.1\n"
"content-length: 0 \n"
"\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("content-length", "0");
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("PUT /search?q=fo HTTP/1.1", "PUT",
"/search?q=fo", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForRequestWithMissingContentLength) {
std::string message =
"PUT /search?q=fo HTTP/1.1\n"
"\n";
  auto error_code = BalsaFrameEnums::REQUIRED_BODY_BUT_NO_CONTENT_LENGTH;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(error_code, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, ContentLengthNotRequired) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.require_content_length_if_body_required = false;
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string message =
"PUT /search?q=fo HTTP/1.1\n"
"\n";
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForPermittedMissingContentLength) {
std::string message =
"PUT /search?q=fo HTTP/1.1\n"
"\n";
FakeHeaders fake_headers;
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("PUT /search?q=fo HTTP/1.1", "PUT",
"/search?q=fo", "HTTP/1.1"));
}
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
}
TEST_F(HTTPBalsaFrameTest, NothingBadHappensWhenNothingInConnectionLine) {
std::string message =
"PUT \t /search?q=fo \t HTTP/1.1 \t \r\n"
"Connection:\r\n"
"content-length: 0\r\n"
"\r\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("Connection", "");
fake_headers.AddKeyValue("content-length", "0");
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("PUT \t /search?q=fo \t HTTP/1.1",
"PUT", "/search?q=fo", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
}
TEST_F(HTTPBalsaFrameTest, NothingBadHappensWhenOnlyCommentsInConnectionLine) {
std::string message =
"PUT \t /search?q=fo \t HTTP/1.1 \t \r\n"
"Connection: ,,,,,,,,\r\n"
"content-length: 0\r\n"
"\r\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("Connection", ",,,,,,,,");
fake_headers.AddKeyValue("content-length", "0");
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("PUT \t /search?q=fo \t HTTP/1.1",
"PUT", "/search?q=fo", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForRequestWithZeroContentLengthMk2) {
std::string message =
"PUT \t /search?q=fo \t HTTP/1.1 \t \r\n"
"Connection: \t close \t\r\n"
"content-length: \t\t 0 \t\t \r\n"
"\r\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("Connection", "close");
fake_headers.AddKeyValue("content-length", "0");
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("PUT \t /search?q=fo \t HTTP/1.1",
"PUT", "/search?q=fo", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
}
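// Parsing a complete chunked request must succeed even when no visitor is
// attached to the framer.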
TEST_F(HTTPBalsaFrameTest, NothingBadHappensWhenNoVisitorIsAssigned) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\r\n";
balsa_frame_.set_balsa_visitor(nullptr);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, RequestWithTrailers) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\r\n";
InSequence s;
FakeHeaders fake_headers;
fake_headers.AddKeyValue("Connection", "close");
fake_headers.AddKeyValue("transfer-encoding", "chunked");
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
testing::Mock::VerifyAndClearExpectations(&visitor_mock_);
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
FakeHeaders fake_trailers;
fake_trailers.AddKeyValue("crass", "monkeys");
fake_trailers.AddKeyValue("funky", "monkeys");
EXPECT_CALL(visitor_mock_, OnTrailers(fake_trailers));
EXPECT_CALL(visitor_mock_, OnTrailerInput(_)).Times(AtLeast(1));
EXPECT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, NothingBadHappensWhenNoVisitorIsAssignedInResponse) {
std::string headers =
"HTTP/1.1 502 Bad Gateway\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.set_balsa_visitor(nullptr);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, TransferEncodingIdentityIsIgnored) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: identity\r\n"
"content-length: 10\r\n"
"\r\n";
std::string body = "1234567890";
std::string message = (headers + body);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
ASSERT_EQ(body.size(), balsa_frame_.ProcessInput(body.data(), body.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
NothingBadHappensWhenAVisitorIsChangedToNULLInMidParsing) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\n";
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
balsa_frame_.set_balsa_visitor(nullptr);
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
ASSERT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
NothingBadHappensWhenAVisitorIsChangedToNULLInMidParsingInTrailer) {
std::string headers =
"HTTP/1.1 503 Server Not Available\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\n";
balsa_frame_.set_is_request(false);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
balsa_frame_.set_balsa_visitor(nullptr);
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
ASSERT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
NothingBadHappensWhenNoVisitorAssignedAndChunkingErrorOccurs) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\n";
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
balsa_frame_.set_balsa_visitor(nullptr);
EXPECT_GE(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::CHUNK_LENGTH_OVERFLOW, balsa_frame_.ErrorCode());
}
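// A semicolon in the chunk-size line starts a chunk extension; the framer
// must report the numeric length and the raw extension text separately.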
TEST_F(HTTPBalsaFrameTest, FramerRecognizesSemicolonAsChunkSizeDelimiter) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"8; foo=bar\r\n"
"deadbeef\r\n"
"0\r\n"
"\r\n";
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
balsa_frame_.set_balsa_visitor(&visitor_mock_);
EXPECT_CALL(visitor_mock_, OnChunkLength(8));
EXPECT_CALL(visitor_mock_, OnChunkLength(0));
EXPECT_CALL(visitor_mock_, OnChunkExtensionInput("; foo=bar"));
EXPECT_CALL(visitor_mock_, OnChunkExtensionInput(""));
EXPECT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest, NonAsciiCharacterInChunkLength) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"555\xAB\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("Connection", "close");
fake_headers.AddKeyValue("transfer-encoding", "chunked");
auto error_code = BalsaFrameEnums::INVALID_CHUNK_LENGTH;
{
InSequence s1;
EXPECT_CALL(visitor_mock_, OnRequestFirstLineInput("GET / HTTP/1.1", "GET",
"/", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnRawBodyInput("555\xAB"));
EXPECT_CALL(visitor_mock_, HandleError(error_code));
}
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
EXPECT_EQ(strlen("555\xAB"),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_CHUNK_LENGTH, balsa_frame_.ErrorCode());
}
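// A chunk-size field with too many hex digits must stop the parse with
// CHUNK_LENGTH_OVERFLOW, consuming only the bytes examined so far.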
TEST_F(HTTPBalsaFrameTest, VisitorCalledAsExpectedWhenChunkingOverflowOccurs) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\n";
const char* chunk_read_before_overflow = "FFFFFFFFFFFFFFFFF";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("Connection", "close");
fake_headers.AddKeyValue("transfer-encoding", "chunked");
auto error_code = BalsaFrameEnums::CHUNK_LENGTH_OVERFLOW;
{
InSequence s1;
EXPECT_CALL(visitor_mock_, OnRequestFirstLineInput("GET / HTTP/1.1", "GET",
"/", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnRawBodyInput(chunk_read_before_overflow));
EXPECT_CALL(visitor_mock_, HandleError(error_code));
}
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
EXPECT_EQ(strlen(chunk_read_before_overflow),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::CHUNK_LENGTH_OVERFLOW, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
VisitorCalledAsExpectedWhenInvalidChunkLengthOccurs) {
std::string headers =
"GET / HTTP/1.1\r\n"
"Connection: close\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"12z123 \r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("Connection", "close");
fake_headers.AddKeyValue("transfer-encoding", "chunked");
auto error_code = BalsaFrameEnums::INVALID_CHUNK_LENGTH;
{
InSequence s1;
EXPECT_CALL(visitor_mock_, OnRequestFirstLineInput("GET / HTTP/1.1", "GET",
"/", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnRawBodyInput("12z"));
EXPECT_CALL(visitor_mock_, HandleError(error_code));
}
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
EXPECT_EQ(3u, balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_CHUNK_LENGTH, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, VisitorInvokedProperlyForRequestWithContentLength) {
std::string message_headers =
"PUT \t /search?q=fo \t HTTP/1.1 \t \r\n"
"content-length: \t\t 20 \t\t \r\n"
"\r\n";
std::string message_body = "12345678901234567890";
std::string message =
std::string(message_headers) + std::string(message_body);
FakeHeaders fake_headers;
fake_headers.AddKeyValue("content-length", "20");
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("PUT \t /search?q=fo \t HTTP/1.1",
"PUT", "/search?q=fo", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnRawBodyInput(message_body));
EXPECT_CALL(visitor_mock_, OnBodyChunkInput(message_body));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message_headers));
ASSERT_EQ(message_headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
ASSERT_EQ(message_body.size(),
balsa_frame_.ProcessInput(message.data() + message_headers.size(),
message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForRequestWithOneCharContentLength) {
std::string message_headers =
"PUT \t /search?q=fo \t HTTP/1.1 \t \r\n"
"content-length: \t\t 2 \t\t \r\n"
"\r\n";
std::string message_body = "12";
std::string message =
std::string(message_headers) + std::string(message_body);
FakeHeaders fake_headers;
fake_headers.AddKeyValue("content-length", "2");
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("PUT \t /search?q=fo \t HTTP/1.1",
"PUT", "/search?q=fo", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnRawBodyInput(message_body));
EXPECT_CALL(visitor_mock_, OnBodyChunkInput(message_body));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message_headers));
ASSERT_EQ(message_headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
ASSERT_EQ(message_body.size(),
balsa_frame_.ProcessInput(message.data() + message_headers.size(),
message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
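// With disallow_lone_cr_in_chunk_extension set, a bare CR inside a chunk
// extension must be rejected with INVALID_CHUNK_EXTENSION.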
TEST_F(HTTPBalsaFrameTest, InvalidChunkExtensionWithCarriageReturn) {
balsa_frame_.set_http_validation_policy(
HttpValidationPolicy{.disallow_lone_cr_in_chunk_extension = true});
std::string message_headers =
"POST /potato?salad=withmayo HTTP/1.1\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string message_body =
"9; bad\rextension\r\n"
"012345678\r\n"
"0\r\n"
"\r\n";
std::string message =
std::string(message_headers) + std::string(message_body);
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_CHUNK_EXTENSION));
ASSERT_EQ(message_headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
balsa_frame_.ProcessInput(message.data() + message_headers.size(),
message.size());
}
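// A CRLF split across two ProcessInput calls while parsing the chunk-size
// line must still be accepted as a valid line terminator.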
TEST_F(HTTPBalsaFrameTest, ChunkExtensionCarriageReturnLineFeedAtBoundary) {
balsa_frame_.set_http_validation_policy(
HttpValidationPolicy{.disallow_lone_cr_in_chunk_extension = true});
EXPECT_CALL(visitor_mock_, ProcessHeaders(_));
EXPECT_CALL(visitor_mock_, HeaderDone());
constexpr absl::string_view headers(
"POST / HTTP/1.1\r\n"
"transfer-encoding: chunked\r\n\r\n");
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
constexpr absl::string_view body1("3\r");
ASSERT_EQ(body1.size(),
balsa_frame_.ProcessInput(body1.data(), body1.size()));
ASSERT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
constexpr absl::string_view body2(
"\nfoo\r\n"
"0\r\n\r\n");
EXPECT_CALL(visitor_mock_, OnBodyChunkInput("foo"));
EXPECT_CALL(visitor_mock_, MessageDone());
ASSERT_EQ(body2.size(),
balsa_frame_.ProcessInput(body2.data(), body2.size()));
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
}
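// Conversely, a CR at the end of one input buffer followed by a non-LF byte
// in the next is a lone CR and must be rejected with
// INVALID_CHUNK_EXTENSION.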
TEST_F(HTTPBalsaFrameTest, ChunkExtensionLoneCarriageReturnAtBoundary) {
balsa_frame_.set_http_validation_policy(
HttpValidationPolicy{.disallow_lone_cr_in_chunk_extension = true});
EXPECT_CALL(visitor_mock_, ProcessHeaders(_));
EXPECT_CALL(visitor_mock_, HeaderDone());
constexpr absl::string_view headers(
"POST / HTTP/1.1\r\n"
"transfer-encoding: chunked\r\n\r\n");
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
constexpr absl::string_view body1("3\r");
ASSERT_EQ(body1.size(),
balsa_frame_.ProcessInput(body1.data(), body1.size()));
ASSERT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
constexpr absl::string_view body2("a");
EXPECT_EQ(0, balsa_frame_.ProcessInput(body2.data(), body2.size()));
EXPECT_EQ(BalsaFrameEnums::INVALID_CHUNK_EXTENSION, balsa_frame_.ErrorCode());
}
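// End-to-end chunked request: verifies the reported chunk lengths and
// extension text, and that the reassembled body equals the concatenation of
// the chunk payloads.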
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForRequestWithTransferEncoding) {
std::string message_headers =
"DELETE /search?q=fo \t HTTP/1.1 \t \r\n"
"trAnsfer-eNcoding: chunked\r\n"
"\r\n";
std::string message_body =
"A chunkjed extension \r\n"
"01234567890 more crud including numbers 123123\r\n"
"3f\n"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n"
"0 last one\r\n"
"\r\n";
std::string message_body_data =
"0123456789"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
std::string message =
std::string(message_headers) + std::string(message_body);
FakeHeaders fake_headers;
fake_headers.AddKeyValue("trAnsfer-eNcoding", "chunked");
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("DELETE /search?q=fo \t HTTP/1.1",
"DELETE", "/search?q=fo", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnChunkLength(10));
EXPECT_CALL(visitor_mock_,
OnChunkExtensionInput(" chunkjed extension "));
EXPECT_CALL(visitor_mock_, OnChunkLength(63));
EXPECT_CALL(visitor_mock_, OnChunkExtensionInput(""));
EXPECT_CALL(visitor_mock_, OnChunkLength(0));
EXPECT_CALL(visitor_mock_, OnChunkExtensionInput(" last one"));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message_headers));
std::string body_input;
EXPECT_CALL(visitor_mock_, OnRawBodyInput(_))
.WillRepeatedly([&body_input](absl::string_view input) {
absl::StrAppend(&body_input, input);
});
std::string body_data;
EXPECT_CALL(visitor_mock_, OnBodyChunkInput(_))
.WillRepeatedly([&body_data](absl::string_view input) {
absl::StrAppend(&body_data, input);
});
EXPECT_CALL(visitor_mock_, OnTrailerInput(_)).Times(0);
ASSERT_EQ(message_headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_EQ(message_body.size(),
balsa_frame_.ProcessInput(message.data() + message_headers.size(),
message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_EQ(message_body, body_input);
EXPECT_EQ(message_body_data, body_data);
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForRequestWithTransferEncodingAndTrailers) {
std::string message_headers =
"DELETE /search?q=fo \t HTTP/1.1 \t \r\n"
"trAnsfer-eNcoding: chunked\r\n"
"another_random_header: \r\n"
" \t \n"
" \t includes a continuation\n"
"\r\n";
std::string message_body =
"A chunkjed extension \r\n"
"01234567890 more crud including numbers 123123\r\n"
"3f\n"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n"
"1 \r\n"
"x \r\n"
"0 last one\r\n";
std::string trailer_data =
"a_trailer_key: and a trailer value\r\n"
"\r\n";
std::string message_body_data =
"0123456789"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
std::string message = (std::string(message_headers) +
std::string(message_body) + std::string(trailer_data));
FakeHeaders fake_headers;
fake_headers.AddKeyValue("trAnsfer-eNcoding", "chunked");
fake_headers.AddKeyValue("another_random_header", "includes a continuation");
FakeHeaders fake_trailers;
fake_trailers.AddKeyValue("a_trailer_key", "and a trailer value");
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("DELETE /search?q=fo \t HTTP/1.1",
"DELETE", "/search?q=fo", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnChunkLength(10));
EXPECT_CALL(visitor_mock_, OnChunkLength(63));
EXPECT_CALL(visitor_mock_, OnChunkLength(1));
EXPECT_CALL(visitor_mock_, OnChunkLength(0));
EXPECT_CALL(visitor_mock_, OnTrailers(fake_trailers));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message_headers));
std::string body_input;
EXPECT_CALL(visitor_mock_, OnRawBodyInput(_))
.WillRepeatedly([&body_input](absl::string_view input) {
absl::StrAppend(&body_input, input);
});
std::string body_data;
EXPECT_CALL(visitor_mock_, OnBodyChunkInput(_))
.WillRepeatedly([&body_data](absl::string_view input) {
absl::StrAppend(&body_data, input);
});
EXPECT_CALL(visitor_mock_, OnTrailerInput(trailer_data));
EXPECT_CALL(visitor_mock_, OnChunkExtensionInput(_)).Times(AnyNumber());
ASSERT_EQ(message_headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_EQ(message_body.size() + trailer_data.size(),
balsa_frame_.ProcessInput(message.data() + message_headers.size(),
message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_EQ(message_body, body_input);
EXPECT_EQ(message_body_data, body_data);
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyWithRequestFirstLineWarningWithOnlyMethod) {
std::string message = "GET\n";
FakeHeaders fake_headers;
auto error_code = BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_METHOD;
{
InSequence s;
EXPECT_CALL(visitor_mock_, HandleWarning(error_code));
EXPECT_CALL(visitor_mock_, OnRequestFirstLineInput("GET", "GET", "", ""));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_METHOD,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyWithRequestFirstLineWarningWithOnlyMethodAndWS) {
std::string message = "GET \n";
FakeHeaders fake_headers;
auto error_code = BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_METHOD;
{
InSequence s;
EXPECT_CALL(visitor_mock_, HandleWarning(error_code));
EXPECT_CALL(visitor_mock_, OnRequestFirstLineInput("GET ", "GET", "", ""));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_METHOD,
balsa_frame_.ErrorCode());
}
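// An absolute-form target URI (scheme://authority/path) is accepted and
// exposed verbatim through request_uri().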
TEST_F(HTTPBalsaFrameTest, AbsoluteFormTargetUri) {
std::string message =
"GET http:
"Host: example.com\r\n"
"\r\n";
balsa_frame_.set_is_request(true);
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_EQ("http:
balsa_frame_.headers()->request_uri());
EXPECT_EQ("example.com", balsa_frame_.headers()->GetHeader("host"));
}
TEST_F(HTTPBalsaFrameTest, InvalidAbsoluteFormTargetUri) {
std::string message =
"GET -pwn/index.html HTTP/1.1\r\n"
"Host: example.com\r\n"
"\r\n";
balsa_frame_.set_is_request(true);
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.is_valid_target_uri());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_EQ("-pwn/index.html", balsa_frame_.headers()->request_uri());
EXPECT_EQ("example.com", balsa_frame_.headers()->GetHeader("host"));
}
TEST_F(HTTPBalsaFrameTest, RejectInvalidAbsoluteFormTargetUri) {
HttpValidationPolicy http_validation_policy{.disallow_invalid_target_uris =
true};
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string message =
"GET -pwn/index.html HTTP/1.1\r\n"
"Host: example.com\r\n"
"\r\n";
balsa_frame_.set_is_request(true);
const size_t end_of_first_line = message.find_first_of("\r\n") + 1;
EXPECT_EQ(end_of_first_line,
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_TARGET_URI, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, RejectStarForNonOptions) {
HttpValidationPolicy http_validation_policy{.disallow_invalid_target_uris =
true};
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string message =
"GET * HTTP/1.1\r\n"
"Host: example.com\r\n"
"\r\n";
balsa_frame_.set_is_request(true);
const size_t end_of_first_line = message.find_first_of("\r\n") + 1;
EXPECT_EQ(end_of_first_line,
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_TARGET_URI, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, AllowStarForOptions) {
HttpValidationPolicy http_validation_policy{.disallow_invalid_target_uris =
true};
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string message =
"OPTIONS * HTTP/1.1\r\n"
"Host: example.com\r\n"
"\r\n";
balsa_frame_.set_is_request(true);
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
}
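// CONNECT requires an authority-form target (host:port); with
// disallow_invalid_target_uris set, a missing or non-numeric port is
// rejected as INVALID_TARGET_URI.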
TEST_F(HTTPBalsaFrameTest, RejectConnectWithNoPort) {
HttpValidationPolicy http_validation_policy{.disallow_invalid_target_uris =
true};
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string message =
"CONNECT example.com HTTP/1.1\r\n"
"Host: example.com\r\n"
"\r\n";
balsa_frame_.set_is_request(true);
const size_t end_of_first_line = message.find_first_of("\r\n") + 1;
EXPECT_EQ(end_of_first_line,
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_TARGET_URI, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, RejectConnectWithInvalidPort) {
HttpValidationPolicy http_validation_policy{.disallow_invalid_target_uris =
true};
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string message =
"CONNECT example.com:443z HTTP/1.1\r\n"
"Host: example.com\r\n"
"\r\n";
balsa_frame_.set_is_request(true);
const size_t end_of_first_line = message.find_first_of("\r\n") + 1;
EXPECT_EQ(end_of_first_line,
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_TARGET_URI, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, AllowConnectWithValidPort) {
HttpValidationPolicy http_validation_policy{.disallow_invalid_target_uris =
true};
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string message =
"CONNECT example.com:443 HTTP/1.1\r\n"
"Host: example.com\r\n"
"\r\n";
balsa_frame_.set_is_request(true);
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyWithRequestFirstLineWarningWithMethodAndURI) {
std::string message = "GET /uri\n";
FakeHeaders fake_headers;
auto error_code =
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_REQUEST_URI;
{
InSequence s;
EXPECT_CALL(visitor_mock_, HandleWarning(error_code));
EXPECT_CALL(visitor_mock_,
OnRequestFirstLineInput("GET /uri", "GET", "/uri", ""));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_REQUEST_URI,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, VisitorInvokedProperlyWithResponseFirstLineError) {
std::string message = "HTTP/1.1\n\n";
FakeHeaders fake_headers;
balsa_frame_.set_is_request(false);
auto error_code = BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_RESPONSE_VERSION;
{
InSequence s;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
EXPECT_CALL(visitor_mock_, OnRequestFirstLineInput).Times(0);
EXPECT_CALL(visitor_mock_, ProcessHeaders(_)).Times(0);
EXPECT_CALL(visitor_mock_, HeaderDone()).Times(0);
EXPECT_CALL(visitor_mock_, MessageDone()).Times(0);
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(_)).Times(0);
EXPECT_GE(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_RESPONSE_VERSION,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, FlagsErrorWithContentLengthOverflow) {
std::string message =
"HTTP/1.0 200 OK\r\n"
"content-length: 9999999999999999999999999999999999999999\n"
"\n";
balsa_frame_.set_is_request(false);
auto error_code = BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, FlagsErrorWithInvalidResponseCode) {
std::string message =
"HTTP/1.0 x OK\r\n"
"\n";
balsa_frame_.set_is_request(false);
auto error_code = BalsaFrameEnums::FAILED_CONVERTING_STATUS_CODE_TO_INT;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
EXPECT_GE(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::FAILED_CONVERTING_STATUS_CODE_TO_INT,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, FlagsErrorWithOverflowingResponseCode) {
std::string message =
"HTTP/1.0 999999999999999999999999999999999999999 OK\r\n"
"\n";
balsa_frame_.set_is_request(false);
auto error_code = BalsaFrameEnums::FAILED_CONVERTING_STATUS_CODE_TO_INT;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
EXPECT_GE(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::FAILED_CONVERTING_STATUS_CODE_TO_INT,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, FlagsErrorWithInvalidContentLength) {
std::string message =
"HTTP/1.0 200 OK\r\n"
"content-length: xxx\n"
"\n";
balsa_frame_.set_is_request(false);
auto error_code = BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, FlagsErrorWithNegativeContentLengthValue) {
std::string message =
"HTTP/1.0 200 OK\r\n"
"content-length: -20\n"
"\n";
balsa_frame_.set_is_request(false);
auto error_code = BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, FlagsErrorWithEmptyContentLengthValue) {
std::string message =
"HTTP/1.0 200 OK\r\n"
"content-length: \n"
"\n";
balsa_frame_.set_is_request(false);
auto error_code = BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::UNPARSABLE_CONTENT_LENGTH,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, VisitorInvokedProperlyForTrivialResponse) {
std::string message =
"HTTP/1.0 200 OK\r\n"
"content-length: 0\n"
"\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("content-length", "0");
balsa_frame_.set_is_request(false);
{
InSequence s;
EXPECT_CALL(visitor_mock_, OnResponseFirstLineInput(
"HTTP/1.0 200 OK", "HTTP/1.0", "200", "OK"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForResponseWithSplitBlankLines) {
std::string blanks =
"\n"
"\r\n"
"\r\n";
std::string header_input =
"HTTP/1.0 200 OK\r\n"
"content-length: 0\n"
"\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("content-length", "0");
balsa_frame_.set_is_request(false);
{
InSequence s;
EXPECT_CALL(visitor_mock_, OnResponseFirstLineInput(
"HTTP/1.0 200 OK", "HTTP/1.0", "200", "OK"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(header_input));
EXPECT_EQ(blanks.size(),
balsa_frame_.ProcessInput(blanks.data(), blanks.size()));
EXPECT_EQ(header_input.size(), balsa_frame_.ProcessInput(
header_input.data(), header_input.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, VisitorInvokedProperlyForResponseWithBlankLines) {
std::string blanks =
"\n"
"\r\n"
"\n"
"\n"
"\r\n"
"\r\n";
std::string header_input =
"HTTP/1.0 200 OK\r\n"
"content-length: 0\n"
"\n";
std::string message = blanks + header_input;
FakeHeaders fake_headers;
fake_headers.AddKeyValue("content-length", "0");
balsa_frame_.set_is_request(false);
{
InSequence s;
EXPECT_CALL(visitor_mock_, OnResponseFirstLineInput(
"HTTP/1.0 200 OK", "HTTP/1.0", "200", "OK"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(header_input));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, VisitorInvokedProperlyForResponseWithContentLength) {
std::string message_headers =
"HTTP/1.1 \t 200 Ok all is well\r\n"
"content-length: \t\t 20 \t\t \r\n"
"\r\n";
std::string message_body = "12345678901234567890";
std::string message =
std::string(message_headers) + std::string(message_body);
FakeHeaders fake_headers;
fake_headers.AddKeyValue("content-length", "20");
balsa_frame_.set_is_request(false);
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnResponseFirstLineInput("HTTP/1.1 \t 200 Ok all is well",
"HTTP/1.1", "200", "Ok all is well"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnRawBodyInput(message_body));
EXPECT_CALL(visitor_mock_, OnBodyChunkInput(message_body));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message_headers));
ASSERT_EQ(message_headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_EQ(message_body.size(),
balsa_frame_.ProcessInput(message.data() + message_headers.size(),
message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForResponseWithTransferEncoding) {
std::string message_headers =
"HTTP/1.1 \t 200 Ok all is well\r\n"
"trAnsfer-eNcoding: chunked\r\n"
"\r\n";
std::string message_body =
"A chunkjed extension \r\n"
"01234567890 more crud including numbers 123123\r\n"
"3f\n"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n"
"0 last one\r\n"
"\r\n";
std::string message_body_data =
"0123456789"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
std::string message =
std::string(message_headers) + std::string(message_body);
FakeHeaders fake_headers;
fake_headers.AddKeyValue("trAnsfer-eNcoding", "chunked");
balsa_frame_.set_is_request(false);
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnResponseFirstLineInput("HTTP/1.1 \t 200 Ok all is well",
"HTTP/1.1", "200", "Ok all is well"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnChunkLength(10));
EXPECT_CALL(visitor_mock_, OnChunkLength(63));
EXPECT_CALL(visitor_mock_, OnChunkLength(0));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message_headers));
std::string body_input;
EXPECT_CALL(visitor_mock_, OnRawBodyInput(_))
.WillRepeatedly([&body_input](absl::string_view input) {
absl::StrAppend(&body_input, input);
});
std::string body_data;
EXPECT_CALL(visitor_mock_, OnBodyChunkInput(_))
.WillRepeatedly([&body_data](absl::string_view input) {
absl::StrAppend(&body_data, input);
});
EXPECT_CALL(visitor_mock_, OnTrailerInput(_)).Times(0);
ASSERT_EQ(message_headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_EQ(message_body.size(),
balsa_frame_.ProcessInput(message.data() + message_headers.size(),
message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_EQ(message_body, body_input);
EXPECT_EQ(message_body_data, body_data);
}
TEST_F(HTTPBalsaFrameTest,
VisitorInvokedProperlyForResponseWithTransferEncodingAndTrailers) {
std::string message_headers =
"HTTP/1.1 \t 200 Ok all is well\r\n"
"trAnsfer-eNcoding: chunked\r\n"
"\r\n";
std::string message_body =
"A chunkjed extension \r\n"
"01234567890 more crud including numbers 123123\r\n"
"3f\n"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n"
"0 last one\r\n";
std::string trailer_data =
"a_trailer_key: and a trailer value\r\n"
"\r\n";
std::string message_body_data =
"0123456789"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
std::string message = (std::string(message_headers) +
std::string(message_body) + std::string(trailer_data));
FakeHeaders fake_headers;
fake_headers.AddKeyValue("trAnsfer-eNcoding", "chunked");
FakeHeaders fake_headers_in_trailer;
fake_headers_in_trailer.AddKeyValue("a_trailer_key", "and a trailer value");
balsa_frame_.set_is_request(false);
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnResponseFirstLineInput("HTTP/1.1 \t 200 Ok all is well",
"HTTP/1.1", "200", "Ok all is well"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnChunkLength(10));
EXPECT_CALL(visitor_mock_, OnChunkLength(63));
EXPECT_CALL(visitor_mock_, OnChunkLength(0));
EXPECT_CALL(visitor_mock_, OnTrailers(fake_headers_in_trailer));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message_headers));
std::string body_input;
EXPECT_CALL(visitor_mock_, OnRawBodyInput(_))
.WillRepeatedly([&body_input](absl::string_view input) {
absl::StrAppend(&body_input, input);
});
std::string body_data;
EXPECT_CALL(visitor_mock_, OnBodyChunkInput(_))
.WillRepeatedly([&body_data](absl::string_view input) {
absl::StrAppend(&body_data, input);
});
EXPECT_CALL(visitor_mock_, OnTrailerInput(trailer_data));
ASSERT_EQ(message_headers.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_EQ(message_body.size() + trailer_data.size(),
balsa_frame_.ProcessInput(message.data() + message_headers.size(),
message.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_EQ(message_body, body_input);
EXPECT_EQ(message_body_data, body_data);
}
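// Same chunked response as above, delivered one byte at a time to exercise
// the framer's incremental state machine.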
TEST_F(
HTTPBalsaFrameTest,
VisitorInvokedProperlyForResponseWithTransferEncodingAndTrailersBytePer) {
std::string message_headers =
"HTTP/1.1 \t 200 Ok all is well\r\n"
"trAnsfer-eNcoding: chunked\r\n"
"\r\n";
std::string message_body =
"A chunkjed extension \r\n"
"01234567890 more crud including numbers 123123\r\n"
"3f\n"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n"
"0 last one\r\n";
std::string trailer_data =
"a_trailer_key: and a trailer value\r\n"
"\r\n";
std::string message_body_data =
"0123456789"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
std::string message = (std::string(message_headers) +
std::string(message_body) + std::string(trailer_data));
FakeHeaders fake_headers;
fake_headers.AddKeyValue("trAnsfer-eNcoding", "chunked");
FakeHeaders fake_headers_in_trailer;
fake_headers_in_trailer.AddKeyValue("a_trailer_key", "and a trailer value");
balsa_frame_.set_is_request(false);
{
InSequence s1;
EXPECT_CALL(visitor_mock_,
OnResponseFirstLineInput("HTTP/1.1 \t 200 Ok all is well",
"HTTP/1.1", "200", "Ok all is well"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnChunkLength(10));
EXPECT_CALL(visitor_mock_, OnChunkLength(63));
EXPECT_CALL(visitor_mock_, OnChunkLength(0));
EXPECT_CALL(visitor_mock_, OnTrailers(fake_headers_in_trailer));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(message_headers));
std::string body_input;
EXPECT_CALL(visitor_mock_, OnRawBodyInput(_))
.WillRepeatedly([&body_input](absl::string_view input) {
absl::StrAppend(&body_input, input);
});
std::string body_data;
EXPECT_CALL(visitor_mock_, OnBodyChunkInput(_))
.WillRepeatedly([&body_data](absl::string_view input) {
absl::StrAppend(&body_data, input);
});
std::string trailer_input;
EXPECT_CALL(visitor_mock_, OnTrailerInput(_))
.WillRepeatedly([&trailer_input](absl::string_view input) {
absl::StrAppend(&trailer_input, input);
});
for (size_t i = 0; i < message.size(); ++i) {
ASSERT_EQ(1u, balsa_frame_.ProcessInput(message.data() + i, 1));
}
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_EQ(message_body, body_input);
EXPECT_EQ(message_body_data, body_data);
EXPECT_EQ(trailer_data, trailer_input);
}
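// Same message again, this time delivered in randomly sized slices. The RNG
// is seeded from the randseed flag so failures are reproducible.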
TEST(HTTPBalsaFrame,
VisitorInvokedProperlyForResponseWithTransferEncodingAndTrailersRandom) {
TestSeed seed;
seed.Initialize(GetQuicheCommandLineFlag(FLAGS_randseed));
RandomEngine rng;
rng.seed(seed.GetSeed());
for (int i = 0; i < 1000; ++i) {
std::string message_headers =
"HTTP/1.1 \t 200 Ok all is well\r\n"
"trAnsfer-eNcoding: chunked\r\n"
"\r\n";
std::string message_body =
"A chunkjed extension \r\n"
"01234567890 more crud including numbers 123123\r\n"
"3f\n"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n"
"0 last one\r\n";
std::string trailer_data =
"a_trailer_key: and a trailer value\r\n"
"\r\n";
std::string message_body_data =
"0123456789"
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
    std::string message = message_headers + message_body + trailer_data;
FakeHeaders fake_headers;
fake_headers.AddKeyValue("trAnsfer-eNcoding", "chunked");
FakeHeaders fake_headers_in_trailer;
fake_headers_in_trailer.AddKeyValue("a_trailer_key", "and a trailer value");
StrictMock<BalsaVisitorMock> visitor_mock;
BalsaHeaders headers;
BalsaFrame balsa_frame;
balsa_frame.set_is_request(false);
balsa_frame.set_balsa_headers(&headers);
balsa_frame.EnableTrailers();
balsa_frame.set_balsa_visitor(&visitor_mock);
{
InSequence s1;
EXPECT_CALL(visitor_mock, OnResponseFirstLineInput(
"HTTP/1.1 \t 200 Ok all is well",
"HTTP/1.1", "200", "Ok all is well"));
EXPECT_CALL(visitor_mock, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock, HeaderDone());
EXPECT_CALL(visitor_mock, OnTrailers(fake_headers_in_trailer));
EXPECT_CALL(visitor_mock, MessageDone());
}
EXPECT_CALL(visitor_mock, OnHeaderInput(message_headers));
std::string body_input;
EXPECT_CALL(visitor_mock, OnRawBodyInput(_))
.WillRepeatedly([&body_input](absl::string_view input) {
absl::StrAppend(&body_input, input);
});
std::string body_data;
EXPECT_CALL(visitor_mock, OnBodyChunkInput(_))
.WillRepeatedly([&body_data](absl::string_view input) {
absl::StrAppend(&body_data, input);
});
std::string trailer_input;
EXPECT_CALL(visitor_mock, OnTrailerInput(_))
.WillRepeatedly([&trailer_input](absl::string_view input) {
absl::StrAppend(&trailer_input, input);
});
EXPECT_CALL(visitor_mock, OnChunkLength(_)).Times(AtLeast(1));
EXPECT_CALL(visitor_mock, OnChunkExtensionInput(_)).Times(AtLeast(1));
size_t count = 0;
size_t total_processed = 0;
for (size_t j = 0; j < message.size();) {
      // Pick a random slice length that never reads past the end of the
      // message buffer.
      auto dist = std::uniform_int_distribution<>(0, message.size() - j);
count = dist(rng);
size_t processed = balsa_frame.ProcessInput(message.data() + j, count);
ASSERT_GE(count, processed);
total_processed += processed;
j += processed;
}
EXPECT_EQ(message.size(), total_processed);
EXPECT_TRUE(balsa_frame.MessageFullyRead());
EXPECT_FALSE(balsa_frame.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame.ErrorCode());
EXPECT_EQ(message_body, body_input);
EXPECT_EQ(message_body_data, body_data);
EXPECT_EQ(trailer_data, trailer_input);
}
}
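// Exceeding max_header_length() must surface HEADERS_TOO_LONG whether the
// oversized headers arrive in one call, are followed by a body, or the limit
// is lowered between calls.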
TEST_F(HTTPBalsaFrameTest,
AppropriateActionTakenWhenHeadersTooLongWithTooMuchInput) {
const absl::string_view message =
"GET /asflkasfdhjsafdkljhasfdlkjhasdflkjhsafdlkjhh HTTP/1.1";
const size_t kAmountLessThanHeaderLen = 10;
ASSERT_LE(kAmountLessThanHeaderLen, message.size());
auto error_code = BalsaFrameEnums::HEADERS_TOO_LONG;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
balsa_frame_.set_max_header_length(message.size() - kAmountLessThanHeaderLen);
ASSERT_EQ(balsa_frame_.max_header_length(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::HEADERS_TOO_LONG, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, AppropriateActionTakenWhenHeadersTooLongWithBody) {
std::string message =
"PUT /foo HTTP/1.1\r\n"
"Content-Length: 4\r\n"
"header: xxxxxxxxx\r\n\r\n"
"B";
auto error_code = BalsaFrameEnums::HEADERS_TOO_LONG;
EXPECT_CALL(visitor_mock_, HandleError(error_code));
balsa_frame_.set_max_header_length(message.size() - 2);
ASSERT_EQ(balsa_frame_.max_header_length(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::HEADERS_TOO_LONG, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, AppropriateActionTakenWhenHeadersTooLongWhenReset) {
std::string message =
"GET /asflkasfdhjsafdkljhasfdlkjhasdflkjhsafdlkjhh HTTP/1.1\r\n"
"\r\n";
const size_t kAmountLessThanHeaderLen = 10;
ASSERT_LE(kAmountLessThanHeaderLen, message.size());
auto error_code = BalsaFrameEnums::HEADERS_TOO_LONG;
ASSERT_EQ(message.size() - 2,
balsa_frame_.ProcessInput(message.data(), message.size() - 2));
balsa_frame_.set_max_header_length(message.size() - kAmountLessThanHeaderLen);
EXPECT_CALL(visitor_mock_, HandleError(error_code));
ASSERT_EQ(0u,
balsa_frame_.ProcessInput(message.data() + message.size() - 2, 2));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::HEADERS_TOO_LONG, balsa_frame_.ErrorCode());
}
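// Fixture for malformed-input cases: its helpers drive a request with an
// empty header key, or a chunked response with a malformed trailer, through
// the framer and assert on the resulting error codes.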
class BalsaFrameParsingTest : public QuicheTest {
protected:
void SetUp() override {
balsa_frame_.set_is_request(true);
balsa_frame_.set_balsa_headers(&headers_);
balsa_frame_.set_balsa_visitor(&visitor_mock_);
}
void TestEmptyHeaderKeyHelper(const std::string& message) {
InSequence s;
EXPECT_CALL(visitor_mock_, OnRequestFirstLineInput("GET / HTTP/1.1", "GET",
"/", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, OnHeaderInput(_));
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_FORMAT));
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
Mock::VerifyAndClearExpectations(&visitor_mock_);
}
void TestInvalidTrailerFormat(const std::string& trailer,
bool invalid_name_char) {
balsa_frame_.set_is_request(false);
balsa_frame_.EnableTrailers();
std::string headers =
"HTTP/1.0 200 ok\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
InSequence s;
EXPECT_CALL(visitor_mock_, OnResponseFirstLineInput);
EXPECT_CALL(visitor_mock_, OnHeaderInput);
EXPECT_CALL(visitor_mock_, ProcessHeaders);
EXPECT_CALL(visitor_mock_, HeaderDone);
EXPECT_CALL(visitor_mock_, OnChunkLength(3));
EXPECT_CALL(visitor_mock_, OnChunkExtensionInput);
EXPECT_CALL(visitor_mock_, OnRawBodyInput);
EXPECT_CALL(visitor_mock_, OnBodyChunkInput);
EXPECT_CALL(visitor_mock_, OnChunkLength(0));
EXPECT_CALL(visitor_mock_, OnChunkExtensionInput);
EXPECT_CALL(visitor_mock_, OnRawBodyInput);
EXPECT_CALL(visitor_mock_, OnRawBodyInput);
const auto expected_error =
invalid_name_char ? BalsaFrameEnums::INVALID_TRAILER_NAME_CHARACTER
: BalsaFrameEnums::INVALID_TRAILER_FORMAT;
EXPECT_CALL(visitor_mock_, HandleError(expected_error)).Times(1);
EXPECT_CALL(visitor_mock_, OnTrailers(_)).Times(0);
EXPECT_CALL(visitor_mock_, MessageDone()).Times(0);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(expected_error, balsa_frame_.ErrorCode());
Mock::VerifyAndClearExpectations(&visitor_mock_);
}
BalsaHeaders headers_;
BalsaFrame balsa_frame_;
StrictMock<BalsaVisitorMock> visitor_mock_;
};
TEST_F(BalsaFrameParsingTest, AppropriateActionTakenWhenHeaderColonsAreFunny) {
std::string message =
"GET / HTTP/1.1\r\n"
"a\r\n"
"b\r\n"
"c\r\n"
"d\r\n"
"e\r\n"
"f\r\n"
"g\r\n"
"h\r\n"
"i:\r\n"
"j\r\n"
"k\r\n"
"l\r\n"
"m\r\n"
"n\r\n"
"o\r\n"
"p\r\n"
"q\r\n"
"r\r\n"
"s\r\n"
"t\r\n"
"u\r\n"
"v\r\n"
"w\r\n"
"x\r\n"
"y\r\n"
"z\r\n"
"A\r\n"
"B\r\n"
": val\r\n"
"\r\n";
EXPECT_CALL(visitor_mock_, OnRequestFirstLineInput("GET / HTTP/1.1", "GET",
"/", "HTTP/1.1"));
EXPECT_CALL(visitor_mock_, OnHeaderInput(_));
EXPECT_CALL(visitor_mock_,
HandleWarning(BalsaFrameEnums::HEADER_MISSING_COLON))
.Times(27);
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_FORMAT));
ASSERT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
}
TEST_F(BalsaFrameParsingTest, ErrorWhenHeaderKeyIsEmpty) {
std::string firstKeyIsEmpty =
"GET / HTTP/1.1\r\n"
": \r\n"
"a:b\r\n"
"c:d\r\n"
"\r\n";
TestEmptyHeaderKeyHelper(firstKeyIsEmpty);
balsa_frame_.Reset();
std::string laterKeyIsEmpty =
"GET / HTTP/1.1\r\n"
"a:b\r\n"
": \r\n"
"c:d\r\n"
"\r\n";
TestEmptyHeaderKeyHelper(laterKeyIsEmpty);
}
TEST_F(BalsaFrameParsingTest, InvalidTrailerFormat) {
std::string trailer =
":monkeys\n"
"\r\n";
TestInvalidTrailerFormat(trailer, false);
balsa_frame_.Reset();
std::string trailer2 =
" \r\n"
"test: test\r\n"
"\r\n";
TestInvalidTrailerFormat(trailer2, true);
balsa_frame_.Reset();
std::string trailer3 =
"a: b\r\n"
": test\r\n"
"\r\n";
TestInvalidTrailerFormat(trailer3, false);
}
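// The end of the header block must be recognized under every line-terminator
// convention: all \r\n, all bare \n, and both interleavings of the two.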
TEST_F(HTTPBalsaFrameTest,
EnsureHeaderFramingFoundWithVariousCombinationsOfRN_RN) {
const std::string message =
"GET / HTTP/1.1\r\n"
"content-length: 0\r\n"
"a\r\n"
"b\r\n"
"c\r\n"
"d\r\n"
"e\r\n"
"f\r\n"
"g\r\n"
"h\r\n"
"i\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error())
<< BalsaFrameEnums::ErrorCodeToString(balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
EnsureHeaderFramingFoundWithVariousCombinationsOfRN_N) {
const std::string message =
"GET / HTTP/1.1\n"
"content-length: 0\n"
"a\n"
"b\n"
"c\n"
"d\n"
"e\n"
"f\n"
"g\n"
"h\n"
"i\n"
"\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error())
<< BalsaFrameEnums::ErrorCodeToString(balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
EnsureHeaderFramingFoundWithVariousCombinationsOfRN_RN_N) {
const std::string message =
"GET / HTTP/1.1\n"
"content-length: 0\r\n"
"a\r\n"
"b\n"
"c\r\n"
"d\n"
"e\r\n"
"f\n"
"g\r\n"
"h\n"
"i\r\n"
"\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error())
<< BalsaFrameEnums::ErrorCodeToString(balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest,
EnsureHeaderFramingFoundWithVariousCombinationsOfRN_N_RN) {
const std::string message =
"GET / HTTP/1.1\n"
"content-length: 0\r\n"
"a\n"
"b\r\n"
"c\n"
"d\r\n"
"e\n"
"f\r\n"
"g\n"
"h\r\n"
"i\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error())
<< BalsaFrameEnums::ErrorCodeToString(balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, ReadUntilCloseStateEnteredAsExpectedAndNotExited) {
std::string message =
"HTTP/1.1 200 OK\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error())
<< BalsaFrameEnums::ErrorCodeToString(balsa_frame_.ErrorCode());
EXPECT_EQ(BalsaFrameEnums::READING_UNTIL_CLOSE, balsa_frame_.ParseState());
std::string gobldygook = "-198324-9182-43981-23498-98342-jasldfn-1294hj";
for (int i = 0; i < 1000; ++i) {
EXPECT_EQ(gobldygook.size(),
balsa_frame_.ProcessInput(gobldygook.data(), gobldygook.size()));
EXPECT_FALSE(balsa_frame_.Error())
<< BalsaFrameEnums::ErrorCodeToString(balsa_frame_.ErrorCode());
EXPECT_EQ(BalsaFrameEnums::READING_UNTIL_CLOSE, balsa_frame_.ParseState());
}
}
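// BytesSafeToSplice() reports how many body bytes the caller may consume out
// of band (e.g. via a zero-copy splice); BytesSpliced() then advances the
// framer's body accounting. The following tests cover content-length,
// read-until-close, and chunked bodies, plus the error paths.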
TEST_F(HTTPBalsaFrameTest,
BytesSafeToSpliceAndBytesSplicedWorksWithContentLength) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"content-length: 1000\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
size_t bytes_safe_to_splice = 1000;
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(header.size(),
balsa_frame_.ProcessInput(header.data(), header.size()));
EXPECT_EQ(bytes_safe_to_splice, balsa_frame_.BytesSafeToSplice());
while (bytes_safe_to_splice > 0) {
balsa_frame_.BytesSpliced(1);
bytes_safe_to_splice -= 1;
ASSERT_FALSE(balsa_frame_.Error())
<< BalsaFrameEnums::ParseStateToString(balsa_frame_.ParseState()) << " "
<< BalsaFrameEnums::ErrorCodeToString(balsa_frame_.ErrorCode())
<< " with bytes_safe_to_splice: " << bytes_safe_to_splice
<< " and BytesSafeToSplice(): " << balsa_frame_.BytesSafeToSplice();
}
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, BytesSplicedFlagsErrorsWhenNotInProperState) {
balsa_frame_.set_is_request(false);
balsa_frame_.BytesSpliced(1);
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::CALLED_BYTES_SPLICED_WHEN_UNSAFE_TO_DO_SO,
balsa_frame_.ErrorCode());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest,
BytesSplicedFlagsErrorsWhenTooMuchSplicedForContentLen) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"content-length: 1000\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(header.size(),
balsa_frame_.ProcessInput(header.data(), header.size()));
EXPECT_EQ(1000u, balsa_frame_.BytesSafeToSplice());
balsa_frame_.BytesSpliced(1001);
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(
BalsaFrameEnums::CALLED_BYTES_SPLICED_AND_EXCEEDED_SAFE_SPLICE_AMOUNT,
balsa_frame_.ErrorCode());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, BytesSplicedWorksAsExpectedForReadUntilClose) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(header.size(),
balsa_frame_.ProcessInput(header.data(), header.size()));
EXPECT_EQ(BalsaFrameEnums::READING_UNTIL_CLOSE, balsa_frame_.ParseState());
EXPECT_EQ(std::numeric_limits<size_t>::max(),
balsa_frame_.BytesSafeToSplice());
for (int i = 0; i < 1000; ++i) {
EXPECT_EQ(std::numeric_limits<size_t>::max(),
balsa_frame_.BytesSafeToSplice());
balsa_frame_.BytesSpliced(12312312);
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
EXPECT_EQ(std::numeric_limits<size_t>::max(),
balsa_frame_.BytesSafeToSplice());
}
TEST_F(HTTPBalsaFrameTest,
BytesSplicedFlagsErrorsWhenTooMuchSplicedForChunked) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string body_fragment = "a\r\n";
balsa_frame_.set_is_request(false);
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(header.size(),
balsa_frame_.ProcessInput(header.data(), header.size()));
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(
body_fragment.size(),
balsa_frame_.ProcessInput(body_fragment.data(), body_fragment.size()));
EXPECT_EQ(10u, balsa_frame_.BytesSafeToSplice());
balsa_frame_.BytesSpliced(11);
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(
BalsaFrameEnums::CALLED_BYTES_SPLICED_AND_EXCEEDED_SAFE_SPLICE_AMOUNT,
balsa_frame_.ErrorCode());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, BytesSafeToSpliceAndBytesSplicedWorksWithChunks) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(header.size(),
balsa_frame_.ProcessInput(header.data(), header.size()));
{
std::string body_fragment = "3e8\r\n";
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
size_t bytes_safe_to_splice = 1000;
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(
body_fragment.size(),
balsa_frame_.ProcessInput(body_fragment.data(), body_fragment.size()));
EXPECT_EQ(bytes_safe_to_splice, balsa_frame_.BytesSafeToSplice());
while (bytes_safe_to_splice > 0) {
balsa_frame_.BytesSpliced(1);
bytes_safe_to_splice -= 1;
ASSERT_FALSE(balsa_frame_.Error());
}
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_FALSE(balsa_frame_.Error());
}
{
std::string body_fragment = "\r\n7d0\r\n";
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
size_t bytes_safe_to_splice = 2000;
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(
body_fragment.size(),
balsa_frame_.ProcessInput(body_fragment.data(), body_fragment.size()));
EXPECT_EQ(bytes_safe_to_splice, balsa_frame_.BytesSafeToSplice());
while (bytes_safe_to_splice > 0) {
balsa_frame_.BytesSpliced(1);
bytes_safe_to_splice -= 1;
ASSERT_FALSE(balsa_frame_.Error());
}
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_FALSE(balsa_frame_.Error());
}
{
std::string body_fragment = "\r\n1\r\n";
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
size_t bytes_safe_to_splice = 1;
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(
body_fragment.size(),
balsa_frame_.ProcessInput(body_fragment.data(), body_fragment.size()));
EXPECT_EQ(bytes_safe_to_splice, balsa_frame_.BytesSafeToSplice());
while (bytes_safe_to_splice > 0) {
balsa_frame_.BytesSpliced(1);
bytes_safe_to_splice -= 1;
ASSERT_FALSE(balsa_frame_.Error());
}
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_FALSE(balsa_frame_.Error());
}
{
std::string body_fragment = "\r\n0\r\n\r\n";
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_EQ(
body_fragment.size(),
balsa_frame_.ProcessInput(body_fragment.data(), body_fragment.size()));
EXPECT_EQ(0u, balsa_frame_.BytesSafeToSplice());
EXPECT_FALSE(balsa_frame_.Error());
}
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
}
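// Duplicate Content-Length headers: conflicting values are always an error,
// while identical values are tolerated unless
// disallow_multiple_content_length is set.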
TEST_F(HTTPBalsaFrameTest, TwoDifferentContentLengthHeadersIsAnError) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"content-length: 12\r\n"
"content-length: 14\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::MULTIPLE_CONTENT_LENGTH_KEYS,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, TwoSameContentLengthHeadersIsNotAnError) {
std::string header =
"POST / HTTP/1.1\r\n"
"content-length: 1\r\n"
"content-length: 1\r\n"
"\r\n"
"1";
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_FALSE(balsa_frame_.Error());
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, TwoSameContentLengthHeadersIsAnError) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_multiple_content_length = true;
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string header =
"POST / HTTP/1.1\r\n"
"content-length: 1\r\n"
"content-length: 1\r\n"
"\r\n"
"1";
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::MULTIPLE_CONTENT_LENGTH_KEYS,
balsa_frame_.ErrorCode());
}
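// With validate_transfer_encoding (the default), repeated, multi-token, or
// unrecognized Transfer-Encoding values are errors; disabling the policy
// accepts such values.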
TEST_F(HTTPBalsaFrameTest, TwoTransferEncodingHeadersIsAnError) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n"
"transfer-encoding: identity\r\n"
"content-length: 3\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::MULTIPLE_TRANSFER_ENCODING_KEYS,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, AcceptTwoTransferEncodingHeaders) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.validate_transfer_encoding = false;
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n"
"transfer-encoding: identity\r\n"
"content-length: 3\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, TwoTransferEncodingTokensIsAnError) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked, identity\r\n"
"content-length: 3\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::UNKNOWN_TRANSFER_ENCODING,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, AcceptTwoTransferEncodingTokens) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.validate_transfer_encoding = false;
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked, identity\r\n"
"content-length: 3\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, UnknownTransferEncodingTokenIsAnError) {
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked-identity\r\n"
"content-length: 3\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::UNKNOWN_TRANSFER_ENCODING,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, AcceptUnknownTransferEncodingToken) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.validate_transfer_encoding = false;
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked-identity\r\n"
"content-length: 3\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, MissingContentLength) {
std::string header = "HTTP/1.1 200 OK\r\n\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::MAYBE_BODY_BUT_NO_CONTENT_LENGTH,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, MultipleTransferEncodingsWithMissingContentLength) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.validate_transfer_encoding = false;
balsa_frame_.set_http_validation_policy(http_validation_policy);
std::string header =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n"
"transfer-encoding: identity\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.ProcessInput(header.data(), header.size());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::MAYBE_BODY_BUT_NO_CONTENT_LENGTH,
balsa_frame_.ErrorCode());
}
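// Visitor that detaches the framer's BalsaHeaders as soon as MessageDone()
// fires, verifying that the framer does not touch the headers object after
// the message completes.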
class DetachOnDoneFramer : public NoOpBalsaVisitor {
public:
DetachOnDoneFramer() {
framer_.set_balsa_headers(&headers_);
framer_.set_balsa_visitor(this);
}
void MessageDone() override { framer_.set_balsa_headers(nullptr); }
BalsaFrame* framer() { return &framer_; }
protected:
BalsaFrame framer_;
BalsaHeaders headers_;
};
TEST(HTTPBalsaFrame, TestDetachOnDone) {
DetachOnDoneFramer framer;
const char* message = "GET HTTP/1.1\r\n\r\n";
framer.framer()->ProcessInput(message, strlen(message));
EXPECT_TRUE(framer.framer()->MessageFullyRead());
EXPECT_FALSE(framer.framer()->Error());
}
class ModifyMaxHeaderLengthFramerInFirstLine : public DetachOnDoneFramer {
public:
void MessageDone() override {}
void OnRequestFirstLineInput(absl::string_view ,
absl::string_view ,
absl::string_view ,
absl::string_view
) override {
framer_.set_max_header_length(1);
}
};
class ModifyMaxHeaderLengthFramerInHeaderDone : public DetachOnDoneFramer {
public:
void MessageDone() override {}
void HeaderDone() override { framer_.set_max_header_length(1); }
};
TEST(HTTPBalsaFrame, ChangeMaxHeadersLengthOnFirstLine) {
std::string message =
"PUT /foo HTTP/1.1\r\n"
"Content-Length: 2\r\n"
"header: xxxxxxxxx\r\n\r\n"
"B";
ModifyMaxHeaderLengthFramerInFirstLine balsa_frame;
balsa_frame.framer()->set_is_request(true);
balsa_frame.framer()->set_max_header_length(message.size() - 1);
balsa_frame.framer()->ProcessInput(message.data(), message.size());
EXPECT_EQ(BalsaFrameEnums::HEADERS_TOO_LONG,
balsa_frame.framer()->ErrorCode());
}
TEST(HTTPBalsaFrame, ChangeMaxHeadersLengthOnHeaderDone) {
std::string message =
"PUT /foo HTTP/1.1\r\n"
"Content-Length: 2\r\n"
"header: xxxxxxxxx\r\n\r\n"
"B";
ModifyMaxHeaderLengthFramerInHeaderDone balsa_frame;
balsa_frame.framer()->set_is_request(true);
balsa_frame.framer()->set_max_header_length(message.size() - 1);
balsa_frame.framer()->ProcessInput(message.data(), message.size());
  EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR,
            balsa_frame.framer()->ErrorCode());
}
TEST(HTTPBalsaFrame, HeadersSizeSameAsMaxLengthIsAccepted) {
std::string message =
"GET /foo HTTP/1.1\r\n"
"header: xxxxxxxxx\r\n\r\n";
ModifyMaxHeaderLengthFramerInHeaderDone balsa_frame;
balsa_frame.framer()->set_is_request(true);
balsa_frame.framer()->set_max_header_length(message.size());
balsa_frame.framer()->ProcessInput(message.data(), message.size());
  EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR,
            balsa_frame.framer()->ErrorCode());
}
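// Spaces, tabs, and line continuations inside a header field name are all
// INVALID_HEADER_NAME_CHARACTER errors; a double quote in the name is
// allowed unless disallow_double_quote_in_header_name is set.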
TEST_F(HTTPBalsaFrameTest, KeyHasSpaces) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key has spaces: lock\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, SpaceBeforeColon) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key : lock\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, SpaceBeforeColonNotAfter) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key :lock\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, KeyHasTabs) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key\thas\ttabs: lock\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, TabBeforeColon) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key\t: lock\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, KeyHasContinuation) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key\n includes continuation: but not value\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, KeyHasMultipleContinuations) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key\n includes\r\n multiple\n continuations: but not value\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, KeyHasDoubleQuote) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key\"hasquote: lock\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_TRUE(headers_.HasHeader("key\"hasquote"));
}
TEST_F(HTTPBalsaFrameTest, KeyHasDisallowedDoubleQuote) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_double_quote_in_header_name = true;
balsa_frame_.set_http_validation_policy(http_validation_policy);
const std::string message =
"GET / HTTP/1.1\r\n"
"key\"hasquote: lock\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER,
balsa_frame_.ErrorCode());
}
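// Trailer handling mirrors header handling: a missing colon is a warning
// (the key gets an empty value), arbitrary gibberish keys are preserved, and
// a trailer exceeding max_header_length() is a TRAILER_TOO_LONG error.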
TEST_F(HTTPBalsaFrameTest, TrailerMissingColon) {
std::string headers =
"HTTP/1.0 302 Redirect\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"crass_monkeys\n"
"\r\n";
balsa_frame_.set_is_request(false);
EXPECT_CALL(visitor_mock_,
HandleWarning(BalsaFrameEnums::TRAILER_MISSING_COLON));
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
FakeHeaders fake_trailers;
fake_trailers.AddKeyValue("crass_monkeys", "");
EXPECT_CALL(visitor_mock_, OnTrailers(fake_trailers));
EXPECT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::TRAILER_MISSING_COLON, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, MultipleHeadersInTrailer) {
std::string headers =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\n"
"0\n";
std::map<std::string, std::string> trailer;
trailer["X-Trace"] =
"http:
"foobar.example.com&start=2012-06-03_15:59:06&rpc_duration=0.243349";
trailer["Date"] = "Sun, 03 Jun 2012 22:59:06 GMT";
trailer["Content-Type"] = "text/html";
trailer["X-Backends"] = "127.0.0.1_0,foo.example.com:39359";
trailer["X-Request-Trace"] =
"foo.example.com:39359,127.0.0.1_1,"
"foo.example.com:39359,127.0.0.1_0,"
"foo.example.com:39359";
trailer["X-Service-Trace"] = "default";
trailer["X-Service"] = "default";
  std::string trailer_data;
TestSeed seed;
seed.Initialize(GetQuicheCommandLineFlag(FLAGS_randseed));
RandomEngine rng;
rng.seed(seed.GetSeed());
FakeHeaders fake_headers_in_trailer;
  for (const auto& [key, value] : trailer) {
    trailer_data += key;
    trailer_data += ":";
    std::stringstream leading_whitespace_for_value;
    AppendRandomWhitespace(rng, &leading_whitespace_for_value);
    trailer_data += leading_whitespace_for_value.str();
    trailer_data += value;
    std::stringstream trailing_whitespace_for_value;
    AppendRandomWhitespace(rng, &trailing_whitespace_for_value);
    trailer_data += trailing_whitespace_for_value.str();
    trailer_data += random_line_term(rng);
    fake_headers_in_trailer.AddKeyValue(key, value);
  }
trailer_data += random_line_term(rng);
FakeHeaders fake_headers;
fake_headers.AddKeyValue("transfer-encoding", "chunked");
{
InSequence s1;
EXPECT_CALL(visitor_mock_, OnResponseFirstLineInput(
"HTTP/1.1 200 OK", "HTTP/1.1", "200", "OK"));
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, OnChunkLength(3));
EXPECT_CALL(visitor_mock_, OnChunkLength(0));
EXPECT_CALL(visitor_mock_, OnTrailers(fake_headers_in_trailer));
EXPECT_CALL(visitor_mock_, OnTrailerInput(trailer_data));
EXPECT_CALL(visitor_mock_, MessageDone());
}
EXPECT_CALL(visitor_mock_, OnHeaderInput(headers));
std::string body_input;
EXPECT_CALL(visitor_mock_, OnRawBodyInput(_))
.WillRepeatedly([&body_input](absl::string_view input) {
absl::StrAppend(&body_input, input);
});
EXPECT_CALL(visitor_mock_, OnBodyChunkInput("123"));
balsa_frame_.set_is_request(false);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_EQ(trailer_data.size(), balsa_frame_.ProcessInput(
trailer_data.data(), trailer_data.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_EQ(chunks, body_input);
}
TEST_F(HTTPBalsaFrameTest, NothingBadHappensWithNULLTrailer) {
std::string headers =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"crass: monkeys\r\n"
"funky: monkeys\r\n"
"\n";
BalsaFrame balsa_frame;
balsa_frame.set_balsa_headers(&headers_);
balsa_frame.set_is_request(false);
balsa_frame.set_balsa_visitor(nullptr);
ASSERT_EQ(headers.size(),
balsa_frame.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame.ProcessInput(chunks.data(), chunks.size()));
ASSERT_EQ(trailer.size(),
balsa_frame.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame.MessageFullyRead());
EXPECT_FALSE(balsa_frame.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, FrameAndResetAndFrameAgain) {
std::string headers =
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"k: v\n"
"\n";
balsa_frame_.set_is_request(false);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
{
FakeHeaders fake_trailers;
fake_trailers.AddKeyValue("k", "v");
EXPECT_CALL(visitor_mock_, OnTrailers(fake_trailers));
}
ASSERT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
balsa_frame_.Reset();
headers =
"HTTP/1.1 404 Error\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
chunks =
"4\r\n"
"1234\r\n"
"0\r\n";
trailer =
"nk: nv\n"
"\n";
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
{
FakeHeaders fake_trailers;
fake_trailers.AddKeyValue("nk", "nv");
EXPECT_CALL(visitor_mock_, OnTrailers(fake_trailers));
}
ASSERT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
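// InvalidCharsLevel::kError turns control characters in header values into
// hard INVALID_HEADER_CHARACTER errors; header *names* are validated
// independently of this level.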
TEST_F(HTTPBalsaFrameTest, InvalidCharsInHeaderValueError) {
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const std::string kEscapedInvalid1 =
"GET /foo HTTP/1.1\r\n"
"Bogus-Head: val\\x00\r\n"
"More-Invalid: \\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0B\x0C\x0E\x0F\r\n"
"And-More: \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D"
"\x1E\x1F\r\n\r\n";
std::string message;
absl::CUnescape(kEscapedInvalid1, &message);
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER));
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, InvalidCharsInHeaderNameError) {
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kOff);
const std::string kEscapedInvalid1 =
"GET /foo HTTP/1.1\r\n"
"Bogus\\x00-Head: val\r\n\r\n";
std::string message;
absl::CUnescape(kEscapedInvalid1, &message);
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER));
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, InvalidCharsInRequestHeaderError) {
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const std::string kEscapedInvalid =
"GET /foo HTTP/1.1\r\n"
"Smuggle-Me: \\x00GET /bar HTTP/1.1\r\n"
"Another-Header: value\r\n\r\n";
std::string message;
absl::CUnescape(kEscapedInvalid, &message);
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER));
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, InvalidCharsInResponseHeaderAllowed) {
balsa_frame_.set_is_request(false);
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kOff);
const absl::string_view headers =
"HTTP/1.1 200 OK\r\n"
"Content-Length: 5\r\n"
"foo: a\022b\r\n"
"\r\n";
EXPECT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, InvalidCharsInResponseHeaderError) {
balsa_frame_.set_is_request(false);
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const absl::string_view headers =
"HTTP/1.1 200 OK\r\n"
"Content-Length: 5\r\n"
"foo: a\022b\r\n"
"\r\n";
EXPECT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_CHARACTER,
balsa_frame_.ErrorCode());
}
class HTTPBalsaFrameTestOneChar : public HTTPBalsaFrameTest,
public testing::WithParamInterface<char> {
public:
char GetCharUnderTest() { return GetParam(); }
};
TEST_P(HTTPBalsaFrameTestOneChar, InvalidCharsErrorSet) {
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const std::string kRequest =
"GET /foo HTTP/1.1\r\n"
"Bogus-Char-Goes-Here: ";
const std::string kEnding = "\r\n\r\n";
std::string message = kRequest;
const char c = GetCharUnderTest();
message.append(1, c);
message.append(kEnding);
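  // HTAB (9), LF (10), and CR (13) are the only control characters tolerated
  // in a header value; every other byte in [0, 32) must be rejected.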
if (c == 9 || c == 10 || c == 13) {
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER))
.Times(0);
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
} else {
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER));
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
}
INSTANTIATE_TEST_SUITE_P(TestInvalidCharSet, HTTPBalsaFrameTestOneChar,
Range<char>(0, 32));
TEST_F(HTTPBalsaFrameTest, InvalidCharEndOfLine) {
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const std::string kInvalid1 =
"GET /foo HTTP/1.1\r\n"
"Header-Key: headervalue\\x00\r\n"
"Legit-Header: legitvalue\r\n\r\n";
std::string message;
absl::CUnescape(kInvalid1, &message);
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER));
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, InvalidCharInFirstLine) {
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const std::string kInvalid1 =
"GET /foo \\x00HTTP/1.1\r\n"
"Legit-Header: legitvalue\r\n\r\n";
std::string message;
absl::CUnescape(kInvalid1, &message);
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER));
balsa_frame_.ProcessInput(message.data(), message.size());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, GibberishInHeadersAndTrailer) {
const char kGibberish1[] = {static_cast<char>(138), static_cast<char>(175),
static_cast<char>(233), 0};
const char kGibberish2[] = {'?',
'?',
static_cast<char>(128),
static_cast<char>(255),
static_cast<char>(129),
static_cast<char>(254),
0};
const char kGibberish3[] = "foo: bar : eeep : baz";
std::string gibberish_headers =
absl::StrCat(kGibberish1, ":", kGibberish2, "\r\n", kGibberish3, "\r\n");
std::string headers = absl::StrCat(
"HTTP/1.1 200 OK\r\n"
"transfer-encoding: chunked\r\n",
gibberish_headers, "\r\n");
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer = absl::StrCat("k: v\n", gibberish_headers, "\n");
balsa_frame_.set_is_request(false);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
FakeHeaders fake_trailers;
fake_trailers.AddKeyValue("k", "v");
fake_trailers.AddKeyValue(kGibberish1, kGibberish2);
fake_trailers.AddKeyValue("foo", "bar : eeep : baz");
EXPECT_CALL(visitor_mock_, OnTrailers(fake_trailers));
ASSERT_EQ(trailer.size(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
EXPECT_TRUE(headers_.transfer_encoding_is_chunked());
absl::string_view field_value = headers_.GetHeader(kGibberish1);
EXPECT_EQ(kGibberish2, field_value);
field_value = headers_.GetHeader("foo");
EXPECT_EQ("bar : eeep : baz", field_value);
}
TEST_F(HTTPBalsaFrameTest, TrailerTooLong) {
std::string headers =
"HTTP/1.0 200 ok\r\n"
"transfer-encoding: chunked\r\n"
"\r\n";
std::string chunks =
"3\r\n"
"123\r\n"
"0\r\n";
std::string trailer =
"very : long trailer\n"
"should:cause\r\n"
"trailer :too long error\n"
"\r\n";
balsa_frame_.set_is_request(false);
ASSERT_LT(headers.size(), trailer.size());
balsa_frame_.set_max_header_length(headers.size());
EXPECT_CALL(visitor_mock_, HandleError(BalsaFrameEnums::TRAILER_TOO_LONG));
EXPECT_CALL(visitor_mock_, OnTrailers(_)).Times(0);
EXPECT_CALL(visitor_mock_, MessageDone()).Times(0);
ASSERT_EQ(headers.size(),
balsa_frame_.ProcessInput(headers.data(), headers.size()));
ASSERT_EQ(chunks.size(),
balsa_frame_.ProcessInput(chunks.data(), chunks.size()));
EXPECT_EQ(balsa_frame_.max_header_length(),
balsa_frame_.ProcessInput(trailer.data(), trailer.size()));
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::TRAILER_TOO_LONG, balsa_frame_.ErrorCode());
}
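// Interim (1xx) response handling. With the interim-headers callback enabled,
// 100 Continue is reported via OnInterimHeaders() and parsing continues into
// the final response; with it disabled, the framer either uses the separate
// continue-headers object or treats the interim response as a complete
// message.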
TEST_F(HTTPBalsaFrameTest, Parse100ContinueNoContinueHeadersNoCallback) {
std::string continue_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.set_use_interim_headers_callback(false);
InSequence s;
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
ASSERT_EQ(balsa_frame_.ProcessInput(continue_headers.data(),
continue_headers.size()),
continue_headers.size())
<< balsa_frame_.ErrorCode();
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(headers_.parsed_response_code(), 100);
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, Parse100Continue) {
std::string continue_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n";
balsa_frame_.set_is_request(false);
balsa_frame_.set_use_interim_headers_callback(true);
InSequence s;
EXPECT_CALL(visitor_mock_, OnInterimHeaders(Pointee(Property(
&BalsaHeaders::parsed_response_code, 100))));
EXPECT_CALL(visitor_mock_, HeaderDone()).Times(0);
EXPECT_CALL(visitor_mock_, MessageDone()).Times(0);
ASSERT_EQ(balsa_frame_.ProcessInput(continue_headers.data(),
continue_headers.size()),
continue_headers.size())
<< balsa_frame_.ErrorCode();
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(headers_.parsed_response_code(), 0u);
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
}
TEST_F(HTTPBalsaFrameTest, Support100ContinueNoCallback) {
std::string initial_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n";
std::string real_headers =
"HTTP/1.1 200 OK\r\n"
"content-length: 3\r\n"
"\r\n";
std::string body = "foo";
balsa_frame_.set_is_request(false);
BalsaHeaders continue_headers;
balsa_frame_.set_continue_headers(&continue_headers);
balsa_frame_.set_use_interim_headers_callback(false);
ASSERT_EQ(initial_headers.size(),
balsa_frame_.ProcessInput(initial_headers.data(),
initial_headers.size()));
ASSERT_EQ(real_headers.size(),
balsa_frame_.ProcessInput(real_headers.data(), real_headers.size()))
<< balsa_frame_.ErrorCode();
ASSERT_EQ(body.size(), balsa_frame_.ProcessInput(body.data(), body.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, Support100Continue) {
std::string initial_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n";
std::string real_headers =
"HTTP/1.1 200 OK\r\n"
"content-length: 3\r\n"
"\r\n";
std::string body = "foo";
balsa_frame_.set_is_request(false);
balsa_frame_.set_use_interim_headers_callback(true);
InSequence s;
EXPECT_CALL(visitor_mock_, OnInterimHeaders(Pointee(Property(
&BalsaHeaders::parsed_response_code, 100))));
ASSERT_EQ(
balsa_frame_.ProcessInput(initial_headers.data(), initial_headers.size()),
initial_headers.size());
ASSERT_FALSE(balsa_frame_.Error());
EXPECT_CALL(visitor_mock_, HeaderDone());
ASSERT_EQ(balsa_frame_.ProcessInput(real_headers.data(), real_headers.size()),
real_headers.size())
<< balsa_frame_.ErrorCode();
EXPECT_EQ(headers_.parsed_response_code(), 200);
EXPECT_CALL(visitor_mock_, MessageDone());
ASSERT_EQ(balsa_frame_.ProcessInput(body.data(), body.size()), body.size());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(balsa_frame_.ErrorCode(), BalsaFrameEnums::BALSA_NO_ERROR);
}
TEST_F(HTTPBalsaFrameTest, InterimHeadersCallbackTakesPrecedence) {
std::string initial_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n";
std::string real_headers =
"HTTP/1.1 200 OK\r\n"
"content-length: 3\r\n"
"\r\n";
std::string body = "foo";
balsa_frame_.set_is_request(false);
BalsaHeaders continue_headers;
balsa_frame_.set_continue_headers(&continue_headers);
balsa_frame_.set_use_interim_headers_callback(true);
InSequence s;
EXPECT_CALL(visitor_mock_, OnInterimHeaders(Pointee(Property(
&BalsaHeaders::parsed_response_code, 100))));
EXPECT_CALL(visitor_mock_, ContinueHeaderDone).Times(0);
ASSERT_EQ(
balsa_frame_.ProcessInput(initial_headers.data(), initial_headers.size()),
initial_headers.size());
EXPECT_EQ(continue_headers.parsed_response_code(), 0u);
ASSERT_FALSE(balsa_frame_.Error());
EXPECT_CALL(visitor_mock_, HeaderDone());
ASSERT_EQ(balsa_frame_.ProcessInput(real_headers.data(), real_headers.size()),
real_headers.size())
<< balsa_frame_.ErrorCode();
EXPECT_EQ(headers_.parsed_response_code(), 200);
EXPECT_CALL(visitor_mock_, MessageDone());
ASSERT_EQ(balsa_frame_.ProcessInput(body.data(), body.size()), body.size());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(balsa_frame_.ErrorCode(), BalsaFrameEnums::BALSA_NO_ERROR);
}
TEST_F(HTTPBalsaFrameTest, Support100Continue401UnauthorizedNoCallback) {
std::string initial_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n";
std::string real_headers =
"HTTP/1.1 401 Unauthorized\r\n"
"content-length: 3\r\n"
"\r\n";
std::string body = "foo";
balsa_frame_.set_is_request(false);
BalsaHeaders continue_headers;
balsa_frame_.set_continue_headers(&continue_headers);
balsa_frame_.set_use_interim_headers_callback(false);
ASSERT_EQ(initial_headers.size(),
balsa_frame_.ProcessInput(initial_headers.data(),
initial_headers.size()));
ASSERT_EQ(real_headers.size(),
balsa_frame_.ProcessInput(real_headers.data(), real_headers.size()))
<< balsa_frame_.ErrorCode();
ASSERT_EQ(body.size(), balsa_frame_.ProcessInput(body.data(), body.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, Support100Continue401Unauthorized) {
std::string initial_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n";
std::string real_headers =
"HTTP/1.1 401 Unauthorized\r\n"
"content-length: 3\r\n"
"\r\n";
std::string body = "foo";
balsa_frame_.set_is_request(false);
balsa_frame_.set_use_interim_headers_callback(true);
InSequence s;
EXPECT_CALL(visitor_mock_, OnInterimHeaders(Pointee(Property(
&BalsaHeaders::parsed_response_code, 100))));
ASSERT_EQ(
balsa_frame_.ProcessInput(initial_headers.data(), initial_headers.size()),
initial_headers.size());
ASSERT_FALSE(balsa_frame_.Error());
EXPECT_CALL(visitor_mock_, HeaderDone());
ASSERT_EQ(balsa_frame_.ProcessInput(real_headers.data(), real_headers.size()),
real_headers.size())
<< balsa_frame_.ErrorCode();
EXPECT_EQ(headers_.parsed_response_code(), 401);
EXPECT_CALL(visitor_mock_, MessageDone());
ASSERT_EQ(balsa_frame_.ProcessInput(body.data(), body.size()), body.size());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(balsa_frame_.ErrorCode(), BalsaFrameEnums::BALSA_NO_ERROR);
}
TEST_F(HTTPBalsaFrameTest, Support100ContinueRunTogetherNoCallback) {
std::string both_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n"
"HTTP/1.1 200 OK\r\n"
"content-length: 3\r\n"
"\r\n";
std::string body = "foo";
{
InSequence s;
EXPECT_CALL(visitor_mock_, ContinueHeaderDone());
EXPECT_CALL(visitor_mock_, HeaderDone());
EXPECT_CALL(visitor_mock_, MessageDone());
}
balsa_frame_.set_is_request(false);
BalsaHeaders continue_headers;
balsa_frame_.set_continue_headers(&continue_headers);
balsa_frame_.set_use_interim_headers_callback(false);
ASSERT_EQ(both_headers.size(),
balsa_frame_.ProcessInput(both_headers.data(), both_headers.size()))
<< balsa_frame_.ErrorCode();
ASSERT_EQ(body.size(), balsa_frame_.ProcessInput(body.data(), body.size()));
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, Support100ContinueRunTogether) {
std::string both_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n"
"HTTP/1.1 200 OK\r\n"
"content-length: 3\r\n"
"\r\n";
std::string body = "foo";
balsa_frame_.set_is_request(false);
balsa_frame_.set_use_interim_headers_callback(true);
InSequence s;
EXPECT_CALL(visitor_mock_, OnInterimHeaders(Pointee(Property(
&BalsaHeaders::parsed_response_code, 100))));
EXPECT_CALL(visitor_mock_, HeaderDone());
ASSERT_EQ(balsa_frame_.ProcessInput(both_headers.data(), both_headers.size()),
both_headers.size())
<< balsa_frame_.ErrorCode();
ASSERT_FALSE(balsa_frame_.Error());
EXPECT_EQ(headers_.parsed_response_code(), 200);
EXPECT_CALL(visitor_mock_, MessageDone());
ASSERT_EQ(balsa_frame_.ProcessInput(body.data(), body.size()), body.size());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(balsa_frame_.ErrorCode(), BalsaFrameEnums::BALSA_NO_ERROR);
}
TEST_F(HTTPBalsaFrameTest, MultipleInterimHeaders) {
std::string all_headers =
"HTTP/1.1 100 Continue\r\n"
"\r\n"
"HTTP/1.1 103 Early Hints\r\n"
"\r\n"
"HTTP/1.1 200 OK\r\n"
"content-length: 3\r\n"
"\r\n";
std::string body = "foo";
balsa_frame_.set_is_request(false);
balsa_frame_.set_use_interim_headers_callback(true);
InSequence s;
EXPECT_CALL(visitor_mock_, OnInterimHeaders(Pointee(Property(
&BalsaHeaders::parsed_response_code, 100))));
EXPECT_CALL(visitor_mock_, OnInterimHeaders(Pointee(Property(
&BalsaHeaders::parsed_response_code, 103))));
EXPECT_CALL(visitor_mock_, HeaderDone());
ASSERT_EQ(balsa_frame_.ProcessInput(all_headers.data(), all_headers.size()),
all_headers.size())
<< balsa_frame_.ErrorCode();
ASSERT_FALSE(balsa_frame_.Error());
EXPECT_EQ(headers_.parsed_response_code(), 200);
EXPECT_CALL(visitor_mock_, MessageDone());
ASSERT_EQ(balsa_frame_.ProcessInput(body.data(), body.size()), body.size());
EXPECT_TRUE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(balsa_frame_.ErrorCode(), BalsaFrameEnums::BALSA_NO_ERROR);
}
TEST_F(HTTPBalsaFrameTest, SwitchingProtocols) {
const std::string headers =
"HTTP/1.1 101 Switching Protocols\r\n"
"\r\n";
const std::string body = "Bytes for the new protocol";
const std::string message = absl::StrCat(headers, body);
balsa_frame_.set_is_request(false);
balsa_frame_.set_use_interim_headers_callback(true);
InSequence s;
EXPECT_CALL(visitor_mock_, ProcessHeaders);
EXPECT_CALL(visitor_mock_, HeaderDone());
ASSERT_EQ(balsa_frame_.ProcessInput(message.data(), message.size()),
headers.size())
<< balsa_frame_.ErrorCode();
ASSERT_FALSE(balsa_frame_.Error());
EXPECT_EQ(headers_.parsed_response_code(), 101);
balsa_frame_.AllowArbitraryBody();
EXPECT_CALL(visitor_mock_, OnRawBodyInput("Bytes for the new protocol"));
EXPECT_CALL(visitor_mock_, OnBodyChunkInput("Bytes for the new protocol"));
EXPECT_CALL(visitor_mock_, MessageDone()).Times(0);
ASSERT_EQ(balsa_frame_.ProcessInput(body.data(), body.size()), body.size());
EXPECT_FALSE(balsa_frame_.MessageFullyRead());
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(balsa_frame_.ErrorCode(), BalsaFrameEnums::BALSA_NO_ERROR);
}
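// A request line without an HTTP version ("GET /") is treated as HTTP/0.9:
// the framer warns, reports an empty version, and completes the message.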
TEST_F(HTTPBalsaFrameTest, Http09) {
constexpr absl::string_view request = "GET /\r\n";
InSequence s;
StrictMock<BalsaVisitorMock> visitor_mock;
balsa_frame_.set_balsa_visitor(&visitor_mock);
EXPECT_CALL(
visitor_mock,
HandleWarning(
BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_REQUEST_URI));
EXPECT_CALL(visitor_mock, OnRequestFirstLineInput("GET /", "GET", "/", ""));
EXPECT_CALL(visitor_mock, OnHeaderInput(request));
EXPECT_CALL(visitor_mock, ProcessHeaders(FakeHeaders{}));
EXPECT_CALL(visitor_mock, HeaderDone());
EXPECT_CALL(visitor_mock, MessageDone());
EXPECT_EQ(request.size(),
balsa_frame_.ProcessInput(request.data(), request.size()));
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::FAILED_TO_FIND_WS_AFTER_REQUEST_REQUEST_URI,
balsa_frame_.ErrorCode());
}
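// Obsolete line folding (obs-fold): continuation lines are folded into the
// previous header value by default, but can be rejected via
// disallow_header_continuation_lines.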
TEST_F(HTTPBalsaFrameTest, ContinuationAllowed) {
const std::string message =
"GET / HTTP/1.1\r\n"
"key1: \n value starts with obs-fold\r\n"
"key2: value\n includes obs-fold\r\n"
"key3: value ends in obs-fold \n \r\n"
"\r\n";
FakeHeaders fake_headers;
fake_headers.AddKeyValue("key1", "value starts with obs-fold");
fake_headers.AddKeyValue("key2", "value\n includes obs-fold");
fake_headers.AddKeyValue("key3", "value ends in obs-fold");
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest, ContinuationDisallowed) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_header_continuation_lines = true;
balsa_frame_.set_http_validation_policy(http_validation_policy);
const std::string message =
"GET / HTTP/1.1\r\n"
"key: value\n includes obs-fold\r\n"
"\r\n";
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_FORMAT, balsa_frame_.ErrorCode());
}
TEST_F(HTTPBalsaFrameTest, NullAtBeginningOrEndOfValue) {
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
constexpr absl::string_view null_string("\0", 1);
const std::string message =
absl::StrCat("GET / HTTP/1.1\r\n",
"key1: ", null_string, "value starts with null\r\n",
"key2: value ends in null", null_string, "\r\n",
"\r\n");
FakeHeaders fake_headers;
fake_headers.AddKeyValue("key1", "value starts with null");
fake_headers.AddKeyValue("key2", "value ends in null");
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest, NullInMiddleOfValue) {
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
constexpr absl::string_view null_string("\0", 1);
const std::string message =
absl::StrCat("GET / HTTP/1.1\r\n",
"key: value ", null_string, "includes null\r\n",
"\r\n");
FakeHeaders fake_headers;
fake_headers.AddKeyValue(
"key", absl::StrCat("value ", null_string, "includes null"));
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_CHARACTER));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
}
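// obs-text (bytes >= 0x80) in header field names is governed by
// disallow_obs_text_in_field_names; obs-text in the reason phrase remains
// legal either way.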
TEST_F(HTTPBalsaFrameTest, ObsTextNotFoundIfNotPresent) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_obs_text_in_field_names = true;
balsa_frame_.set_http_validation_policy(http_validation_policy);
const std::string message =
absl::StrCat("GET / HTTP/1.1\r\n",
"key1: key does not contain obs-text\r\n",
"\r\n");
FakeHeaders fake_headers;
fake_headers.AddKeyValue("key1", "key does not contain obs-text");
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest, HeaderFieldNameWithObsTextButPolicyDisabled) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_obs_text_in_field_names = false;
balsa_frame_.set_http_validation_policy(http_validation_policy);
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
const std::string message =
absl::StrCat("GET / HTTP/1.1\r\n",
"\x80key1: key starts with obs-text\r\n",
"\r\n");
FakeHeaders fake_headers;
fake_headers.AddKeyValue("\x80key1", "key starts with obs-text");
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest, HeaderFieldNameWithObsTextAndPolicyEnabled) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_obs_text_in_field_names = true;
balsa_frame_.set_http_validation_policy(http_validation_policy);
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kOff);
const std::string message =
absl::StrCat("GET / HTTP/1.1\r\n",
"\x80key1: key starts with obs-text\r\n",
"\r\n");
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest, HeaderFieldNameWithObsTextAtEndRejected) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_obs_text_in_field_names = true;
balsa_frame_.set_http_validation_policy(http_validation_policy);
const std::string message =
absl::StrCat("GET / HTTP/1.1\r\n",
"key1\x93: key ends with obs-text\r\n",
"\r\n");
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest, HeaderFieldNameWithObsTextInMiddleRejected) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_obs_text_in_field_names = true;
balsa_frame_.set_http_validation_policy(http_validation_policy);
const std::string message =
absl::StrCat("GET / HTTP/1.1\r\n",
"ke\xffy1: key contains obs-text in middle\r\n",
"\r\n");
EXPECT_CALL(visitor_mock_,
HandleError(BalsaFrameEnums::INVALID_HEADER_NAME_CHARACTER));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(balsa_frame_.Error());
}
TEST_F(HTTPBalsaFrameTest, ObsTextInReasonPhraseAllowed) {
HttpValidationPolicy http_validation_policy;
http_validation_policy.disallow_obs_text_in_field_names = true;
balsa_frame_.set_http_validation_policy(http_validation_policy);
balsa_frame_.set_invalid_chars_level(BalsaFrame::InvalidCharsLevel::kError);
balsa_frame_.set_is_request(false);
const std::string message =
absl::StrCat("HTTP/1.1 200 O\x90K\r\n",
"surprising: obs-text allowed in reason phrase\r\n",
"content-length: 0\r\n"
"\r\n");
FakeHeaders fake_headers;
fake_headers.AddKeyValue("surprising", "obs-text allowed in reason phrase");
fake_headers.AddKeyValue("content-length", "0");
EXPECT_CALL(visitor_mock_, ProcessHeaders(fake_headers));
EXPECT_EQ(message.size(),
balsa_frame_.ProcessInput(message.data(), message.size()));
EXPECT_FALSE(balsa_frame_.Error());
EXPECT_EQ(BalsaFrameEnums::BALSA_NO_ERROR, balsa_frame_.ErrorCode());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/balsa/balsa_frame.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/balsa/balsa_frame_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
baa990a7-f3e5-4cf9-98a9-9ce6ba0b324b | cpp | google/cel-cpp | testing_descriptor_pool | internal/testing_descriptor_pool.cc | internal/testing_descriptor_pool_test.cc | #include "internal/testing_descriptor_pool.h"
#include <cstdint>
#include "google/protobuf/descriptor.pb.h"
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "google/protobuf/descriptor.h"
namespace cel::internal {
namespace {
ABSL_CONST_INIT const uint8_t kTestingDescriptorSet[] = {
#include "internal/testing_descriptor_set_embed.inc"
};
}
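// Lazily builds a process-wide DescriptorPool from the embedded
// FileDescriptorSet on first use; the pool is intentionally leaked so the
// returned pointer stays valid for the lifetime of the process.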
absl::Nonnull<const google::protobuf::DescriptorPool*> GetTestingDescriptorPool() {
static absl::Nonnull<const google::protobuf::DescriptorPool* const> pool = []() {
google::protobuf::FileDescriptorSet file_desc_set;
ABSL_CHECK(file_desc_set.ParseFromArray(
kTestingDescriptorSet, ABSL_ARRAYSIZE(kTestingDescriptorSet)));
auto* pool = new google::protobuf::DescriptorPool();
for (const auto& file_desc : file_desc_set.file()) {
ABSL_CHECK(pool->BuildFile(file_desc) != nullptr);
}
return pool;
}();
return pool;
}
} | #include "internal/testing_descriptor_pool.h"
#include "internal/testing.h"
#include "google/protobuf/descriptor.h"
namespace cel::internal {
namespace {
using ::testing::NotNull;
TEST(TestingDescriptorPool, NullValue) {
ASSERT_THAT(GetTestingDescriptorPool()->FindEnumTypeByName(
"google.protobuf.NullValue"),
NotNull());
}
TEST(TestingDescriptorPool, BoolValue) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.BoolValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_BOOLVALUE);
}
TEST(TestingDescriptorPool, Int32Value) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Int32Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_INT32VALUE);
}
TEST(TestingDescriptorPool, Int64Value) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Int64Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_INT64VALUE);
}
TEST(TestingDescriptorPool, UInt32Value) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.UInt32Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_UINT32VALUE);
}
TEST(TestingDescriptorPool, UInt64Value) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.UInt64Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_UINT64VALUE);
}
TEST(TestingDescriptorPool, FloatValue) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.FloatValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_FLOATVALUE);
}
TEST(TestingDescriptorPool, DoubleValue) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.DoubleValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_DOUBLEVALUE);
}
TEST(TestingDescriptorPool, BytesValue) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.BytesValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_BYTESVALUE);
}
TEST(TestingDescriptorPool, StringValue) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.StringValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_STRINGVALUE);
}
TEST(TestingDescriptorPool, Any) {
const auto* desc =
GetTestingDescriptorPool()->FindMessageTypeByName("google.protobuf.Any");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(), google::protobuf::Descriptor::WELLKNOWNTYPE_ANY);
}
TEST(TestingDescriptorPool, Duration) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Duration");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_DURATION);
}
TEST(TestingDescriptorPool, Timestamp) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Timestamp");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_TIMESTAMP);
}
TEST(TestingDescriptorPool, Value) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(), google::protobuf::Descriptor::WELLKNOWNTYPE_VALUE);
}
TEST(TestingDescriptorPool, ListValue) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.ListValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_LISTVALUE);
}
TEST(TestingDescriptorPool, Struct) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Struct");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(), google::protobuf::Descriptor::WELLKNOWNTYPE_STRUCT);
}
TEST(TestingDescriptorPool, FieldMask) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.FieldMask");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_FIELDMASK);
}
TEST(TestingDescriptorPool, Empty) {
const auto* desc = GetTestingDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Empty");
ASSERT_THAT(desc, NotNull());
}
TEST(TestingDescriptorPool, TestAllTypesProto2) {
EXPECT_THAT(GetTestingDescriptorPool()->FindMessageTypeByName(
"google.api.expr.test.v1.proto2.TestAllTypes"),
NotNull());
}
TEST(TestingDescriptorPool, TestAllTypesProto3) {
EXPECT_THAT(GetTestingDescriptorPool()->FindMessageTypeByName(
"google.api.expr.test.v1.proto3.TestAllTypes"),
NotNull());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/testing_descriptor_pool.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/testing_descriptor_pool_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
392aa368-f3b4-4624-abe5-8318bc62bf19 | cpp | tensorflow/tensorflow | test_utils | tensorflow/compiler/mlir/tfrt/translate/mlrt/test_utils.cc | tensorflow/core/lib/monitoring/test_utils_test.cc | #include "tensorflow/compiler/mlir/tfrt/translate/mlrt/test_utils.h"
#include <algorithm>
#include <cstring>
#include <functional>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/tfrt/mlrt/attribute/attribute.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/interpreter_testutil.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlrt {
namespace testing {
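// Serializes a single tensorflow::AttrValue into the raw byte format used by
// the mlrt attribute table: bools, ints, and floats become their in-memory
// byte representation, strings are stored verbatim, and string lists and
// tensors are encoded as mlrt bytecode structures.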
absl::StatusOr<std::string> EncodeAttribute(const tensorflow::AttrValue& attr) {
if (attr.has_b()) {
std::string result;
result.resize(sizeof(uint8_t));
uint8_t v = attr.b();
std::memcpy(result.data(), &v, sizeof(v));
return result;
}
if (attr.has_i()) {
std::string result;
result.resize(sizeof(int64_t));
int64_t v = attr.i();
std::memcpy(result.data(), &v, sizeof(v));
return result;
}
if (attr.has_f()) {
std::string result;
result.resize(sizeof(float));
float v = attr.f();
std::memcpy(result.data(), &v, sizeof(v));
return result;
}
if (attr.has_s()) {
return attr.s();
}
if (attr.has_list()) {
if (attr.list().s_size() > 0) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto ctor = mlrt::bc::New<mlrt::bc::Vector<mlrt::bc::String>>(
&allocator, attr.list().s_size());
for (int i = 0; i < attr.list().s_size(); ++i) {
ctor.ConstructAt(i, attr.list().s(i));
}
return std::string(buffer.data(), buffer.size());
}
}
if (attr.has_tensor()) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
tensorflow::Tensor tensor;
if (!tensor.FromProto(attr.tensor())) {
return absl::InvalidArgumentError("Invalid tensor proto.");
}
auto tensor_attr_ctor = mlrt::bc::New<tensorflow::tf_mlrt::TensorAttr>(
&allocator, tensor.dtype());
auto shape = tensor.shape().dim_sizes();
tensor_attr_ctor.construct_shape(shape.size())
.Assign(shape.begin(), shape.end());
auto tensor_data = tensor.tensor_data();
tensor_attr_ctor.construct_data(tensor_data.size())
.Place(tensor_data.data(), tensor_data.size());
return std::string(buffer.data(), buffer.size());
}
return absl::InvalidArgumentError("Unsupported attribute.");
}
namespace {
bool CanBeInlined(const tensorflow::AttrValue& attr) {
return attr.has_b() || attr.has_f();
}
}
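// Encodes every attribute in the map, keyed by its position in name-sorted
// order; attributes small enough to live in a register (bools and floats)
// are stored inline.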
absl::Status EncodeAttributes(AttributeTable& attributes,
const tensorflow::AttrValueMap& attr_map) {
std::vector<std::pair<std::string, tensorflow::AttrValue>> attrs(
attr_map.begin(), attr_map.end());
std::sort(attrs.begin(), attrs.end(),
[](const auto& x, const auto& y) { return x.first < y.first; });
for (int i = 0; i < attrs.size(); ++i) {
const tensorflow::AttrValue& attr = attrs[i].second;
TF_ASSIGN_OR_RETURN(auto attr_str, EncodeAttribute(attr));
if (CanBeInlined(attr)) {
attributes.AddInline(absl::StrCat(i), attr_str);
} else {
attributes.Add(absl::StrCat(i), attr_str);
}
}
return absl::OkStatus();
}
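// Builds a single test kernel whose arguments occupy registers
// [0, num_inputs) and whose results occupy the following num_outputs
// registers, together with the encoded attribute table it references.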
absl::StatusOr<std::pair<mlrt::bc::Kernel, mlrt::bc::Vector<mlrt::bc::String>>>
CreateKernelAndAttrs(int num_inputs, int num_outputs,
mlrt::ExecutionContext& exec_ctx, mlrt::bc::Buffer* buffer,
const tensorflow::AttrValueMap& attrs) {
mlrt::bc::Allocator allocator(buffer);
auto attributes_ctor = mlrt::bc::New<mlrt::bc::Vector<mlrt::bc::String>>(
&allocator, attrs.size());
AttributeTable attribute_table(attributes_ctor);
TF_RETURN_IF_ERROR(EncodeAttributes(attribute_table, attrs));
auto kernel_ctor = mlrt::bc::New<mlrt::bc::Kernel>(&allocator);
kernel_ctor.set_code(0);
std::vector<int> input_indices(num_inputs);
std::iota(input_indices.begin(), input_indices.end(), 0);
kernel_ctor.construct_arguments(input_indices.size())
.Assign(input_indices.begin(), input_indices.end());
std::vector<int> output_indices(num_outputs);
std::iota(output_indices.begin(), output_indices.end(), num_inputs);
kernel_ctor.construct_results(output_indices.size())
.Assign(output_indices.begin(), output_indices.end());
std::vector<uint32_t> attr_indices;
attr_indices.reserve(attrs.size());
for (int i = 0; i < attrs.size(); ++i) {
attr_indices.push_back(attribute_table.GetHandle(absl::StrCat(i)));
}
kernel_ctor.construct_attributes(attr_indices.size())
.Assign(attr_indices.begin(), attr_indices.end());
mlrt::bc::Vector<mlrt::bc::String> attributes(
buffer->Get(attributes_ctor.address()));
mlrt::bc::Kernel kernel(buffer->Get(kernel_ctor.address()));
return std::make_pair(kernel, attributes);
}
}
} | #include "tensorflow/core/lib/monitoring/test_utils.h"
#include <string>
#include "tensorflow/core/lib/monitoring/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace monitoring {
namespace testing {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
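// Parses a text-format proto into MessageType, returning InvalidArgument on
// malformed input.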
template <typename MessageType>
StatusOr<MessageType> ParseTextProto(const std::string& text_proto) {
protobuf::TextFormat::Parser parser;
MessageType parsed_proto;
protobuf::io::ArrayInputStream input_stream(text_proto.data(),
text_proto.size());
if (!parser.Parse(&input_stream, &parsed_proto)) {
return errors::InvalidArgument("Could not parse text proto: ", text_proto);
}
return parsed_proto;
}
TEST(HistogramTest, Subtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 5.0
num: 1.0
sum: 5.0
sum_squares: 25.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 0
bucket: 0
)pb"));
TF_ASSERT_OK_AND_ASSIGN(
Histogram delta, Histogram(histogram1).Subtract(Histogram(histogram2)));
EXPECT_FLOAT_EQ(delta.num(), 2.0);
EXPECT_FLOAT_EQ(delta.sum(), 550.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 252500.0);
EXPECT_FLOAT_EQ(delta.num(0), 0.0);
EXPECT_FLOAT_EQ(delta.num(1), 0.0);
EXPECT_FLOAT_EQ(delta.num(2), 1.0);
EXPECT_FLOAT_EQ(delta.num(3), 1.0);
}
TEST(HistogramTest, ReverseSubtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 5.0
num: 1.0
sum: 5.0
sum_squares: 25.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 0
bucket: 0
)pb"));
EXPECT_THAT(
Histogram(histogram2).Subtract(Histogram(histogram1)),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr("Failed to subtract a histogram by a larger histogram.")));
}
TEST(HistogramTest, NegativeSubtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: -100.0
max: 0.0
num: 5.0
sum: -500.0
sum_squares: 50000.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 5
bucket: 0
bucket: 0
bucket: 0
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: -100.0
max: 0.0
num: 2.0
sum: -200.0
sum_squares: 20000.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 2
bucket: 0
bucket: 0
bucket: 0
)pb"));
TF_ASSERT_OK_AND_ASSIGN(
Histogram delta, Histogram(histogram1).Subtract(Histogram(histogram2)));
EXPECT_FLOAT_EQ(delta.num(), 3.0);
EXPECT_FLOAT_EQ(delta.sum(), -300.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 30000.0);
EXPECT_FLOAT_EQ(delta.num(0), 3.0);
EXPECT_FLOAT_EQ(delta.num(1), 0.0);
EXPECT_FLOAT_EQ(delta.num(2), 0.0);
EXPECT_FLOAT_EQ(delta.num(3), 0.0);
}
TEST(HistogramTest, SingleBucketSubtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 1.0
num: 100.0
sum: 100.0
sum_squares: 100.0
bucket: 100
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 1.0
num: 50.0
sum: 50.0
sum_squares: 50.0
bucket: 50
)pb"));
TF_ASSERT_OK_AND_ASSIGN(
Histogram delta, Histogram(histogram1).Subtract(Histogram(histogram2)));
EXPECT_FLOAT_EQ(delta.num(), 50.0);
EXPECT_FLOAT_EQ(delta.sum(), 50.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 50.0);
EXPECT_FLOAT_EQ(delta.num(0), 50.0);
}
TEST(HistogramTest, SelfSubtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
TF_ASSERT_OK_AND_ASSIGN(Histogram delta,
Histogram(histogram).Subtract(Histogram(histogram)));
EXPECT_FLOAT_EQ(delta.num(), 0.0);
EXPECT_FLOAT_EQ(delta.sum(), 0.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(delta.num(0), 0.0);
EXPECT_FLOAT_EQ(delta.num(1), 0.0);
EXPECT_FLOAT_EQ(delta.num(2), 0.0);
EXPECT_FLOAT_EQ(delta.num(3), 0.0);
}
TEST(HistogramTest, SubtractEmptyHistogram) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
const HistogramProto empty;
TF_ASSERT_OK_AND_ASSIGN(Histogram delta,
Histogram(histogram).Subtract(Histogram(empty)));
EXPECT_FLOAT_EQ(delta.num(), 3.0);
EXPECT_FLOAT_EQ(delta.sum(), 555.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 252525.0);
EXPECT_FLOAT_EQ(delta.num(0), 0.0);
EXPECT_FLOAT_EQ(delta.num(1), 1.0);
EXPECT_FLOAT_EQ(delta.num(2), 1.0);
EXPECT_FLOAT_EQ(delta.num(3), 1.0);
}
TEST(HistogramTest, SubtractTwoEmptyHistograms) {
const HistogramProto histogram1;
const HistogramProto histogram2;
TF_ASSERT_OK_AND_ASSIGN(
Histogram delta, Histogram(histogram1).Subtract(Histogram(histogram2)));
EXPECT_FLOAT_EQ(delta.num(), 0.0);
EXPECT_FLOAT_EQ(delta.sum(), 0.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(delta.num(0), 0.0);
EXPECT_FLOAT_EQ(delta.num(1), 0.0);
EXPECT_FLOAT_EQ(delta.num(2), 0.0);
EXPECT_FLOAT_EQ(delta.num(3), 0.0);
}
TEST(HistogramTest, DifferentBuckets) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 50000.0
num: 5.0
sum: 55555.0
sum_squares: 2525252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket_limit: 1000.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
bucket: 2
)pb"));
EXPECT_THAT(
Histogram(histogram1).Subtract(Histogram(histogram2)),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Subtracting a histogram with different buckets.")));
}
TEST(PercentilesTest, Percentiles) {
tensorflow::monitoring::Percentiles percentiles_value;
percentiles_value.total_samples = 100;
percentiles_value.accumulator = -100;
Percentiles percentiles(percentiles_value);
EXPECT_EQ(percentiles.num(), 100);
EXPECT_FLOAT_EQ(percentiles.sum(), -100);
Percentiles delta = percentiles.Subtract(percentiles);
EXPECT_EQ(delta.num(), 0);
EXPECT_FLOAT_EQ(delta.sum(), 0);
delta = delta.Subtract(percentiles);
EXPECT_EQ(delta.num(), -100);
EXPECT_FLOAT_EQ(delta.sum(), 100);
}
TEST(PercentilesTest, Subtract) {
tensorflow::monitoring::Percentiles percentiles_value1;
percentiles_value1.total_samples = 100;
percentiles_value1.accumulator = 100;
Percentiles percentiles1(percentiles_value1);
EXPECT_EQ(percentiles1.num(), 100);
EXPECT_FLOAT_EQ(percentiles1.sum(), 100);
tensorflow::monitoring::Percentiles percentiles_value2;
percentiles_value2.total_samples = 90;
percentiles_value2.accumulator = 90;
Percentiles percentiles2(percentiles_value2);
EXPECT_EQ(percentiles2.num(), 90);
EXPECT_FLOAT_EQ(percentiles2.sum(), 90);
Percentiles delta = percentiles1.Subtract(percentiles2);
EXPECT_EQ(delta.num(), 10);
EXPECT_FLOAT_EQ(delta.sum(), 10);
}
TEST(PercentilesTest, ReverseSubtract) {
tensorflow::monitoring::Percentiles percentiles_value1;
percentiles_value1.total_samples = 100;
percentiles_value1.accumulator = 100;
Percentiles percentiles1(percentiles_value1);
EXPECT_EQ(percentiles1.num(), 100);
EXPECT_FLOAT_EQ(percentiles1.sum(), 100);
tensorflow::monitoring::Percentiles percentiles_value2;
percentiles_value2.total_samples = 90;
percentiles_value2.accumulator = 90;
Percentiles percentiles2(percentiles_value2);
EXPECT_EQ(percentiles2.num(), 90);
EXPECT_FLOAT_EQ(percentiles2.sum(), 90);
Percentiles delta = percentiles2.Subtract(percentiles1);
EXPECT_EQ(delta.num(), -10);
EXPECT_FLOAT_EQ(delta.sum(), -10);
}
TEST(PercentilesTest, SubtractEmptyPercentile) {
tensorflow::monitoring::Percentiles percentiles_value;
percentiles_value.total_samples = 1;
percentiles_value.accumulator = 1;
Percentiles percentiles(percentiles_value);
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), 1);
Percentiles empty_percentile((tensorflow::monitoring::Percentiles()));
EXPECT_EQ(empty_percentile.num(), 0);
EXPECT_FLOAT_EQ(empty_percentile.sum(), 0);
Percentiles delta = percentiles.Subtract(empty_percentile);
EXPECT_EQ(delta.num(), 1);
EXPECT_FLOAT_EQ(delta.sum(), 1);
}
TEST(PercentilesTest, EmptyPercentiles) {
Percentiles empty_percentile((tensorflow::monitoring::Percentiles()));
EXPECT_EQ(empty_percentile.num(), 0);
EXPECT_FLOAT_EQ(empty_percentile.sum(), 0);
Percentiles delta = empty_percentile.Subtract(empty_percentile);
EXPECT_EQ(delta.num(), 0);
EXPECT_FLOAT_EQ(delta.sum(), 0);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/translate/mlrt/test_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/test_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22b80074-28d5-4dc2-b4e0-e438581af639 | cpp | google/cel-cpp | duration | extensions/protobuf/internal/duration.cc | extensions/protobuf/internal/duration_test.cc | #include "extensions/protobuf/internal/duration.h"
#include <cstdint>
#include "google/protobuf/duration.pb.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "extensions/protobuf/internal/duration_lite.h"
#include "extensions/protobuf/internal/is_generated_message.h"
#include "extensions/protobuf/internal/is_message_lite.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel::extensions::protobuf_internal {
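// Extracts an absl::Duration from any message claiming to be
// google.protobuf.Duration: generated messages take a fast path, while
// dynamic messages are read via reflection after validating the seconds and
// nanos field descriptors.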
absl::StatusOr<absl::Duration> UnwrapDynamicDurationProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Duration");
const auto* desc = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(desc == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing descriptor"));
}
if constexpr (NotMessageLite<google::protobuf::Duration>) {
if (IsGeneratedMessage(message)) {
return UnwrapGeneratedDurationProto(
google::protobuf::DownCastMessage<google::protobuf::Duration>(message));
}
}
const auto* reflect = message.GetReflection();
if (ABSL_PREDICT_FALSE(reflect == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing reflection"));
}
const auto* seconds_field =
desc->FindFieldByNumber(google::protobuf::Duration::kSecondsFieldNumber);
if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(), " missing seconds field descriptor"));
}
if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_INT64)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(), " has unexpected seconds field type: ",
seconds_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(seconds_field->is_map() ||
seconds_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " has unexpected ",
seconds_field->name(), " field cardinality: REPEATED"));
}
const auto* nanos_field =
desc->FindFieldByNumber(google::protobuf::Duration::kNanosFieldNumber);
if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing nanos field descriptor"));
}
if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_INT32)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(),
" has unexpected nanos field type: ", nanos_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " has unexpected ",
nanos_field->name(), " field cardinality: REPEATED"));
}
return absl::Seconds(reflect->GetInt64(message, seconds_field)) +
absl::Nanoseconds(reflect->GetInt32(message, nanos_field));
}
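// Inverse of the above: splits the duration into whole seconds plus a
// nanosecond remainder with absl::IDivDuration and writes them back through
// reflection (or the generated fast path).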
absl::Status WrapDynamicDurationProto(absl::Duration value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Duration");
const auto* desc = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(desc == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing descriptor"));
}
if constexpr (NotMessageLite<google::protobuf::Duration>) {
if (IsGeneratedMessage(message)) {
return WrapGeneratedDurationProto(
value, google::protobuf::DownCastMessage<google::protobuf::Duration>(message));
}
}
const auto* reflect = message.GetReflection();
if (ABSL_PREDICT_FALSE(reflect == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing reflection"));
}
const auto* seconds_field =
desc->FindFieldByNumber(google::protobuf::Duration::kSecondsFieldNumber);
if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(), " missing seconds field descriptor"));
}
if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_INT64)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(), " has unexpected seconds field type: ",
seconds_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(seconds_field->is_map() ||
seconds_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " has unexpected ",
seconds_field->name(), " field cardinality: REPEATED"));
}
const auto* nanos_field =
desc->FindFieldByNumber(google::protobuf::Duration::kNanosFieldNumber);
if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing nanos field descriptor"));
}
if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_INT32)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(),
" has unexpected nanos field type: ", nanos_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " has unexpected ",
nanos_field->name(), " field cardinality: REPEATED"));
}
reflect->SetInt64(&message, seconds_field,
absl::IDivDuration(value, absl::Seconds(1), &value));
reflect->SetInt32(&message, nanos_field,
static_cast<int32_t>(absl::IDivDuration(
value, absl::Nanoseconds(1), &value)));
return absl::OkStatus();
}
} | #include "extensions/protobuf/internal/duration.h"
#include <memory>
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/memory/memory.h"
#include "absl/time/time.h"
#include "extensions/protobuf/internal/duration_lite.h"
#include "internal/testing.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/descriptor_database.h"
#include "google/protobuf/dynamic_message.h"
namespace cel::extensions::protobuf_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::Eq;
TEST(Duration, GeneratedFromProto) {
EXPECT_THAT(UnwrapGeneratedDurationProto(google::protobuf::Duration()),
IsOkAndHolds(Eq(absl::ZeroDuration())));
}
TEST(Duration, CustomFromProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Duration::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
EXPECT_THAT(UnwrapDynamicDurationProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.Duration"))),
IsOkAndHolds(Eq(absl::ZeroDuration())));
}
TEST(Duration, GeneratedToProto) {
google::protobuf::Duration proto;
ASSERT_OK(WrapGeneratedDurationProto(absl::Seconds(1) + absl::Nanoseconds(2),
proto));
EXPECT_EQ(proto.seconds(), 1);
EXPECT_EQ(proto.nanos(), 2);
}
TEST(Duration, CustomToProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Duration::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique(
factory
.GetPrototype(pool.FindMessageTypeByName("google.protobuf.Duration"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* seconds_field = descriptor->FindFieldByName("seconds");
ASSERT_NE(seconds_field, nullptr);
const auto* nanos_field = descriptor->FindFieldByName("nanos");
ASSERT_NE(nanos_field, nullptr);
ASSERT_OK(WrapDynamicDurationProto(absl::Seconds(1) + absl::Nanoseconds(2),
*proto));
EXPECT_EQ(reflection->GetInt64(*proto, seconds_field), 1);
EXPECT_EQ(reflection->GetInt32(*proto, nanos_field), 2);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/duration.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/duration_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
7c39691e-fb22-41a1-8231-568386ff230a | cpp | tensorflow/tensorflow | debug_events_writer | tensorflow/core/util/debug_events_writer.cc | tensorflow/core/util/debug_events_writer_test.cc | #include "tensorflow/core/util/debug_events_writer.h"
#include <deque>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace tfdbg {
namespace {
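// Stamps the event with the current wall time, in seconds, unless a
// timestamp was already set.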
void MaybeSetDebugEventTimestamp(DebugEvent* debug_event, Env* env) {
if (debug_event->wall_time() == 0) {
debug_event->set_wall_time(env->NowMicros() / 1e6);
}
}
}
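// SingleDebugEventFileWriter wraps one record file: Init() opens it lazily,
// WriteSerializedDebugEvent() appends records under a mutex, and Flush()
// syncs the file whenever events are outstanding.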
SingleDebugEventFileWriter::SingleDebugEventFileWriter(const string& file_path)
: env_(Env::Default()),
file_path_(file_path),
num_outstanding_events_(0),
writer_mu_() {}
Status SingleDebugEventFileWriter::Init() {
if (record_writer_ != nullptr) {
return absl::OkStatus();
}
record_writer_.reset();
TF_RETURN_WITH_CONTEXT_IF_ERROR(
env_->NewWritableFile(file_path_, &writable_file_),
"Creating writable file ", file_path_);
record_writer_ = std::make_unique<io::RecordWriter>(writable_file_.get());
if (record_writer_ == nullptr) {
return errors::Unknown("Could not create record writer at path: ",
file_path_);
}
num_outstanding_events_.store(0);
VLOG(1) << "Successfully opened debug events file: " << file_path_;
return absl::OkStatus();
}
void SingleDebugEventFileWriter::WriteSerializedDebugEvent(
StringPiece debug_event_str) {
if (record_writer_ == nullptr) {
if (!Init().ok()) {
LOG(ERROR) << "Write failed because file could not be opened.";
return;
}
}
num_outstanding_events_.fetch_add(1);
{
mutex_lock l(writer_mu_);
record_writer_->WriteRecord(debug_event_str).IgnoreError();
}
}
Status SingleDebugEventFileWriter::Flush() {
const int num_outstanding = num_outstanding_events_.load();
if (num_outstanding == 0) {
return absl::OkStatus();
}
if (writable_file_ == nullptr) {
return errors::Unknown("Unexpected NULL file for path: ", file_path_);
}
{
mutex_lock l(writer_mu_);
TF_RETURN_WITH_CONTEXT_IF_ERROR(record_writer_->Flush(), "Failed to flush ",
num_outstanding, " debug events to ",
file_path_);
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(writable_file_->Sync(), "Failed to sync ",
num_outstanding, " debug events to ",
file_path_);
num_outstanding_events_.store(0);
return absl::OkStatus();
}
Status SingleDebugEventFileWriter::Close() {
Status status = Flush();
if (writable_file_ != nullptr) {
Status close_status = writable_file_->Close();
if (!close_status.ok()) {
status = close_status;
}
record_writer_.reset(nullptr);
writable_file_.reset(nullptr);
}
num_outstanding_events_ = 0;
return status;
}
const string SingleDebugEventFileWriter::FileName() { return file_path_; }
mutex DebugEventsWriter::factory_mu_(LINKER_INITIALIZED);
DebugEventsWriter::~DebugEventsWriter() { Close().IgnoreError(); }
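// Returns the process-wide writer for the given dump root, creating it on
// first use; later calls with the same dump root return the same instance
// and ignore the run id and buffer size arguments.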
DebugEventsWriter* DebugEventsWriter::GetDebugEventsWriter(
const string& dump_root, const string& tfdbg_run_id,
int64_t circular_buffer_size) {
mutex_lock l(DebugEventsWriter::factory_mu_);
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>* writer_pool =
DebugEventsWriter::GetDebugEventsWriterMap();
if (writer_pool->find(dump_root) == writer_pool->end()) {
std::unique_ptr<DebugEventsWriter> writer(
new DebugEventsWriter(dump_root, tfdbg_run_id, circular_buffer_size));
writer_pool->insert(std::make_pair(dump_root, std::move(writer)));
}
return (*writer_pool)[dump_root].get();
}
Status DebugEventsWriter::LookUpDebugEventsWriter(
const string& dump_root, DebugEventsWriter** debug_events_writer) {
mutex_lock l(DebugEventsWriter::factory_mu_);
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>* writer_pool =
DebugEventsWriter::GetDebugEventsWriterMap();
if (writer_pool->find(dump_root) == writer_pool->end()) {
return errors::FailedPrecondition(
"No DebugEventsWriter has been created at dump root ", dump_root);
}
*debug_events_writer = (*writer_pool)[dump_root].get();
return absl::OkStatus();
}
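// Idempotent: creates the dump directory if needed, opens the metadata,
// source-file, stack-frame, graph, execution, and graph-execution-trace
// files under a shared time-stamped prefix, and writes the DebugMetadata
// record.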
Status DebugEventsWriter::Init() {
mutex_lock l(initialization_mu_);
if (is_initialized_) {
return absl::OkStatus();
}
if (!env_->IsDirectory(dump_root_).ok()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(env_->RecursivelyCreateDir(dump_root_),
"Failed to create directory ", dump_root_);
}
int64_t time_in_seconds = env_->NowMicros() / 1e6;
file_prefix_ = io::JoinPath(
dump_root_, strings::Printf("%s.%010lld.%s", kFileNamePrefix,
static_cast<long long>(time_in_seconds),
port::Hostname().c_str()));
TF_RETURN_IF_ERROR(InitNonMetadataFile(SOURCE_FILES));
TF_RETURN_IF_ERROR(InitNonMetadataFile(STACK_FRAMES));
TF_RETURN_IF_ERROR(InitNonMetadataFile(GRAPHS));
metadata_writer_.reset();
string metadata_filename = GetFileNameInternal(METADATA);
metadata_writer_ =
std::make_unique<SingleDebugEventFileWriter>(metadata_filename);
if (metadata_writer_ == nullptr) {
return errors::Unknown("Could not create debug event metadata file writer");
}
DebugEvent debug_event;
DebugMetadata* metadata = debug_event.mutable_debug_metadata();
metadata->set_tensorflow_version(TF_VERSION_STRING);
metadata->set_file_version(
strings::Printf("%s%d", kVersionPrefix, kCurrentFormatVersion));
metadata->set_tfdbg_run_id(tfdbg_run_id_);
TF_RETURN_IF_ERROR(SerializeAndWriteDebugEvent(&debug_event, METADATA));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
metadata_writer_->Flush(), "Failed to flush debug event metadata writer");
TF_RETURN_IF_ERROR(InitNonMetadataFile(EXECUTION));
TF_RETURN_IF_ERROR(InitNonMetadataFile(GRAPH_EXECUTION_TRACES));
is_initialized_ = true;
return absl::OkStatus();
}
Status DebugEventsWriter::WriteSourceFile(SourceFile* source_file) {
DebugEvent debug_event;
debug_event.set_allocated_source_file(source_file);
return SerializeAndWriteDebugEvent(&debug_event, SOURCE_FILES);
}
Status DebugEventsWriter::WriteStackFrameWithId(
StackFrameWithId* stack_frame_with_id) {
DebugEvent debug_event;
debug_event.set_allocated_stack_frame_with_id(stack_frame_with_id);
return SerializeAndWriteDebugEvent(&debug_event, STACK_FRAMES);
}
Status DebugEventsWriter::WriteGraphOpCreation(
GraphOpCreation* graph_op_creation) {
DebugEvent debug_event;
debug_event.set_allocated_graph_op_creation(graph_op_creation);
return SerializeAndWriteDebugEvent(&debug_event, GRAPHS);
}
Status DebugEventsWriter::WriteDebuggedGraph(DebuggedGraph* debugged_graph) {
DebugEvent debug_event;
debug_event.set_allocated_debugged_graph(debugged_graph);
return SerializeAndWriteDebugEvent(&debug_event, GRAPHS);
}
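// Execution events honor the circular buffer: with a positive buffer size
// the serialized event is queued (evicting the oldest entry when full) and
// only reaches disk on FlushExecutionFiles(); otherwise it is written out
// immediately.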
Status DebugEventsWriter::WriteExecution(Execution* execution) {
if (circular_buffer_size_ <= 0) {
DebugEvent debug_event;
debug_event.set_allocated_execution(execution);
return SerializeAndWriteDebugEvent(&debug_event, EXECUTION);
} else {
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
debug_event.set_allocated_execution(execution);
string serialized;
debug_event.SerializeToString(&serialized);
mutex_lock l(execution_buffer_mu_);
execution_buffer_.emplace_back(std::move(serialized));
if (execution_buffer_.size() > circular_buffer_size_) {
execution_buffer_.pop_front();
}
return absl::OkStatus();
}
}
Status DebugEventsWriter::WriteGraphExecutionTrace(
GraphExecutionTrace* graph_execution_trace) {
TF_RETURN_IF_ERROR(Init());
if (circular_buffer_size_ <= 0) {
DebugEvent debug_event;
debug_event.set_allocated_graph_execution_trace(graph_execution_trace);
return SerializeAndWriteDebugEvent(&debug_event, GRAPH_EXECUTION_TRACES);
} else {
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
debug_event.set_allocated_graph_execution_trace(graph_execution_trace);
string serialized;
debug_event.SerializeToString(&serialized);
mutex_lock l(graph_execution_trace_buffer_mu_);
graph_execution_trace_buffer_.emplace_back(std::move(serialized));
if (graph_execution_trace_buffer_.size() > circular_buffer_size_) {
graph_execution_trace_buffer_.pop_front();
}
return absl::OkStatus();
}
}
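// Convenience overload that assembles the GraphExecutionTrace proto from
// individual fields; output_slot and tensor_debug_mode are recorded only
// when positive.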
Status DebugEventsWriter::WriteGraphExecutionTrace(
const string& tfdbg_context_id, const string& device_name,
const string& op_name, int32_t output_slot, int32_t tensor_debug_mode,
const Tensor& tensor_value) {
std::unique_ptr<GraphExecutionTrace> trace(new GraphExecutionTrace());
trace->set_tfdbg_context_id(tfdbg_context_id);
if (!op_name.empty()) {
trace->set_op_name(op_name);
}
if (output_slot > 0) {
trace->set_output_slot(output_slot);
}
if (tensor_debug_mode > 0) {
trace->set_tensor_debug_mode(TensorDebugMode(tensor_debug_mode));
}
trace->set_device_name(device_name);
tensor_value.AsProtoTensorContent(trace->mutable_tensor_proto());
return WriteGraphExecutionTrace(trace.release());
}
void DebugEventsWriter::WriteSerializedNonExecutionDebugEvent(
const string& debug_event_str, DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
(*writer)->WriteSerializedDebugEvent(debug_event_str);
}
void DebugEventsWriter::WriteSerializedExecutionDebugEvent(
const string& debug_event_str, DebugEventFileType type) {
const std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
std::deque<string>* buffer = nullptr;
mutex* mu = nullptr;
switch (type) {
case EXECUTION:
writer = &execution_writer_;
buffer = &execution_buffer_;
mu = &execution_buffer_mu_;
break;
case GRAPH_EXECUTION_TRACES:
writer = &graph_execution_traces_writer_;
buffer = &graph_execution_trace_buffer_;
mu = &graph_execution_trace_buffer_mu_;
break;
default:
return;
}
if (circular_buffer_size_ <= 0) {
(*writer)->WriteSerializedDebugEvent(debug_event_str);
} else {
mutex_lock l(*mu);
buffer->push_back(debug_event_str);
if (buffer->size() > circular_buffer_size_) {
buffer->pop_front();
}
}
}
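// Assigns a stable 1-based id per device name; the first registration of a
// name also emits a DebuggedDevice event to the graphs file.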
int DebugEventsWriter::RegisterDeviceAndGetId(const string& device_name) {
mutex_lock l(device_mu_);
int& device_id = device_name_to_id_[device_name];
if (device_id == 0) {
device_id = device_name_to_id_.size();
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
DebuggedDevice* debugged_device = debug_event.mutable_debugged_device();
debugged_device->set_device_name(device_name);
debugged_device->set_device_id(device_id);
string serialized;
debug_event.SerializeToString(&serialized);
graphs_writer_->WriteSerializedDebugEvent(serialized);
}
return device_id;
}
Status DebugEventsWriter::FlushNonExecutionFiles() {
TF_RETURN_IF_ERROR(Init());
if (source_files_writer_ != nullptr) {
TF_RETURN_IF_ERROR(source_files_writer_->Flush());
}
if (stack_frames_writer_ != nullptr) {
TF_RETURN_IF_ERROR(stack_frames_writer_->Flush());
}
if (graphs_writer_ != nullptr) {
TF_RETURN_IF_ERROR(graphs_writer_->Flush());
}
return absl::OkStatus();
}
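// Drains both circular buffers to their writers before flushing, so the
// retained tail of buffered events reaches disk.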
Status DebugEventsWriter::FlushExecutionFiles() {
TF_RETURN_IF_ERROR(Init());
if (execution_writer_ != nullptr) {
if (circular_buffer_size_ > 0) {
mutex_lock l(execution_buffer_mu_);
while (!execution_buffer_.empty()) {
execution_writer_->WriteSerializedDebugEvent(execution_buffer_.front());
execution_buffer_.pop_front();
}
}
TF_RETURN_IF_ERROR(execution_writer_->Flush());
}
if (graph_execution_traces_writer_ != nullptr) {
if (circular_buffer_size_ > 0) {
mutex_lock l(graph_execution_trace_buffer_mu_);
while (!graph_execution_trace_buffer_.empty()) {
graph_execution_traces_writer_->WriteSerializedDebugEvent(
graph_execution_trace_buffer_.front());
graph_execution_trace_buffer_.pop_front();
}
}
TF_RETURN_IF_ERROR(graph_execution_traces_writer_->Flush());
}
return absl::OkStatus();
}
string DebugEventsWriter::FileName(DebugEventFileType type) {
if (file_prefix_.empty()) {
Init().IgnoreError();
}
return GetFileNameInternal(type);
}
Status DebugEventsWriter::Close() {
{
mutex_lock l(initialization_mu_);
if (!is_initialized_) {
return absl::OkStatus();
}
}
std::vector<string> failed_to_close_files;
if (metadata_writer_ != nullptr) {
if (!metadata_writer_->Close().ok()) {
failed_to_close_files.push_back(metadata_writer_->FileName());
}
metadata_writer_.reset(nullptr);
}
TF_RETURN_IF_ERROR(FlushNonExecutionFiles());
if (source_files_writer_ != nullptr) {
if (!source_files_writer_->Close().ok()) {
failed_to_close_files.push_back(source_files_writer_->FileName());
}
source_files_writer_.reset(nullptr);
}
if (stack_frames_writer_ != nullptr) {
if (!stack_frames_writer_->Close().ok()) {
failed_to_close_files.push_back(stack_frames_writer_->FileName());
}
stack_frames_writer_.reset(nullptr);
}
if (graphs_writer_ != nullptr) {
if (!graphs_writer_->Close().ok()) {
failed_to_close_files.push_back(graphs_writer_->FileName());
}
graphs_writer_.reset(nullptr);
}
TF_RETURN_IF_ERROR(FlushExecutionFiles());
if (execution_writer_ != nullptr) {
if (!execution_writer_->Close().ok()) {
failed_to_close_files.push_back(execution_writer_->FileName());
}
execution_writer_.reset(nullptr);
}
if (graph_execution_traces_writer_ != nullptr) {
if (!graph_execution_traces_writer_->Close().ok()) {
failed_to_close_files.push_back(
graph_execution_traces_writer_->FileName());
}
graph_execution_traces_writer_.reset(nullptr);
}
if (failed_to_close_files.empty()) {
return absl::OkStatus();
} else {
    // errors::FailedPrecondition concatenates its arguments rather than
    // interpreting printf-style format specifiers, so the count must be
    // spliced in by concatenation instead of via "%d".
    return errors::FailedPrecondition(
        "Failed to close ", failed_to_close_files.size(),
        " debug-events files associated with tfdbg");
}
}
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>*
DebugEventsWriter::GetDebugEventsWriterMap() {
static std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>*
writer_pool =
new std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>();
return writer_pool;
}
DebugEventsWriter::DebugEventsWriter(const string& dump_root,
const string& tfdbg_run_id,
int64_t circular_buffer_size)
: env_(Env::Default()),
dump_root_(dump_root),
tfdbg_run_id_(tfdbg_run_id),
is_initialized_(false),
initialization_mu_(),
circular_buffer_size_(circular_buffer_size),
execution_buffer_(),
execution_buffer_mu_(),
graph_execution_trace_buffer_(),
graph_execution_trace_buffer_mu_(),
device_name_to_id_(),
device_mu_() {}
Status DebugEventsWriter::InitNonMetadataFile(DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
const string filename = GetFileNameInternal(type);
writer->reset();
*writer = std::make_unique<SingleDebugEventFileWriter>(filename);
if (*writer == nullptr) {
return errors::Unknown("Could not create debug event file writer for ",
filename);
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(
(*writer)->Init(), "Initializing debug event writer at path ", filename);
VLOG(1) << "Successfully opened debug event file: " << filename;
return absl::OkStatus();
}
Status DebugEventsWriter::SerializeAndWriteDebugEvent(DebugEvent* debug_event,
DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
if (writer != nullptr) {
MaybeSetDebugEventTimestamp(debug_event, env_);
string str;
debug_event->AppendToString(&str);
(*writer)->WriteSerializedDebugEvent(str);
return absl::OkStatus();
} else {
return errors::Internal(
"Unable to find debug events file writer for DebugEventsFileType ",
type);
}
}
void DebugEventsWriter::SelectWriter(
DebugEventFileType type,
std::unique_ptr<SingleDebugEventFileWriter>** writer) {
switch (type) {
case METADATA:
*writer = &metadata_writer_;
break;
case SOURCE_FILES:
*writer = &source_files_writer_;
break;
case STACK_FRAMES:
*writer = &stack_frames_writer_;
break;
case GRAPHS:
*writer = &graphs_writer_;
break;
case EXECUTION:
*writer = &execution_writer_;
break;
case GRAPH_EXECUTION_TRACES:
*writer = &graph_execution_traces_writer_;
break;
}
}
const string DebugEventsWriter::GetSuffix(DebugEventFileType type) {
switch (type) {
case METADATA:
return kMetadataSuffix;
case SOURCE_FILES:
return kSourceFilesSuffix;
case STACK_FRAMES:
return kStackFramesSuffix;
case GRAPHS:
return kGraphsSuffix;
case EXECUTION:
return kExecutionSuffix;
case GRAPH_EXECUTION_TRACES:
return kGraphExecutionTracesSuffix;
default:
string suffix;
return suffix;
}
}
string DebugEventsWriter::GetFileNameInternal(DebugEventFileType type) {
const string suffix = GetSuffix(type);
return strings::StrCat(file_prefix_, ".", suffix);
}
}
} | #include "tensorflow/core/util/debug_events_writer.h"
#include <algorithm>
#include <atomic>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfdbg {
Env* env() { return Env::Default(); }
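// Fixture providing helpers to read back the records a DebugEventsWriter
// produced, plus a fresh time-stamped dump root per test.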
class DebugEventsWriterTest : public ::testing::Test {
public:
static string GetDebugEventFileName(DebugEventsWriter* writer,
DebugEventFileType type) {
return writer->FileName(type);
}
static void ReadDebugEventProtos(DebugEventsWriter* writer,
DebugEventFileType type,
std::vector<DebugEvent>* protos) {
protos->clear();
const string filename = writer->FileName(type);
std::unique_ptr<RandomAccessFile> debug_events_file;
TF_CHECK_OK(env()->NewRandomAccessFile(filename, &debug_events_file));
io::RecordReader* reader = new io::RecordReader(debug_events_file.get());
uint64 offset = 0;
DebugEvent actual;
while (ReadDebugEventProto(reader, &offset, &actual)) {
protos->push_back(actual);
}
delete reader;
}
static bool ReadDebugEventProto(io::RecordReader* reader, uint64* offset,
DebugEvent* proto) {
tstring record;
Status s = reader->ReadRecord(offset, &record);
if (!s.ok()) {
return false;
}
return ParseProtoUnlimited(proto, record);
}
void SetUp() override {
dump_root_ = io::JoinPath(
testing::TmpDir(),
strings::Printf("%010lld", static_cast<long long>(env()->NowMicros())));
tfdbg_run_id_ = "test_tfdbg_run_id";
}
void TearDown() override {
if (env()->IsDirectory(dump_root_).ok()) {
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
TF_ASSERT_OK(env()->DeleteRecursively(dump_root_, &undeleted_files,
&undeleted_dirs));
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
string dump_root_;
string tfdbg_run_id_;
};
TEST_F(DebugEventsWriterTest, GetDebugEventsWriterSameRootGivesSameObject) {
DebugEventsWriter* writer_1 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
DebugEventsWriter* writer_2 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
EXPECT_EQ(writer_1, writer_2);
}
TEST_F(DebugEventsWriterTest, ConcurrentGetDebugEventsWriterSameDumpRoot) {
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 4);
std::vector<DebugEventsWriter*> writers;
mutex mu;
auto fn = [this, &writers, &mu]() {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
{
mutex_lock l(mu);
writers.push_back(writer);
}
};
for (size_t i = 0; i < 4; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
EXPECT_EQ(writers.size(), 4);
EXPECT_EQ(writers[0], writers[1]);
EXPECT_EQ(writers[1], writers[2]);
EXPECT_EQ(writers[2], writers[3]);
}
TEST_F(DebugEventsWriterTest, ConcurrentGetDebugEventsWriterDiffDumpRoots) {
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 3);
std::atomic_int_fast64_t counter(0);
std::vector<DebugEventsWriter*> writers;
mutex mu;
auto fn = [this, &counter, &writers, &mu]() {
const string new_dump_root =
        io::JoinPath(dump_root_, strings::Printf("%lld", static_cast<long long>(
                                                             counter.fetch_add(1))));
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
new_dump_root, tfdbg_run_id_,
DebugEventsWriter::kDefaultCyclicBufferSize);
{
mutex_lock l(mu);
writers.push_back(writer);
}
};
for (size_t i = 0; i < 3; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
EXPECT_EQ(writers.size(), 3);
EXPECT_NE(writers[0], writers[1]);
EXPECT_NE(writers[0], writers[2]);
EXPECT_NE(writers[1], writers[2]);
}
TEST_F(DebugEventsWriterTest, GetDebugEventsWriterDifferentRoots) {
DebugEventsWriter* writer_1 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
const string dump_root_2 = io::JoinPath(dump_root_, "subdirectory");
DebugEventsWriter* writer_2 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_2, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
EXPECT_NE(writer_1, writer_2);
}
TEST_F(DebugEventsWriterTest, GetAndInitDebugEventsWriter) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
const string file_version = actuals[0].debug_metadata().file_version();
EXPECT_EQ(file_version.find(DebugEventsWriter::kVersionPrefix), 0);
EXPECT_GT(file_version.size(), strlen(DebugEventsWriter::kVersionPrefix));
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
}
TEST_F(DebugEventsWriterTest, CallingCloseWithoutInitIsOkay) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, CallingCloseTwiceIsOkay) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Close());
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, ConcurrentInitCalls) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 4);
auto fn = [&writer]() { TF_ASSERT_OK(writer->Init()); };
for (size_t i = 0; i < 3; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
const string file_version = actuals[0].debug_metadata().file_version();
EXPECT_EQ(file_version.find(DebugEventsWriter::kVersionPrefix), 0);
EXPECT_GT(file_version.size(), strlen(DebugEventsWriter::kVersionPrefix));
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
}
TEST_F(DebugEventsWriterTest, InitTwiceDoesNotCreateNewMetadataFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
EXPECT_GE(actuals[0].debug_metadata().file_version().size(), 0);
string metadata_path_1 =
GetDebugEventFileName(writer, DebugEventFileType::METADATA);
TF_ASSERT_OK(writer->Init());
EXPECT_EQ(GetDebugEventFileName(writer, DebugEventFileType::METADATA),
metadata_path_1);
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
EXPECT_GE(actuals[0].debug_metadata().file_version().size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteSourceFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
SourceFile* source_file_1 = new SourceFile();
source_file_1->set_file_path("/home/tf_programs/main.py");
source_file_1->set_host_name("localhost.localdomain");
source_file_1->add_lines("import tensorflow as tf");
source_file_1->add_lines("");
source_file_1->add_lines("print(tf.constant([42.0]))");
source_file_1->add_lines("");
TF_ASSERT_OK(writer->WriteSourceFile(source_file_1));
SourceFile* source_file_2 = new SourceFile();
source_file_2->set_file_path("/home/tf_programs/train.py");
source_file_2->set_host_name("localhost.localdomain");
source_file_2->add_lines("import tensorflow.keras as keras");
source_file_2->add_lines("");
source_file_2->add_lines("model = keras.Sequential()");
TF_ASSERT_OK(writer->WriteSourceFile(source_file_2));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
SourceFile actual_source_file_1 = actuals[0].source_file();
EXPECT_EQ(actual_source_file_1.file_path(), "/home/tf_programs/main.py");
EXPECT_EQ(actual_source_file_1.host_name(), "localhost.localdomain");
EXPECT_EQ(actual_source_file_1.lines().size(), 4);
EXPECT_EQ(actual_source_file_1.lines()[0], "import tensorflow as tf");
EXPECT_EQ(actual_source_file_1.lines()[1], "");
EXPECT_EQ(actual_source_file_1.lines()[2], "print(tf.constant([42.0]))");
EXPECT_EQ(actual_source_file_1.lines()[3], "");
SourceFile actual_source_file_2 = actuals[1].source_file();
EXPECT_EQ(actual_source_file_2.file_path(), "/home/tf_programs/train.py");
EXPECT_EQ(actual_source_file_2.host_name(), "localhost.localdomain");
EXPECT_EQ(actual_source_file_2.lines().size(), 3);
EXPECT_EQ(actual_source_file_2.lines()[0],
"import tensorflow.keras as keras");
EXPECT_EQ(actual_source_file_2.lines()[1], "");
EXPECT_EQ(actual_source_file_2.lines()[2], "model = keras.Sequential()");
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
}
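// SourceFile events belong to the "non-execution" file set, together with
// stack frames and graphs: they are buffered in memory and reach disk only
// once FlushNonExecutionFiles() runs (or Close() flushes implicitly), and
// the strictly increasing wall_time values checked above preserve the write
// order across events.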
TEST_F(DebugEventsWriterTest, WriteStackFramesFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
StackFrameWithId* stack_frame_1 = new StackFrameWithId();
stack_frame_1->set_id("deadbeaf");
GraphDebugInfo::FileLineCol* file_line_col =
stack_frame_1->mutable_file_line_col();
file_line_col->set_file_index(12);
file_line_col->set_line(20);
file_line_col->set_col(2);
file_line_col->set_func("my_func");
file_line_col->set_code(" x = y + z");
StackFrameWithId* stack_frame_2 = new StackFrameWithId();
stack_frame_2->set_id("eeeeeeec");
file_line_col = stack_frame_2->mutable_file_line_col();
file_line_col->set_file_index(12);
file_line_col->set_line(21);
file_line_col->set_col(4);
file_line_col->set_func("my_func");
file_line_col->set_code(" x = x ** 2.0");
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame_1));
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame_2));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
StackFrameWithId actual_stack_frame_1 = actuals[0].stack_frame_with_id();
EXPECT_EQ(actual_stack_frame_1.id(), "deadbeaf");
GraphDebugInfo::FileLineCol file_line_col_1 =
actual_stack_frame_1.file_line_col();
EXPECT_EQ(file_line_col_1.file_index(), 12);
EXPECT_EQ(file_line_col_1.line(), 20);
EXPECT_EQ(file_line_col_1.col(), 2);
EXPECT_EQ(file_line_col_1.func(), "my_func");
EXPECT_EQ(file_line_col_1.code(), " x = y + z");
StackFrameWithId actual_stack_frame_2 = actuals[1].stack_frame_with_id();
EXPECT_EQ(actual_stack_frame_2.id(), "eeeeeeec");
GraphDebugInfo::FileLineCol file_line_col_2 =
actual_stack_frame_2.file_line_col();
EXPECT_EQ(file_line_col_2.file_index(), 12);
EXPECT_EQ(file_line_col_2.line(), 21);
EXPECT_EQ(file_line_col_2.col(), 4);
EXPECT_EQ(file_line_col_2.func(), "my_func");
EXPECT_EQ(file_line_col_2.code(), " x = x ** 2.0");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteGraphOpCreationAndDebuggedGraph) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
GraphOpCreation* graph_op_creation = new GraphOpCreation();
graph_op_creation->set_op_type("MatMul");
graph_op_creation->set_op_name("Dense_1/MatMul");
TF_ASSERT_OK(writer->WriteGraphOpCreation(graph_op_creation));
DebuggedGraph* debugged_graph = new DebuggedGraph();
debugged_graph->set_graph_id("deadbeaf");
debugged_graph->set_graph_name("my_func_graph");
TF_ASSERT_OK(writer->WriteDebuggedGraph(debugged_graph));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
GraphOpCreation actual_op_creation = actuals[0].graph_op_creation();
EXPECT_EQ(actual_op_creation.op_type(), "MatMul");
EXPECT_EQ(actual_op_creation.op_name(), "Dense_1/MatMul");
DebuggedGraph actual_debugged_graph = actuals[1].debugged_graph();
EXPECT_EQ(actual_debugged_graph.graph_id(), "deadbeaf");
EXPECT_EQ(actual_debugged_graph.graph_name(), "my_func_graph");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteCallsToTheSameFile) {
const size_t kConcurrentWrites = 100;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
const string file_path = strings::Printf(
"/home/tf_programs/program_%.3ld.py", counter.fetch_add(1));
SourceFile* source_file = new SourceFile();
source_file->set_file_path(file_path);
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites);
std::vector<string> file_paths;
std::vector<string> host_names;
for (size_t i = 0; i < kConcurrentWrites; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (size_t i = 0; i < kConcurrentWrites; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.3ld.py", i));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteAndFlushCallsToTheSameFile) {
const size_t kConcurrentWrites = 100;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
const string file_path = strings::Printf(
"/home/tf_programs/program_%.3ld.py", counter.fetch_add(1));
SourceFile* source_file = new SourceFile();
source_file->set_file_path(file_path);
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites);
std::vector<string> file_paths;
std::vector<string> host_names;
for (size_t i = 0; i < kConcurrentWrites; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (size_t i = 0; i < kConcurrentWrites; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.3ld.py", i));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteCallsToTheDifferentFiles) {
const int32_t kConcurrentWrites = 30;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 10);
std::atomic_int_fast32_t counter(0);
auto fn = [&writer, &counter]() {
const int32_t index = counter.fetch_add(1);
if (index % 3 == 0) {
SourceFile* source_file = new SourceFile();
source_file->set_file_path(
strings::Printf("/home/tf_programs/program_%.2d.py", index));
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
} else if (index % 3 == 1) {
StackFrameWithId* stack_frame = new StackFrameWithId();
stack_frame->set_id(strings::Printf("e%.2d", index));
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame));
} else {
GraphOpCreation* op_creation = new GraphOpCreation();
op_creation->set_op_type("Log");
op_creation->set_op_name(strings::Printf("Log_%.2d", index));
TF_ASSERT_OK(writer->WriteGraphOpCreation(op_creation));
}
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> file_paths;
std::vector<string> host_names;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.2d.py", i * 3));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> stack_frame_ids;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
stack_frame_ids.push_back(actuals[i].stack_frame_with_id().id());
}
std::sort(stack_frame_ids.begin(), stack_frame_ids.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(stack_frame_ids[i], strings::Printf("e%.2d", i * 3 + 1));
}
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> op_types;
std::vector<string> op_names;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
op_types.push_back(actuals[i].graph_op_creation().op_type());
op_names.push_back(actuals[i].graph_op_creation().op_name());
}
std::sort(op_names.begin(), op_names.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(op_types[i], "Log");
EXPECT_EQ(op_names[i], strings::Printf("Log_%.2d", i * 3 + 2));
}
}
TEST_F(DebugEventsWriterTest, WriteExecutionWithCyclicBufferNoFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
Execution* execution = new Execution();
execution->set_op_type("Log");
execution->add_input_tensor_ids(i);
TF_ASSERT_OK(writer->WriteExecution(execution));
}
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), 0);
TF_ASSERT_OK(writer->Close());
}
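// With a cyclic buffer of size N, execution events live in an in-memory ring
// that retains only the most recent N entries; nothing is written to the
// events file until FlushExecutionFiles() is called, which is why the read
// above observes zero protos despite 2 * N writes.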
TEST_F(DebugEventsWriterTest, WriteExecutionWithCyclicBufferFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
Execution* execution = new Execution();
execution->set_op_type("Log");
execution->add_input_tensor_ids(i);
TF_ASSERT_OK(writer->WriteExecution(execution));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
EXPECT_EQ(actuals[i].execution().op_type(), "Log");
EXPECT_EQ(actuals[i].execution().input_tensor_ids().size(), 1);
EXPECT_EQ(actuals[i].execution().input_tensor_ids()[0],
kCyclicBufferSize + i);
}
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
Execution* execution = new Execution();
execution->set_op_type("Abs");
execution->add_input_tensor_ids(counter.fetch_add(1));
TF_ASSERT_OK(writer->WriteExecution(execution));
};
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize * 2);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
const size_t index = i + kCyclicBufferSize;
EXPECT_EQ(actuals[index].execution().op_type(), "Abs");
EXPECT_EQ(actuals[index].execution().input_tensor_ids().size(), 1);
EXPECT_GE(actuals[index].execution().input_tensor_ids()[0], 0);
EXPECT_LE(actuals[index].execution().input_tensor_ids()[0],
kCyclicBufferSize * 2);
}
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithCyclicBufferNoFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_%.2ld", i));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
}
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithoutPreviousInitCall) {
  const size_t kCyclicBufferSize = -1;  // Wraps to SIZE_MAX: effectively an unbounded cyclic buffer.
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_0"));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_EQ(actuals[0].graph_execution_trace().tfdbg_context_id(), "graph_0");
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithCyclicBufferFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_%.2ld", i));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
EXPECT_EQ(actuals[i].graph_execution_trace().tfdbg_context_id(),
strings::Printf("graph_%.2ld", i + kCyclicBufferSize));
}
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(
strings::Printf("new_graph_%.2ld", counter.fetch_add(1)));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
};
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize * 2);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
const size_t index = i + kCyclicBufferSize;
EXPECT_EQ(actuals[index].graph_execution_trace().tfdbg_context_id().find(
"new_graph_"),
0);
}
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, RegisterDeviceAndGetIdTrace) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
int device_ids[8];
for (int i = 0; i < 8; ++i) {
thread_pool->Schedule([i, &writer, &device_ids]() {
const string device_name = strings::Printf(
"/job:localhost/replica:0/task:0/device:GPU:%d", i % 4);
device_ids[i] = writer->RegisterDeviceAndGetId(device_name);
});
}
delete thread_pool;
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
EXPECT_EQ(device_ids[0], device_ids[4]);
EXPECT_EQ(device_ids[1], device_ids[5]);
EXPECT_EQ(device_ids[2], device_ids[6]);
EXPECT_EQ(device_ids[3], device_ids[7]);
EXPECT_EQ(absl::flat_hash_set<int>(device_ids, device_ids + 8).size(), 4);
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 4);
for (const DebugEvent& actual : actuals) {
const string& device_name = actual.debugged_device().device_name();
int device_index = -1;
CHECK(absl::SimpleAtoi(device_name.substr(strlen(
"/job:localhost/replica:0/task:0/device:GPU:")),
&device_index));
EXPECT_EQ(actual.debugged_device().device_id(), device_ids[device_index]);
}
}
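// RegisterDeviceAndGetId() deduplicates on device name: the eight concurrent
// registrations above collapse into four unique ids, and each unique device
// emits exactly one DebuggedDevice event into the GRAPHS file.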
TEST_F(DebugEventsWriterTest, DisableCyclicBufferBehavior) {
const size_t kCyclicBufferSize = 0;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
const size_t kNumEvents = 20;
for (size_t i = 0; i < kNumEvents; ++i) {
Execution* execution = new Execution();
execution->set_op_type("Log");
execution->add_input_tensor_ids(i);
TF_ASSERT_OK(writer->WriteExecution(execution));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), kNumEvents);
for (size_t i = 0; i < kNumEvents; ++i) {
EXPECT_EQ(actuals[i].execution().op_type(), "Log");
EXPECT_EQ(actuals[i].execution().input_tensor_ids().size(), 1);
EXPECT_EQ(actuals[i].execution().input_tensor_ids()[0], i);
}
for (size_t i = 0; i < kNumEvents; ++i) {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_%.2ld", i));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), kNumEvents);
for (size_t i = 0; i < kNumEvents; ++i) {
EXPECT_EQ(actuals[i].graph_execution_trace().tfdbg_context_id(),
strings::Printf("graph_%.2ld", i));
}
TF_ASSERT_OK(writer->Close());
}
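// A cyclic buffer size of 0 disables the ring entirely: every execution and
// graph-execution-trace event is appended in write order, so flushing yields
// all kNumEvents protos instead of only the trailing window.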
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/debug_events_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/debug_events_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94631c1a-afbc-4275-be7c-6ef151723962 | cpp | tensorflow/tensorflow | sign | tensorflow/lite/experimental/shlo/ops/sign.cc | tensorflow/lite/experimental/shlo/ops/sign_test.cc | #include "tensorflow/lite/experimental/shlo/ops/sign.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
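// Sign maps negative inputs to -1 and positive inputs to +1; anything else
// (zero or NaN) is returned unchanged, so signed zeros and NaNs propagate:
//   Sign()(-3.0f) == -1.0f;  Sign()(0.0f) == 0.0f;  Sign()(2.5f) == 1.0f.
// The F16/BF16 specializations below route the computation through float
// rather than instantiating the generic body for those class types.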
struct Sign {
template <class T>
T operator()(T v) const {
constexpr T one = static_cast<T>(1);
constexpr T minus_one = static_cast<T>(-1);
constexpr T zero = static_cast<T>(0);
return v < zero ? minus_one : (v > zero ? one : v);
}
};
template <>
F16 Sign::operator()(F16 v) const {
return static_cast<F16>(operator()(static_cast<float>(v)));
}
template <>
BF16 Sign::operator()(BF16 v) const {
return static_cast<BF16>(operator()(static_cast<float>(v)));
}
SignOp Create(SignOp::Attributes) { return {}; }
absl::Status Prepare(SignOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("sign"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("sign"), input, output));
return absl::OkStatus();
}
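// Evaluate() dispatches on the tensor's element type: per-tensor quantized
// inputs are dequantized, transformed, and requantized; signed integer and
// float tensors run the functor elementwise; any other type falls through to
// a FailedPreconditionError.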
absl::Status Evaluate(SignOp& op, const Tensor& input, Tensor& output) {
Sign sign;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), sign, input,
output)
} else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), sign, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.sign: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/sign.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<SignOp> {
static std::string Get() { return "Sign"; }
};
namespace {
struct Sign {
template <class T>
T operator()(T v) const {
constexpr T one = static_cast<T>(1);
constexpr T minus_one = static_cast<T>(-1);
constexpr T zero = static_cast<T>(0);
return v < zero ? minus_one : (v > zero ? one : v);
}
} sign_ref;
template <>
F16 Sign::operator()(F16 v) const {
return static_cast<F16>(operator()(static_cast<float>(v)));
}
template <>
BF16 Sign::operator()(BF16 v) const {
return static_cast<BF16>(operator()(static_cast<float>(v)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Sign, UnaryElementwiseOpShapePropagationTest,
SignOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Sign, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<SignOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<SignOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Sign, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct SignTest : ::testing::Test {};
TYPED_TEST_SUITE(SignTest, ArithmeticTestTypes, TestParamNames);
TYPED_TEST(SignTest, ArithmeticTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), sign_ref);
auto op = Create(SignOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedSignTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedSignTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedSignTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = sign_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(SignOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/sign.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/sign_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4319d63b-c78f-484c-8fe8-e6acfcb63ac0 | cpp | google/glog | demangle | src/demangle.cc | src/demangle_unittest.cc | #include "demangle.h"
#include <algorithm>
#include <cstdlib>
#include <limits>
#include "utilities.h"
#if defined(HAVE___CXA_DEMANGLE)
# include <cxxabi.h>
#endif
#if defined(GLOG_OS_WINDOWS)
# include <dbghelp.h>
#endif
namespace google {
inline namespace glog_internal_namespace_ {
#if !defined(GLOG_OS_WINDOWS) && !defined(HAVE___CXA_DEMANGLE)
namespace {
struct AbbrevPair {
const char* const abbrev;
const char* const real_name;
};
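// Two-letter operator codes from the Itanium C++ ABI mangling and their C++
// spellings, e.g. "pl" encodes operator+, "ix" operator[], and "nw"/"na"/
// "dl"/"da" cover the allocation and deallocation operators.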
const AbbrevPair kOperatorList[] = {
{"nw", "new"}, {"na", "new[]"}, {"dl", "delete"}, {"da", "delete[]"},
{"ps", "+"}, {"ng", "-"}, {"ad", "&"}, {"de", "*"},
{"co", "~"}, {"pl", "+"}, {"mi", "-"}, {"ml", "*"},
{"dv", "/"}, {"rm", "%"}, {"an", "&"}, {"or", "|"},
{"eo", "^"}, {"aS", "="}, {"pL", "+="}, {"mI", "-="},
{"mL", "*="}, {"dV", "/="}, {"rM", "%="}, {"aN", "&="},
{"oR", "|="}, {"eO", "^="}, {"ls", "<<"}, {"rs", ">>"},
{"lS", "<<="}, {"rS", ">>="}, {"eq", "=="}, {"ne", "!="},
{"lt", "<"}, {"gt", ">"}, {"le", "<="}, {"ge", ">="},
{"nt", "!"}, {"aa", "&&"}, {"oo", "||"}, {"pp", "++"},
{"mm", "--"}, {"cm", ","}, {"pm", "->*"}, {"pt", "->"},
{"cl", "()"}, {"ix", "[]"}, {"qu", "?"}, {"st", "sizeof"},
{"sz", "sizeof"}, {nullptr, nullptr},
};
const AbbrevPair kBuiltinTypeList[] = {
{"v", "void"}, {"w", "wchar_t"},
{"b", "bool"}, {"c", "char"},
{"a", "signed char"}, {"h", "unsigned char"},
{"s", "short"}, {"t", "unsigned short"},
{"i", "int"}, {"j", "unsigned int"},
{"l", "long"}, {"m", "unsigned long"},
{"x", "long long"}, {"y", "unsigned long long"},
{"n", "__int128"}, {"o", "unsigned __int128"},
{"f", "float"}, {"d", "double"},
{"e", "long double"}, {"g", "__float128"},
{"z", "ellipsis"}, {"Dn", "decltype(nullptr)"},
{nullptr, nullptr}};
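// Standard substitutions from the Itanium C++ ABI: "St" abbreviates the
// "std::" prefix, and the remaining codes name common standard library
// entities, e.g. "Ss" demangles to "std::string" and "Sa" to
// "std::allocator".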
const AbbrevPair kSubstitutionList[] = {
{"St", ""},
{"Sa", "allocator"},
{"Sb", "basic_string"},
{"Ss", "string"},
{"Si", "istream"},
{"So", "ostream"},
{"Sd", "iostream"},
{nullptr, nullptr}};
struct State {
  const char* mangled_cur;   // Cursor of the current parse position.
  char* out_cur;             // Cursor of the output buffer.
  const char* out_begin;     // Beginning of the output buffer.
  const char* out_end;       // End of the output buffer.
  const char* prev_name;     // Most recently parsed name, for <ctor-dtor-name>.
  ssize_t prev_name_length;  // Length of prev_name.
  short nest_level;          // Nesting depth inside N...E names; -1 if outside.
  bool append;               // Whether parsed text is appended to the output.
  bool overflowed;           // True if the output buffer was exhausted.
  uint32 local_level;        // Recursion guard for ParseLocalName().
  uint32 expr_level;         // Recursion guard for ParseExpression().
  uint32 arg_level;          // Recursion guard for ParseTemplateArg().
};
size_t StrLen(const char* str) {
size_t len = 0;
while (*str != '\0') {
++str;
++len;
}
return len;
}
bool AtLeastNumCharsRemaining(const char* str, ssize_t n) {
for (ssize_t i = 0; i < n; ++i) {
if (str[i] == '\0') {
return false;
}
}
return true;
}
bool StrPrefix(const char* str, const char* prefix) {
size_t i = 0;
while (str[i] != '\0' && prefix[i] != '\0' && str[i] == prefix[i]) {
++i;
}
return prefix[i] == '\0';
}
void InitState(State* state, const char* mangled, char* out, size_t out_size) {
state->mangled_cur = mangled;
state->out_cur = out;
state->out_begin = out;
state->out_end = out + out_size;
state->prev_name = nullptr;
state->prev_name_length = -1;
state->nest_level = -1;
state->append = true;
state->overflowed = false;
state->local_level = 0;
state->expr_level = 0;
state->arg_level = 0;
}
bool ParseOneCharToken(State* state, const char one_char_token) {
if (state->mangled_cur[0] == one_char_token) {
++state->mangled_cur;
return true;
}
return false;
}
bool ParseTwoCharToken(State* state, const char* two_char_token) {
if (state->mangled_cur[0] == two_char_token[0] &&
state->mangled_cur[1] == two_char_token[1]) {
state->mangled_cur += 2;
return true;
}
return false;
}
bool ParseCharClass(State* state, const char* char_class) {
const char* p = char_class;
for (; *p != '\0'; ++p) {
if (state->mangled_cur[0] == *p) {
++state->mangled_cur;
return true;
}
}
return false;
}
bool Optional(bool) { return true; }
using ParseFunc = bool (*)(State*);
bool OneOrMore(ParseFunc parse_func, State* state) {
if (parse_func(state)) {
while (parse_func(state)) {
}
return true;
}
return false;
}
bool ZeroOrMore(ParseFunc parse_func, State* state) {
while (parse_func(state)) {
}
return true;
}
void Append(State* state, const char* const str, ssize_t length) {
if (state->out_cur == nullptr) {
state->overflowed = true;
return;
}
for (ssize_t i = 0; i < length; ++i) {
if (state->out_cur + 1 < state->out_end) {
*state->out_cur = str[i];
++state->out_cur;
} else {
state->overflowed = true;
break;
}
}
if (!state->overflowed) {
*state->out_cur = '\0';
}
}
bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
bool IsAlpha(char c) {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
bool IsDigit(char c) { return c >= '0' && c <= '9'; }
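// Returns true for one or more trailing "." <alpha>+ "." <digit>+ groups, as
// appended by GCC optimization passes: ".clone.3", ".constprop.80", and the
// compound ".isra.2.constprop.18" are accepted, while forms like ".clone.foo"
// are not.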
bool IsFunctionCloneSuffix(const char* str) {
size_t i = 0;
while (str[i] != '\0') {
if (str[i] != '.' || !IsAlpha(str[i + 1])) {
return false;
}
i += 2;
while (IsAlpha(str[i])) {
++i;
}
if (str[i] != '.' || !IsDigit(str[i + 1])) {
return false;
}
i += 2;
while (IsDigit(str[i])) {
++i;
}
}
return true;
}
void MaybeAppendWithLength(State* state, const char* const str,
ssize_t length) {
if (state->append && length > 0) {
if (str[0] == '<' && state->out_begin < state->out_cur &&
state->out_cur[-1] == '<') {
Append(state, " ", 1);
}
if (IsAlpha(str[0]) || str[0] == '_') {
state->prev_name = state->out_cur;
state->prev_name_length = length;
}
Append(state, str, length);
}
}
bool MaybeAppend(State* state, const char* const str) {
if (state->append) {
size_t length = StrLen(str);
MaybeAppendWithLength(state, str, static_cast<ssize_t>(length));
}
return true;
}
bool EnterNestedName(State* state) {
state->nest_level = 0;
return true;
}
bool LeaveNestedName(State* state, short prev_value) {
state->nest_level = prev_value;
return true;
}
bool DisableAppend(State* state) {
state->append = false;
return true;
}
bool RestoreAppend(State* state, bool prev_value) {
state->append = prev_value;
return true;
}
void MaybeIncreaseNestLevel(State* state) {
if (state->nest_level > -1) {
++state->nest_level;
}
}
void MaybeAppendSeparator(State* state) {
if (state->nest_level >= 1) {
MaybeAppend(state, "::");
}
}
void MaybeCancelLastSeparator(State* state) {
if (state->nest_level >= 1 && state->append &&
state->out_begin <= state->out_cur - 2) {
state->out_cur -= 2;
*state->out_cur = '\0';
}
}
bool IdentifierIsAnonymousNamespace(State* state, ssize_t length) {
const char anon_prefix[] = "_GLOBAL__N_";
  return (length > static_cast<ssize_t>(sizeof(anon_prefix)) - 1 &&
          StrPrefix(state->mangled_cur, anon_prefix));
}
bool ParseMangledName(State* state);
bool ParseEncoding(State* state);
bool ParseName(State* state);
bool ParseUnscopedName(State* state);
bool ParseUnscopedTemplateName(State* state);
bool ParseNestedName(State* state);
bool ParsePrefix(State* state);
bool ParseUnqualifiedName(State* state);
bool ParseSourceName(State* state);
bool ParseLocalSourceName(State* state);
bool ParseNumber(State* state, int* number_out);
bool ParseFloatNumber(State* state);
bool ParseSeqId(State* state);
bool ParseIdentifier(State* state, ssize_t length);
bool ParseAbiTags(State* state);
bool ParseAbiTag(State* state);
bool ParseOperatorName(State* state);
bool ParseSpecialName(State* state);
bool ParseCallOffset(State* state);
bool ParseNVOffset(State* state);
bool ParseVOffset(State* state);
bool ParseCtorDtorName(State* state);
bool ParseType(State* state);
bool ParseCVQualifiers(State* state);
bool ParseBuiltinType(State* state);
bool ParseFunctionType(State* state);
bool ParseBareFunctionType(State* state);
bool ParseClassEnumType(State* state);
bool ParseArrayType(State* state);
bool ParsePointerToMemberType(State* state);
bool ParseTemplateParam(State* state);
bool ParseTemplateTemplateParam(State* state);
bool ParseTemplateArgs(State* state);
bool ParseTemplateArg(State* state);
bool ParseExpression(State* state);
bool ParseExprPrimary(State* state);
bool ParseLocalName(State* state);
bool ParseDiscriminator(State* state);
bool ParseSubstitution(State* state);
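// The entry point of the Itanium C++ ABI grammar:
//   <mangled-name> ::= _Z <encoding>
// For example, "_Z6foobarv" is "_Z", a 6-character source name "foobar", and
// a "v" (void) bare function type, demangling to "foobar()".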
bool ParseMangledName(State* state) {
return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
}
bool ParseEncoding(State* state) {
State copy = *state;
if (ParseName(state) && ParseBareFunctionType(state)) {
return true;
}
*state = copy;
if (ParseName(state) || ParseSpecialName(state)) {
return true;
}
return false;
}
bool ParseName(State* state) {
if (ParseNestedName(state) || ParseLocalName(state)) {
return true;
}
State copy = *state;
if (ParseUnscopedTemplateName(state) && ParseTemplateArgs(state)) {
return true;
}
*state = copy;
if (ParseUnscopedName(state)) {
return true;
}
return false;
}
bool ParseUnscopedName(State* state) {
if (ParseUnqualifiedName(state)) {
return true;
}
State copy = *state;
if (ParseTwoCharToken(state, "St") && MaybeAppend(state, "std::") &&
ParseUnqualifiedName(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseUnscopedTemplateName(State* state) {
return ParseUnscopedName(state) || ParseSubstitution(state);
}
bool ParseNestedName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
Optional(ParseCVQualifiers(state)) && ParsePrefix(state) &&
LeaveNestedName(state, copy.nest_level) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
return false;
}
bool ParsePrefix(State* state) {
bool has_something = false;
while (true) {
MaybeAppendSeparator(state);
if (ParseTemplateParam(state) || ParseSubstitution(state) ||
ParseUnscopedName(state)) {
has_something = true;
MaybeIncreaseNestLevel(state);
continue;
}
MaybeCancelLastSeparator(state);
if (has_something && ParseTemplateArgs(state)) {
return ParsePrefix(state);
} else {
break;
}
}
return true;
}
bool ParseUnqualifiedName(State* state) {
return (ParseOperatorName(state) || ParseCtorDtorName(state) ||
(ParseSourceName(state) && Optional(ParseAbiTags(state))) ||
(ParseLocalSourceName(state) && Optional(ParseAbiTags(state))));
}
bool ParseSourceName(State* state) {
State copy = *state;
int length = -1;
if (ParseNumber(state, &length) && ParseIdentifier(state, length)) {
return true;
}
*state = copy;
return false;
}
bool ParseLocalSourceName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
Optional(ParseDiscriminator(state))) {
return true;
}
*state = copy;
return false;
}
bool ParseNumber(State* state, int* number_out) {
int sign = 1;
if (ParseOneCharToken(state, 'n')) {
sign = -1;
}
const char* p = state->mangled_cur;
int number = 0;
constexpr int int_max_by_10 = std::numeric_limits<int>::max() / 10;
for (; *p != '\0'; ++p) {
if (IsDigit(*p)) {
if (number > int_max_by_10) {
return false;
}
const int digit = *p - '0';
const int shifted = number * 10;
if (digit > std::numeric_limits<int>::max() - shifted) {
return false;
}
number = shifted + digit;
} else {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
if (number_out != nullptr) {
*number_out = number * sign;
}
return true;
}
return false;
}
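// The two guards above make the decimal accumulation overflow-safe: a digit
// stream whose value would exceed std::numeric_limits<int>::max(), such as
// "9999999999", fails the parse instead of wrapping around.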
bool ParseFloatNumber(State* state) {
const char* p = state->mangled_cur;
for (; *p != '\0'; ++p) {
if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
return true;
}
return false;
}
bool ParseSeqId(State* state) {
const char* p = state->mangled_cur;
for (; *p != '\0'; ++p) {
if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
return true;
}
return false;
}
bool ParseIdentifier(State* state, ssize_t length) {
if (length == -1 || !AtLeastNumCharsRemaining(state->mangled_cur, length)) {
return false;
}
if (IdentifierIsAnonymousNamespace(state, length)) {
MaybeAppend(state, "(anonymous namespace)");
} else {
MaybeAppendWithLength(state, state->mangled_cur, length);
}
if (length < 0 ||
static_cast<std::size_t>(length) > StrLen(state->mangled_cur)) {
return false;
}
state->mangled_cur += length;
return true;
}
bool ParseAbiTags(State* state) {
State copy = *state;
DisableAppend(state);
if (OneOrMore(ParseAbiTag, state)) {
RestoreAppend(state, copy.append);
return true;
}
*state = copy;
return false;
}
bool ParseAbiTag(State* state) {
return ParseOneCharToken(state, 'B') && ParseSourceName(state);
}
bool ParseOperatorName(State* state) {
if (!AtLeastNumCharsRemaining(state->mangled_cur, 2)) {
return false;
}
State copy = *state;
if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
EnterNestedName(state) && ParseType(state) &&
LeaveNestedName(state, copy.nest_level)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'v') && ParseCharClass(state, "0123456789") &&
ParseSourceName(state)) {
return true;
}
*state = copy;
if (!(IsLower(state->mangled_cur[0]) && IsAlpha(state->mangled_cur[1]))) {
return false;
}
const AbbrevPair* p;
for (p = kOperatorList; p->abbrev != nullptr; ++p) {
if (state->mangled_cur[0] == p->abbrev[0] &&
state->mangled_cur[1] == p->abbrev[1]) {
MaybeAppend(state, "operator");
if (IsLower(*p->real_name)) {
MaybeAppend(state, " ");
}
MaybeAppend(state, p->real_name);
state->mangled_cur += 2;
return true;
}
}
return false;
}
bool ParseSpecialName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTIS") &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GV") && ParseName(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
DisableAppend(state) && ParseType(state)) {
RestoreAppend(state, copy.append);
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseCallOffset(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
ParseOneCharToken(state, '_')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'v') && ParseVOffset(state) &&
ParseOneCharToken(state, '_')) {
return true;
}
*state = copy;
return false;
}
bool ParseNVOffset(State* state) { return ParseNumber(state, nullptr); }
bool ParseVOffset(State* state) {
State copy = *state;
if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
ParseNumber(state, nullptr)) {
return true;
}
*state = copy;
return false;
}
bool ParseCtorDtorName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'C') && ParseCharClass(state, "123")) {
const char* const prev_name = state->prev_name;
const ssize_t prev_name_length = state->prev_name_length;
MaybeAppendWithLength(state, prev_name, prev_name_length);
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "012")) {
const char* const prev_name = state->prev_name;
const ssize_t prev_name_length = state->prev_name_length;
MaybeAppend(state, "~");
MaybeAppendWithLength(state, prev_name, prev_name_length);
return true;
}
*state = copy;
return false;
}
bool ParseType(State* state) {
State copy = *state;
if (ParseCVQualifiers(state) && ParseType(state)) {
return true;
}
*state = copy;
if (ParseCharClass(state, "OPRCG") && ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
ParseExpression(state) && ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseBuiltinType(state) || ParseFunctionType(state) ||
ParseClassEnumType(state) || ParseArrayType(state) ||
ParsePointerToMemberType(state) || ParseSubstitution(state)) {
return true;
}
if (ParseTemplateTemplateParam(state) && ParseTemplateArgs(state)) {
return true;
}
*state = copy;
if (ParseTemplateParam(state)) {
return true;
}
return false;
}
bool ParseCVQualifiers(State* state) {
int num_cv_qualifiers = 0;
num_cv_qualifiers += ParseOneCharToken(state, 'r');
num_cv_qualifiers += ParseOneCharToken(state, 'V');
num_cv_qualifiers += ParseOneCharToken(state, 'K');
return num_cv_qualifiers > 0;
}
bool ParseBuiltinType(State* state) {
const AbbrevPair* p;
for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
if (state->mangled_cur[0] == p->abbrev[0]) {
MaybeAppend(state, p->real_name);
++state->mangled_cur;
return true;
}
}
State copy = *state;
if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseFunctionType(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'F') &&
Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
return false;
}
bool ParseBareFunctionType(State* state) {
State copy = *state;
DisableAppend(state);
if (OneOrMore(ParseType, state)) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "()");
return true;
}
*state = copy;
return false;
}
bool ParseClassEnumType(State* state) { return ParseName(state); }
bool ParseArrayType(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'A') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'A') && Optional(ParseExpression(state)) &&
ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
*state = copy;
return false;
}
bool ParsePointerToMemberType(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'M') && ParseType(state) && ParseType(state)) {
return true;
}
*state = copy;
return false;
}
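// This demangler keeps no substitution table, so template parameters here
// (and substitutions in ParseSubstitution()) are emitted as a "?"
// placeholder rather than expanded back to the entity they reference.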
bool ParseTemplateParam(State* state) {
if (ParseTwoCharToken(state, "T_")) {
MaybeAppend(state, "?");
return true;
}
State copy = *state;
if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?");
return true;
}
*state = copy;
return false;
}
bool ParseTemplateTemplateParam(State* state) {
return (ParseTemplateParam(state) || ParseSubstitution(state));
}
bool ParseTemplateArgs(State* state) {
State copy = *state;
DisableAppend(state);
if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
ParseOneCharToken(state, 'E')) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "<>");
return true;
}
*state = copy;
return false;
}
bool ParseTemplateArg(State* state) {
constexpr uint32 max_levels = 6;
if (state->arg_level > max_levels) {
return false;
}
++state->arg_level;
State copy = *state;
if ((ParseOneCharToken(state, 'I') || ParseOneCharToken(state, 'J')) &&
ZeroOrMore(ParseTemplateArg, state) && ParseOneCharToken(state, 'E')) {
--state->arg_level;
return true;
}
*state = copy;
if (ParseType(state) || ParseExprPrimary(state)) {
--state->arg_level;
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
ParseOneCharToken(state, 'E')) {
--state->arg_level;
return true;
}
*state = copy;
return false;
}
bool ParseExpression(State* state) {
if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
return true;
}
constexpr uint32 max_levels = 5;
if (state->expr_level > max_levels) {
return false;
}
++state->expr_level;
State copy = *state;
if (ParseOperatorName(state) && ParseExpression(state) &&
ParseExpression(state) && ParseExpression(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseOperatorName(state) && ParseExpression(state) &&
ParseExpression(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseOperatorName(state) && ParseExpression(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "st") && ParseType(state)) {
    --state->expr_level;
    return true;
}
*state = copy;
if (ParseTwoCharToken(state, "sr") && ParseType(state) &&
ParseUnqualifiedName(state) && ParseTemplateArgs(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "sr") && ParseType(state) &&
ParseUnqualifiedName(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "sp") && ParseType(state)) {
--state->expr_level;
return true;
}
*state = copy;
return false;
}
bool ParseExprPrimary(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'L') && ParseType(state) &&
ParseNumber(state, nullptr) && ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'L') && ParseType(state) &&
ParseFloatNumber(state) && ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'L') && ParseMangledName(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "LZ") && ParseEncoding(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
return false;
}
bool ParseLocalName(State* state) {
constexpr uint32 max_levels = 5;
if (state->local_level > max_levels) {
return false;
}
++state->local_level;
State copy = *state;
if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
ParseOneCharToken(state, 'E') && MaybeAppend(state, "::") &&
ParseName(state) && Optional(ParseDiscriminator(state))) {
--state->local_level;
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
ParseTwoCharToken(state, "Es") && Optional(ParseDiscriminator(state))) {
--state->local_level;
return true;
}
*state = copy;
return false;
}
bool ParseDiscriminator(State* state) {
State copy = *state;
if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
return true;
}
*state = copy;
return false;
}
bool ParseSubstitution(State* state) {
if (ParseTwoCharToken(state, "S_")) {
MaybeAppend(state, "?");
return true;
}
State copy = *state;
if (ParseOneCharToken(state, 'S') && ParseSeqId(state) &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?");
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'S')) {
const AbbrevPair* p;
for (p = kSubstitutionList; p->abbrev != nullptr; ++p) {
if (state->mangled_cur[0] == p->abbrev[1]) {
MaybeAppend(state, "std");
if (p->real_name[0] != '\0') {
MaybeAppend(state, "::");
MaybeAppend(state, p->real_name);
}
++state->mangled_cur;
return true;
}
}
}
*state = copy;
return false;
}
bool ParseTopLevelMangledName(State* state) {
if (ParseMangledName(state)) {
if (state->mangled_cur[0] != '\0') {
if (IsFunctionCloneSuffix(state->mangled_cur)) {
return true;
}
if (state->mangled_cur[0] == '@') {
MaybeAppend(state, state->mangled_cur);
return true;
}
return ParseName(state);
}
return true;
}
return false;
}
}
#endif
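// Demangle() selects one of three backends at compile time: DbgHelp's
// UnDecorateSymbolName() on Windows, abi::__cxa_demangle() where available,
// and otherwise the hand-written Itanium-ABI parser above. A minimal call
// site for an Itanium-style symbol:
//
//   char out[64];
//   if (Demangle("_Z6foobarv", out, sizeof(out))) {
//     // out now holds "foobar()".
//   }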
bool Demangle(const char* mangled, char* out, size_t out_size) {
#if defined(GLOG_OS_WINDOWS)
# if defined(HAVE_DBGHELP)
char buffer[1024];
const char* lparen = strchr(mangled, '(');
  if (lparen) {
    // Extract the symbol between the parentheses, guarding against a missing
    // closing parenthesis and against symbols too long for the local buffer.
    const char* rparen = strchr(lparen, ')');
    if (rparen != nullptr) {
      size_t length = static_cast<size_t>(rparen - lparen) - 1;
      if (length < sizeof(buffer)) {
        strncpy(buffer, lparen + 1, length);
        buffer[length] = '\0';
        mangled = buffer;
      }
    }
  }
return UnDecorateSymbolName(mangled, out, out_size, UNDNAME_COMPLETE);
# else
(void)mangled;
(void)out;
(void)out_size;
return false;
# endif
#elif defined(HAVE___CXA_DEMANGLE)
int status = -1;
std::size_t n = 0;
std::unique_ptr<char, decltype(&std::free)> unmangled{
abi::__cxa_demangle(mangled, nullptr, &n, &status), &std::free};
if (!unmangled) {
return false;
}
std::copy_n(unmangled.get(), std::min(n, out_size), out);
return status == 0;
#else
State state;
InitState(&state, mangled, out, out_size);
return ParseTopLevelMangledName(&state) && !state.overflowed;
#endif
}
}
} | #include "demangle.h"
#include <fstream>
#include <iostream>
#include <string>
#include "config.h"
#include "glog/logging.h"
#include "googletest.h"
#include "utilities.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
GLOG_DEFINE_bool(demangle_filter, false,
"Run demangle_unittest in filter mode");
using namespace std;
using namespace google;
static const char* DemangleIt(const char* const mangled) {
static char demangled[4096];
if (Demangle(mangled, demangled, sizeof(demangled))) {
return demangled;
} else {
return mangled;
}
}
#if defined(GLOG_OS_WINDOWS)
# if defined(HAVE_DBGHELP) && !defined(NDEBUG)
TEST(Demangle, Windows) {
EXPECT_STREQ("public: static void __cdecl Foo::func(int)",
DemangleIt("?func@Foo@@SAXH@Z"));
EXPECT_STREQ("public: static void __cdecl Foo::func(int)",
DemangleIt("@ILT+1105(?func@Foo@@SAXH@Z)"));
EXPECT_STREQ("int __cdecl foobarArray(int * const)",
DemangleIt("?foobarArray@@YAHQAH@Z"));
}
# endif
#else
TEST(Demangle, CornerCases) {
const size_t size = 10;
char tmp[size] = {0};
const char* demangled = "foobar()";
const char* mangled = "_Z6foobarv";
EXPECT_TRUE(Demangle(mangled, tmp, sizeof(tmp)));
EXPECT_STREQ(demangled, tmp);
EXPECT_TRUE(Demangle(mangled, tmp, size - 1));
EXPECT_STREQ(demangled, tmp);
EXPECT_FALSE(Demangle(mangled, tmp, size - 2));
EXPECT_FALSE(Demangle(mangled, tmp, 1));
EXPECT_FALSE(Demangle(mangled, tmp, 0));
EXPECT_FALSE(Demangle(mangled, nullptr, 0));
}
TEST(Demangle, Clones) {
char tmp[20];
EXPECT_TRUE(Demangle("_ZL3Foov", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.clone.3", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.constprop.80", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.isra.18", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.18", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_FALSE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp)));
}
TEST(Demangle, FromFile) {
string test_file = FLAGS_test_srcdir + "/src/demangle_unittest.txt";
ifstream f(test_file.c_str());
EXPECT_FALSE(f.fail());
string line;
while (getline(f, line)) {
if (line.empty() || line[0] == '#') {
continue;
}
string::size_type tab_pos = line.find('\t');
EXPECT_NE(string::npos, tab_pos);
string mangled = line.substr(0, tab_pos);
string demangled = line.substr(tab_pos + 1);
EXPECT_EQ(demangled, DemangleIt(mangled.c_str()));
}
}
#endif
int main(int argc, char** argv) {
InitGoogleTest(&argc, argv);
#ifdef GLOG_USE_GFLAGS
ParseCommandLineFlags(&argc, &argv, true);
#endif
FLAGS_logtostderr = true;
InitGoogleLogging(argv[0]);
if (FLAGS_demangle_filter) {
string line;
while (getline(cin, line, '\n')) {
cout << DemangleIt(line.c_str()) << endl;
}
return 0;
} else if (argc > 1) {
cout << DemangleIt(argv[1]) << endl;
return 0;
} else {
return RUN_ALL_TESTS();
}
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/demangle.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/demangle_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
b34100b3-f345-49db-a7b0-5d4f8ad618ea | cpp | google/quiche | http2_frame_decoder | quiche/http2/decoder/http2_frame_decoder.cc | quiche/http2/decoder/http2_frame_decoder_test.cc | #include "quiche/http2/decoder/http2_frame_decoder.h"
#include <ostream>
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/hpack/varint/hpack_varint_decoder.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
std::ostream& operator<<(std::ostream& out, Http2FrameDecoder::State v) {
switch (v) {
case Http2FrameDecoder::State::kStartDecodingHeader:
return out << "kStartDecodingHeader";
case Http2FrameDecoder::State::kResumeDecodingHeader:
return out << "kResumeDecodingHeader";
case Http2FrameDecoder::State::kResumeDecodingPayload:
return out << "kResumeDecodingPayload";
case Http2FrameDecoder::State::kDiscardPayload:
return out << "kDiscardPayload";
}
int unknown = static_cast<int>(v);
QUICHE_BUG(http2_bug_155_1) << "Http2FrameDecoder::State " << unknown;
return out << "Http2FrameDecoder::State(" << unknown << ")";
}
Http2FrameDecoder::Http2FrameDecoder(Http2FrameDecoderListener* listener)
: state_(State::kStartDecodingHeader),
maximum_payload_size_(Http2SettingsInfo::DefaultMaxFrameSize()) {
set_listener(listener);
}
void Http2FrameDecoder::set_listener(Http2FrameDecoderListener* listener) {
if (listener == nullptr) {
listener = &no_op_listener_;
}
frame_decoder_state_.set_listener(listener);
}
Http2FrameDecoderListener* Http2FrameDecoder::listener() const {
return frame_decoder_state_.listener();
}
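// Drives the decoding state machine: finish the fixed-size frame header
// first, then hand the payload to the type-specific decoder, resuming
// wherever the previous buffer ran out.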
DecodeStatus Http2FrameDecoder::DecodeFrame(DecodeBuffer* db) {
QUICHE_DVLOG(2) << "Http2FrameDecoder::DecodeFrame state=" << state_;
switch (state_) {
case State::kStartDecodingHeader:
if (frame_decoder_state_.StartDecodingFrameHeader(db)) {
return StartDecodingPayload(db);
}
state_ = State::kResumeDecodingHeader;
return DecodeStatus::kDecodeInProgress;
case State::kResumeDecodingHeader:
if (frame_decoder_state_.ResumeDecodingFrameHeader(db)) {
return StartDecodingPayload(db);
}
return DecodeStatus::kDecodeInProgress;
case State::kResumeDecodingPayload:
return ResumeDecodingPayload(db);
case State::kDiscardPayload:
return DiscardPayload(db);
}
QUICHE_NOTREACHED();
return DecodeStatus::kDecodeError;
}
size_t Http2FrameDecoder::remaining_payload() const {
return frame_decoder_state_.remaining_payload();
}
uint32_t Http2FrameDecoder::remaining_padding() const {
return frame_decoder_state_.remaining_padding();
}
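// Validates the completed header (listener acceptance and maximum payload
// size) before dispatching to the payload decoder for the frame type; the
// DecodeBufferSubset caps reads at payload_length.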
DecodeStatus Http2FrameDecoder::StartDecodingPayload(DecodeBuffer* db) {
const Http2FrameHeader& header = frame_header();
if (!listener()->OnFrameHeader(header)) {
QUICHE_DVLOG(2)
<< "OnFrameHeader rejected the frame, will discard; header: " << header;
state_ = State::kDiscardPayload;
frame_decoder_state_.InitializeRemainders();
return DecodeStatus::kDecodeError;
}
if (header.payload_length > maximum_payload_size_) {
QUICHE_DVLOG(2) << "Payload length is greater than allowed: "
<< header.payload_length << " > " << maximum_payload_size_
<< "\n header: " << header;
state_ = State::kDiscardPayload;
frame_decoder_state_.InitializeRemainders();
listener()->OnFrameSizeError(header);
return DecodeStatus::kDecodeError;
}
DecodeBufferSubset subset(db, header.payload_length);
DecodeStatus status;
switch (header.type) {
case Http2FrameType::DATA:
status = StartDecodingDataPayload(&subset);
break;
case Http2FrameType::HEADERS:
status = StartDecodingHeadersPayload(&subset);
break;
case Http2FrameType::PRIORITY:
status = StartDecodingPriorityPayload(&subset);
break;
case Http2FrameType::RST_STREAM:
status = StartDecodingRstStreamPayload(&subset);
break;
case Http2FrameType::SETTINGS:
status = StartDecodingSettingsPayload(&subset);
break;
case Http2FrameType::PUSH_PROMISE:
status = StartDecodingPushPromisePayload(&subset);
break;
case Http2FrameType::PING:
status = StartDecodingPingPayload(&subset);
break;
case Http2FrameType::GOAWAY:
status = StartDecodingGoAwayPayload(&subset);
break;
case Http2FrameType::WINDOW_UPDATE:
status = StartDecodingWindowUpdatePayload(&subset);
break;
case Http2FrameType::CONTINUATION:
status = StartDecodingContinuationPayload(&subset);
break;
case Http2FrameType::ALTSVC:
status = StartDecodingAltSvcPayload(&subset);
break;
case Http2FrameType::PRIORITY_UPDATE:
status = StartDecodingPriorityUpdatePayload(&subset);
break;
default:
status = StartDecodingUnknownPayload(&subset);
break;
}
if (status == DecodeStatus::kDecodeDone) {
state_ = State::kStartDecodingHeader;
return status;
} else if (status == DecodeStatus::kDecodeInProgress) {
state_ = State::kResumeDecodingPayload;
return status;
} else {
state_ = State::kDiscardPayload;
return status;
}
}
DecodeStatus Http2FrameDecoder::ResumeDecodingPayload(DecodeBuffer* db) {
size_t remaining = frame_decoder_state_.remaining_total_payload();
QUICHE_DCHECK_LE(remaining, frame_header().payload_length);
DecodeBufferSubset subset(db, remaining);
DecodeStatus status;
switch (frame_header().type) {
case Http2FrameType::DATA:
status = ResumeDecodingDataPayload(&subset);
break;
case Http2FrameType::HEADERS:
status = ResumeDecodingHeadersPayload(&subset);
break;
case Http2FrameType::PRIORITY:
status = ResumeDecodingPriorityPayload(&subset);
break;
case Http2FrameType::RST_STREAM:
status = ResumeDecodingRstStreamPayload(&subset);
break;
case Http2FrameType::SETTINGS:
status = ResumeDecodingSettingsPayload(&subset);
break;
case Http2FrameType::PUSH_PROMISE:
status = ResumeDecodingPushPromisePayload(&subset);
break;
case Http2FrameType::PING:
status = ResumeDecodingPingPayload(&subset);
break;
case Http2FrameType::GOAWAY:
status = ResumeDecodingGoAwayPayload(&subset);
break;
case Http2FrameType::WINDOW_UPDATE:
status = ResumeDecodingWindowUpdatePayload(&subset);
break;
case Http2FrameType::CONTINUATION:
status = ResumeDecodingContinuationPayload(&subset);
break;
case Http2FrameType::ALTSVC:
status = ResumeDecodingAltSvcPayload(&subset);
break;
case Http2FrameType::PRIORITY_UPDATE:
status = ResumeDecodingPriorityUpdatePayload(&subset);
break;
default:
status = ResumeDecodingUnknownPayload(&subset);
break;
}
if (status == DecodeStatus::kDecodeDone) {
state_ = State::kStartDecodingHeader;
return status;
} else if (status == DecodeStatus::kDecodeInProgress) {
return status;
} else {
state_ = State::kDiscardPayload;
return status;
}
}
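// Keeps only the flag bits defined for the current frame type so payload
// decoders never observe undefined flags.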
void Http2FrameDecoder::RetainFlags(uint8_t valid_flags) {
frame_decoder_state_.RetainFlags(valid_flags);
}
void Http2FrameDecoder::ClearFlags() { frame_decoder_state_.ClearFlags(); }
DecodeStatus Http2FrameDecoder::StartDecodingAltSvcPayload(DecodeBuffer* db) {
ClearFlags();
return altsvc_payload_decoder_.StartDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingAltSvcPayload(DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return altsvc_payload_decoder_.ResumeDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::StartDecodingContinuationPayload(
DecodeBuffer* db) {
RetainFlags(Http2FrameFlag::END_HEADERS);
return continuation_payload_decoder_.StartDecodingPayload(
&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingContinuationPayload(
DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return continuation_payload_decoder_.ResumeDecodingPayload(
&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::StartDecodingDataPayload(DecodeBuffer* db) {
RetainFlags(Http2FrameFlag::END_STREAM | Http2FrameFlag::PADDED);
return data_payload_decoder_.StartDecodingPayload(&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingDataPayload(DecodeBuffer* db) {
return data_payload_decoder_.ResumeDecodingPayload(&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::StartDecodingGoAwayPayload(DecodeBuffer* db) {
ClearFlags();
return goaway_payload_decoder_.StartDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingGoAwayPayload(DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return goaway_payload_decoder_.ResumeDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::StartDecodingHeadersPayload(DecodeBuffer* db) {
RetainFlags(Http2FrameFlag::END_STREAM | Http2FrameFlag::END_HEADERS |
Http2FrameFlag::PADDED | Http2FrameFlag::PRIORITY);
return headers_payload_decoder_.StartDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingHeadersPayload(DecodeBuffer* db) {
QUICHE_DCHECK_LE(frame_decoder_state_.remaining_payload_and_padding(),
frame_header().payload_length);
return headers_payload_decoder_.ResumeDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::StartDecodingPingPayload(DecodeBuffer* db) {
RetainFlags(Http2FrameFlag::ACK);
return ping_payload_decoder_.StartDecodingPayload(&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingPingPayload(DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return ping_payload_decoder_.ResumeDecodingPayload(&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::StartDecodingPriorityPayload(DecodeBuffer* db) {
ClearFlags();
return priority_payload_decoder_.StartDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingPriorityPayload(
DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return priority_payload_decoder_.ResumeDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::StartDecodingPriorityUpdatePayload(
DecodeBuffer* db) {
ClearFlags();
return priority_payload_update_decoder_.StartDecodingPayload(
&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingPriorityUpdatePayload(
DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return priority_payload_update_decoder_.ResumeDecodingPayload(
&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::StartDecodingPushPromisePayload(
DecodeBuffer* db) {
RetainFlags(Http2FrameFlag::END_HEADERS | Http2FrameFlag::PADDED);
return push_promise_payload_decoder_.StartDecodingPayload(
&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingPushPromisePayload(
DecodeBuffer* db) {
QUICHE_DCHECK_LE(frame_decoder_state_.remaining_payload_and_padding(),
frame_header().payload_length);
return push_promise_payload_decoder_.ResumeDecodingPayload(
&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::StartDecodingRstStreamPayload(
DecodeBuffer* db) {
ClearFlags();
return rst_stream_payload_decoder_.StartDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingRstStreamPayload(
DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return rst_stream_payload_decoder_.ResumeDecodingPayload(
&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::StartDecodingSettingsPayload(DecodeBuffer* db) {
RetainFlags(Http2FrameFlag::ACK);
return settings_payload_decoder_.StartDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingSettingsPayload(
DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return settings_payload_decoder_.ResumeDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::StartDecodingUnknownPayload(DecodeBuffer* db) {
return unknown_payload_decoder_.StartDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingUnknownPayload(DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return unknown_payload_decoder_.ResumeDecodingPayload(&frame_decoder_state_,
db);
}
DecodeStatus Http2FrameDecoder::StartDecodingWindowUpdatePayload(
DecodeBuffer* db) {
ClearFlags();
return window_update_payload_decoder_.StartDecodingPayload(
&frame_decoder_state_, db);
}
DecodeStatus Http2FrameDecoder::ResumeDecodingWindowUpdatePayload(
DecodeBuffer* db) {
QUICHE_DCHECK_EQ(frame_decoder_state_.remaining_total_payload(),
frame_decoder_state_.remaining_payload());
return window_update_payload_decoder_.ResumeDecodingPayload(
&frame_decoder_state_, db);
}
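// Skips over the rest of a frame that failed validation, returning
// kDecodeDone once payload and padding are fully consumed.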
DecodeStatus Http2FrameDecoder::DiscardPayload(DecodeBuffer* db) {
QUICHE_DVLOG(2) << "remaining_payload="
<< frame_decoder_state_.remaining_payload_
<< "; remaining_padding="
<< frame_decoder_state_.remaining_padding_;
frame_decoder_state_.remaining_payload_ +=
frame_decoder_state_.remaining_padding_;
frame_decoder_state_.remaining_padding_ = 0;
const size_t avail = frame_decoder_state_.AvailablePayload(db);
QUICHE_DVLOG(2) << "avail=" << avail;
if (avail > 0) {
frame_decoder_state_.ConsumePayload(avail);
db->AdvanceCursor(avail);
}
if (frame_decoder_state_.remaining_payload_ == 0) {
state_ = State::kStartDecodingHeader;
return DecodeStatus::kDecodeDone;
}
return DecodeStatus::kDecodeInProgress;
}
} | #include "quiche/http2/decoder/http2_frame_decoder.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector_listener.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_logging.h"
using ::testing::AssertionSuccess;
namespace http2 {
namespace test {
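// Exposes the decoder's private frame_decoder_state_ to the tests below.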
class Http2FrameDecoderPeer {
public:
static size_t remaining_total_payload(Http2FrameDecoder* decoder) {
return decoder->frame_decoder_state_.remaining_total_payload();
}
};
namespace {
class Http2FrameDecoderTest : public RandomDecoderTest {
protected:
DecodeStatus StartDecoding(DecodeBuffer* db) override {
QUICHE_DVLOG(2) << "StartDecoding, db->Remaining=" << db->Remaining();
collector_.Reset();
PrepareDecoder();
DecodeStatus status = decoder_->DecodeFrame(db);
if (status != DecodeStatus::kDecodeInProgress) {
++fast_decode_count_;
if (status == DecodeStatus::kDecodeError) {
ConfirmDiscardsRemainingPayload();
}
}
return status;
}
DecodeStatus ResumeDecoding(DecodeBuffer* db) override {
QUICHE_DVLOG(2) << "ResumeDecoding, db->Remaining=" << db->Remaining();
DecodeStatus status = decoder_->DecodeFrame(db);
if (status != DecodeStatus::kDecodeInProgress) {
++slow_decode_count_;
if (status == DecodeStatus::kDecodeError) {
ConfirmDiscardsRemainingPayload();
}
}
return status;
}
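// After a decode error the decoder must silently discard the rest of the
// frame: feed it the remaining payload plus extra bytes and verify that
// exactly the payload is consumed.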
void ConfirmDiscardsRemainingPayload() {
ASSERT_TRUE(decoder_->IsDiscardingPayload());
size_t remaining =
Http2FrameDecoderPeer::remaining_total_payload(decoder_.get());
size_t extra = 10;
std::string junk(remaining + extra, '0');
DecodeBuffer tmp(junk);
EXPECT_EQ(DecodeStatus::kDecodeDone, decoder_->DecodeFrame(&tmp));
EXPECT_EQ(remaining, tmp.Offset());
EXPECT_EQ(extra, tmp.Remaining());
EXPECT_FALSE(decoder_->IsDiscardingPayload());
}
void PrepareDecoder() {
decoder_ = std::make_unique<Http2FrameDecoder>(&collector_);
decoder_->set_maximum_payload_size(maximum_payload_size_);
}
void ResetDecodeSpeedCounters() {
fast_decode_count_ = 0;
slow_decode_count_ = 0;
}
AssertionResult VerifyCollected(const FrameParts& expected) {
HTTP2_VERIFY_FALSE(collector_.IsInProgress());
HTTP2_VERIFY_EQ(1u, collector_.size());
return expected.VerifyEquals(*collector_.frame(0));
}
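// Decodes |payload| in several ways via RandomDecoderTest (all at once and
// split into smaller pieces), checking that both the fast single-buffer
// path and the slow resumed path produce the expected FrameParts.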
AssertionResult DecodePayloadAndValidateSeveralWays(absl::string_view payload,
Validator validator) {
DecodeBuffer db(payload);
bool start_decoding_requires_non_empty = false;
return DecodeAndValidateSeveralWays(&db, start_decoding_requires_non_empty,
validator);
}
AssertionResult DecodePayloadAndValidateSeveralWays(
absl::string_view payload, const FrameParts& expected) {
auto validator = [&expected, this](const DecodeBuffer& /*input*/,
DecodeStatus status) -> AssertionResult {
HTTP2_VERIFY_EQ(status, DecodeStatus::kDecodeDone);
return VerifyCollected(expected);
};
ResetDecodeSpeedCounters();
HTTP2_VERIFY_SUCCESS(DecodePayloadAndValidateSeveralWays(
payload, ValidateDoneAndEmpty(validator)));
HTTP2_VERIFY_GT(fast_decode_count_, 0u);
HTTP2_VERIFY_GT(slow_decode_count_, 0u);
std::string next_frame = Random().RandString(10);
std::string input(payload.data(), payload.size());
input += next_frame;
ResetDecodeSpeedCounters();
HTTP2_VERIFY_SUCCESS(DecodePayloadAndValidateSeveralWays(
payload, ValidateDoneAndOffset(payload.size(), validator)));
HTTP2_VERIFY_GT(fast_decode_count_, 0u);
HTTP2_VERIFY_GT(slow_decode_count_, 0u);
return AssertionSuccess();
}
template <size_t N>
AssertionResult DecodePayloadAndValidateSeveralWays(
const char (&buf)[N], const FrameParts& expected) {
return DecodePayloadAndValidateSeveralWays(absl::string_view(buf, N),
expected);
}
template <size_t N>
AssertionResult DecodePayloadAndValidateSeveralWays(
const char (&buf)[N], const Http2FrameHeader& header) {
return DecodePayloadAndValidateSeveralWays(absl::string_view(buf, N),
FrameParts(header));
}
template <size_t N>
AssertionResult DecodePayloadExpectingError(const char (&buf)[N],
const FrameParts& expected) {
auto validator = [&expected, this](const DecodeBuffer& /*input*/,
DecodeStatus status) -> AssertionResult {
HTTP2_VERIFY_EQ(status, DecodeStatus::kDecodeError);
return VerifyCollected(expected);
};
ResetDecodeSpeedCounters();
EXPECT_TRUE(
DecodePayloadAndValidateSeveralWays(ToStringPiece(buf), validator));
EXPECT_GT(fast_decode_count_, 0u);
EXPECT_GT(slow_decode_count_, 0u);
return AssertionSuccess();
}
template <size_t N>
AssertionResult DecodePayloadExpectingFrameSizeError(const char (&buf)[N],
FrameParts expected) {
expected.SetHasFrameSizeError(true);
return DecodePayloadExpectingError(buf, expected);
}
template <size_t N>
AssertionResult DecodePayloadExpectingFrameSizeError(
const char (&buf)[N], const Http2FrameHeader& header) {
return DecodePayloadExpectingFrameSizeError(buf, FrameParts(header));
}
size_t fast_decode_count_ = 0;
size_t slow_decode_count_ = 0;
uint32_t maximum_payload_size_ = Http2SettingsInfo::DefaultMaxFrameSize();
FramePartsCollectorListener collector_;
std::unique_ptr<Http2FrameDecoder> decoder_;
};
TEST_F(Http2FrameDecoderTest, DataEmpty) {
const char kFrameData[] = {
'\x00', '\x00', '\x00',  // Payload length: 0
'\x00',                  // Frame type: DATA
'\x00',                  // Flags: none
'\x00', '\x00', '\x00',
'\x00',  // Stream ID: 0
};
Http2FrameHeader header(0, Http2FrameType::DATA, 0, 0);
FrameParts expected(header, "");
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, HeadersEmpty) {
const char kFrameData[] = {
'\x00', '\x00', '\x00',
'\x01',
'\x00',
'\x00', '\x00', '\x00', '\x01',
};
Http2FrameHeader header(0, Http2FrameType::HEADERS, 0, 1);
FrameParts expected(header, "");
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, Priority) {
const char kFrameData[] = {
'\x00', '\x00', '\x05',  // Payload length: 5
'\x02',                  // Frame type: PRIORITY
'\x00',                  // Flags: none
'\x00', '\x00', '\x00', '\x02',  // Stream ID: 2
'\x80', '\x00', '\x00', '\x01',  // Exclusive bit set; parent stream: 1
'\x10',                          // Weight: 17 (encoded as 16)
};
Http2FrameHeader header(5, Http2FrameType::PRIORITY, 0, 2);
FrameParts expected(header);
expected.SetOptPriority(Http2PriorityFields(1, 17, true));
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, RstStream) {
const char kFrameData[] = {
'\x00', '\x00', '\x04',
'\x03',
'\x00',
'\x00', '\x00', '\x00', '\x01',
'\x00', '\x00', '\x00', '\x01',
};
Http2FrameHeader header(4, Http2FrameType::RST_STREAM, 0, 1);
FrameParts expected(header);
expected.SetOptRstStreamErrorCode(Http2ErrorCode::PROTOCOL_ERROR);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, SettingsEmpty) {
const char kFrameData[] = {
'\x00', '\x00', '\x00',
'\x04',
'\x00',
'\x00', '\x00', '\x00', '\x01',
};
Http2FrameHeader header(0, Http2FrameType::SETTINGS, 0, 1);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, SettingsAck) {
const char kFrameData[] = {
'\x00', '\x00', '\x00',
'\x04',
'\x01',
'\x00', '\x00', '\x00', '\x00',
};
Http2FrameHeader header(0, Http2FrameType::SETTINGS, Http2FrameFlag::ACK, 0);
FrameParts expected(header);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, PushPromiseMinimal) {
const char kFrameData[] = {
'\x00', '\x00', '\x04',
'\x05',
'\x04',
'\x00', '\x00', '\x00',
'\x02',
'\x00', '\x00', '\x00',
'\x01',
};
Http2FrameHeader header(4, Http2FrameType::PUSH_PROMISE,
Http2FrameFlag::END_HEADERS, 2);
FrameParts expected(header, "");
expected.SetOptPushPromise(Http2PushPromiseFields{1});
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, Ping) {
const char kFrameData[] = {
'\x00', '\x00', '\x08',
'\x06',
'\xfe',
'\x00', '\x00', '\x00', '\x00',
's', 'o', 'm', 'e',
'd', 'a', 't', 'a',
};
Http2FrameHeader header(8, Http2FrameType::PING, 0, 0);
FrameParts expected(header);
expected.SetOptPing(
Http2PingFields{{'s', 'o', 'm', 'e', 'd', 'a', 't', 'a'}});
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, PingAck) {
const char kFrameData[] = {
'\x00', '\x00', '\x08',
'\x06',
'\xff',
'\x00', '\x00', '\x00', '\x00',
's', 'o', 'm', 'e',
'd', 'a', 't', 'a',
};
Http2FrameHeader header(8, Http2FrameType::PING, Http2FrameFlag::ACK, 0);
FrameParts expected(header);
expected.SetOptPing(
Http2PingFields{{'s', 'o', 'm', 'e', 'd', 'a', 't', 'a'}});
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, GoAwayMinimal) {
const char kFrameData[] = {
'\x00', '\x00', '\x08',
'\x07',
'\xff',
'\x00', '\x00', '\x00', '\x01',
'\x80', '\x00', '\x00', '\xff',
'\x00', '\x00', '\x00', '\x09',
};
Http2FrameHeader header(8, Http2FrameType::GOAWAY, 0, 1);
FrameParts expected(header);
expected.SetOptGoaway(
Http2GoAwayFields(255, Http2ErrorCode::COMPRESSION_ERROR));
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, WindowUpdate) {
const char kFrameData[] = {
'\x00', '\x00', '\x04',
'\x08',
'\x0f',
'\x00', '\x00', '\x00', '\x01',
'\x80', '\x00', '\x04', '\x00',
};
Http2FrameHeader header(4, Http2FrameType::WINDOW_UPDATE, 0, 1);
FrameParts expected(header);
expected.SetOptWindowUpdateIncrement(1024);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, ContinuationEmpty) {
const char kFrameData[] = {
'\x00', '\x00', '\x00',
'\x09',
'\x00',
'\x00', '\x00', '\x00',
'\x00',
};
Http2FrameHeader header(0, Http2FrameType::CONTINUATION, 0, 0);
FrameParts expected(header);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, AltSvcMinimal) {
const char kFrameData[] = {
'\x00', '\x00', '\x02',
'\x0a',
'\xff',
'\x00', '\x00', '\x00',
'\x00',
'\x00', '\x00',
};
Http2FrameHeader header(2, Http2FrameType::ALTSVC, 0, 0);
FrameParts expected(header);
expected.SetOptAltsvcOriginLength(0);
expected.SetOptAltsvcValueLength(0);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, UnknownEmpty) {
const char kFrameData[] = {
'\x00', '\x00', '\x00',
'\x20',
'\xff',
'\x00', '\x00', '\x00', '\x00',
};
Http2FrameHeader header(0, static_cast<Http2FrameType>(32), 0xff, 0);
FrameParts expected(header);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, DataPayload) {
const char kFrameData[] = {
'\x00', '\x00', '\x03',
'\x00',
'\x80',
'\x00', '\x00', '\x02', '\x02',
'a', 'b', 'c',
};
Http2FrameHeader header(3, Http2FrameType::DATA, 0, 514);
FrameParts expected(header, "abc");
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, HeadersPayload) {
const char kFrameData[] = {
'\x00', '\x00', '\x03',
'\x01',
'\x05',
'\x00', '\x00', '\x00', '\x02',
'a', 'b', 'c',
};
Http2FrameHeader header(
3, Http2FrameType::HEADERS,
Http2FrameFlag::END_STREAM | Http2FrameFlag::END_HEADERS, 2);
FrameParts expected(header, "abc");
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, HeadersPriority) {
const char kFrameData[] = {
'\x00', '\x00', '\x05',
'\x01',
'\x20',
'\x00', '\x00', '\x00', '\x02',
'\x00', '\x00', '\x00', '\x01',
'\xff',
};
Http2FrameHeader header(5, Http2FrameType::HEADERS, Http2FrameFlag::PRIORITY,
2);
FrameParts expected(header);
expected.SetOptPriority(Http2PriorityFields(1, 256, false));
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, Settings) {
const char kFrameData[] = {
'\x00', '\x00', '\x0c',  // Payload length: 12
'\x04',                  // Frame type: SETTINGS
'\x00',                  // Flags: none
'\x00', '\x00', '\x00', '\x00',  // Stream ID: 0
'\x00', '\x04',                  // Parameter: INITIAL_WINDOW_SIZE
'\x0a', '\x0b', '\x0c', '\x0d',  // Value: 168496141
'\x00', '\x02',                  // Parameter: ENABLE_PUSH
'\x00', '\x00', '\x00', '\x03',  // Value: 3
};
Http2FrameHeader header(12, Http2FrameType::SETTINGS, 0, 0);
FrameParts expected(header);
expected.AppendSetting(Http2SettingFields(
Http2SettingsParameter::INITIAL_WINDOW_SIZE, 168496141));
expected.AppendSetting(
Http2SettingFields(Http2SettingsParameter::ENABLE_PUSH, 3));
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, PushPromisePayload) {
const char kFrameData[] = {
'\x00', '\x00', 7,
'\x05',
'\x04',
'\x00', '\x00', '\x00', '\xff',
'\x00', '\x00', '\x01', '\x00',
'a', 'b', 'c',
};
Http2FrameHeader header(7, Http2FrameType::PUSH_PROMISE,
Http2FrameFlag::END_HEADERS, 255);
FrameParts expected(header, "abc");
expected.SetOptPushPromise(Http2PushPromiseFields{256});
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, GoAwayOpaqueData) {
const char kFrameData[] = {
'\x00', '\x00', '\x0e',
'\x07',
'\xff',
'\x80', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x01', '\x00',
'\x00', '\x00', '\x00', '\x03',
'o', 'p', 'a', 'q', 'u', 'e',
};
Http2FrameHeader header(14, Http2FrameType::GOAWAY, 0, 0);
FrameParts expected(header, "opaque");
expected.SetOptGoaway(
Http2GoAwayFields(256, Http2ErrorCode::FLOW_CONTROL_ERROR));
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, ContinuationPayload) {
const char kFrameData[] = {
'\x00', '\x00', '\x03',
'\x09',
'\xff',
'\x00', '\x00', '\x00', '\x02',
'a', 'b', 'c',
};
Http2FrameHeader header(3, Http2FrameType::CONTINUATION,
Http2FrameFlag::END_HEADERS, 2);
FrameParts expected(header, "abc");
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, AltSvcPayload) {
const char kFrameData[] = {
'\x00', '\x00', '\x08',
'\x0a',
'\x00',
'\x00', '\x00', '\x00', '\x02',
'\x00', '\x03',
'a', 'b', 'c',
'd', 'e', 'f',
};
Http2FrameHeader header(8, Http2FrameType::ALTSVC, 0, 2);
FrameParts expected(header);
expected.SetAltSvcExpected("abc", "def");
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, PriorityUpdatePayload) {
const char kFrameData[] = {
'\x00', '\x00', '\x07',
'\x10',
'\x00',
'\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x05',
'a', 'b', 'c',
};
Http2FrameHeader header(7, Http2FrameType::PRIORITY_UPDATE, 0, 0);
FrameParts expected(header, "abc");
expected.SetOptPriorityUpdate(Http2PriorityUpdateFields{5});
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, UnknownPayload) {
const char kFrameData[] = {
'\x00', '\x00', '\x03',
'\x30',
'\x00',
'\x00', '\x00', '\x00', '\x02',
'a', 'b', 'c',
};
Http2FrameHeader header(3, static_cast<Http2FrameType>(48), 0, 2);
FrameParts expected(header, "abc");
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, DataPayloadAndPadding) {
const char kFrameData[] = {
'\x00', '\x00', '\x07',
'\x00',
'\x09',
'\x00', '\x00', '\x00', '\x02',
'\x03',
'a', 'b', 'c',
'\x00', '\x00', '\x00',
};
Http2FrameHeader header(7, Http2FrameType::DATA,
Http2FrameFlag::END_STREAM | Http2FrameFlag::PADDED,
2);
size_t total_pad_length = 4;
FrameParts expected(header, "abc", total_pad_length);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, HeadersPayloadAndPadding) {
const char kFrameData[] = {
'\x00', '\x00', '\x07',
'\x01',
'\x08',
'\x00', '\x00', '\x00', '\x02',
'\x03',
'a', 'b', 'c',
'\x00', '\x00', '\x00',
};
Http2FrameHeader header(7, Http2FrameType::HEADERS, Http2FrameFlag::PADDED,
2);
size_t total_pad_length = 4;
FrameParts expected(header, "abc", total_pad_length);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, HeadersPayloadPriorityAndPadding) {
const char kFrameData[] = {
'\x00', '\x00', '\x0c',  // Payload length: 12
'\x01',                  // Frame type: HEADERS
'\xff',                  // Flags: all bits set; only HEADERS flags retained
'\x00', '\x00', '\x00', '\x02',  // Stream ID: 2
'\x03',                          // Pad length: 3
'\x80', '\x00', '\x00', '\x01',  // Exclusive bit set; parent stream: 1
'\x10',                          // Weight: 17 (encoded as 16)
'a', 'b', 'c',                   // Header block fragment
'\x00', '\x00', '\x00',          // Padding
};
Http2FrameHeader header(12, Http2FrameType::HEADERS,
Http2FrameFlag::END_STREAM |
Http2FrameFlag::END_HEADERS |
Http2FrameFlag::PADDED | Http2FrameFlag::PRIORITY,
2);
size_t total_pad_length = 4;
FrameParts expected(header, "abc", total_pad_length);
expected.SetOptPriority(Http2PriorityFields(1, 17, true));
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, PushPromisePayloadAndPadding) {
const char kFrameData[] = {
'\x00', '\x00', 11,
'\x05',
'\xff',
'\x00', '\x00', '\x00', '\x01',
'\x03',
'\x00', '\x00', '\x00', '\x02',
'a', 'b', 'c',
'\x00', '\x00', '\x00',
};
Http2FrameHeader header(11, Http2FrameType::PUSH_PROMISE,
Http2FrameFlag::END_HEADERS | Http2FrameFlag::PADDED,
1);
size_t total_pad_length = 4;
FrameParts expected(header, "abc", total_pad_length);
expected.SetOptPushPromise(Http2PushPromiseFields{2});
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, DataMissingPadLengthField) {
const char kFrameData[] = {
'\x00', '\x00', '\x00',
'\x00',
'\x08',
'\x00', '\x00', '\x00', '\x01',
};
Http2FrameHeader header(0, Http2FrameType::DATA, Http2FrameFlag::PADDED, 1);
FrameParts expected(header);
expected.SetOptMissingLength(1);
EXPECT_TRUE(DecodePayloadExpectingError(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, HeaderPaddingTooLong) {
const char kFrameData[] = {
'\x00', '\x00', '\x02',
'\x01',
'\x08',
'\x00', '\x01', '\x00', '\x00',
'\xff',
'\x00',
};
Http2FrameHeader header(2, Http2FrameType::HEADERS, Http2FrameFlag::PADDED,
65536);
FrameParts expected(header);
expected.SetOptMissingLength(254);
EXPECT_TRUE(DecodePayloadExpectingError(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, HeaderMissingPriority) {
const char kFrameData[] = {
'\x00', '\x00', '\x04',
'\x01',
'\x20',
'\x00', '\x01', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00',
};
Http2FrameHeader header(4, Http2FrameType::HEADERS, Http2FrameFlag::PRIORITY,
65536);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, PriorityTooShort) {
const char kFrameData[] = {
'\x00', '\x00', '\x04',
'\x02',
'\x00',
'\x00', '\x00', '\x00', '\x02',
'\x80', '\x00', '\x00', '\x01',
};
Http2FrameHeader header(4, Http2FrameType::PRIORITY, 0, 2);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, RstStreamTooShort) {
const char kFrameData[] = {
'\x00', '\x00', '\x03',
'\x03',
'\x00',
'\x00', '\x00', '\x00', '\x01',
'\x00', '\x00', '\x00',
};
Http2FrameHeader header(3, Http2FrameType::RST_STREAM, 0, 1);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, SettingsWrongSize) {
const char kFrameData[] = {
'\x00', '\x00', '\x09',
'\x04',
'\x00',
'\x00', '\x00', '\x00', '\x00',
'\x00', '\x02',
'\x00', '\x00', '\x00', '\x03',
'\x00', '\x04',
'\x00',
};
Http2FrameHeader header(9, Http2FrameType::SETTINGS, 0, 0);
FrameParts expected(header);
expected.AppendSetting(
Http2SettingFields(Http2SettingsParameter::ENABLE_PUSH, 3));
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, expected));
}
TEST_F(Http2FrameDecoderTest, PushPromiseTooShort) {
const char kFrameData[] = {
'\x00', '\x00', 3,
'\x05',
'\x00',
'\x00', '\x00', '\x00', '\x01',
'\x00', '\x00', '\x00',
};
Http2FrameHeader header(3, Http2FrameType::PUSH_PROMISE, 0, 1);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, PushPromisePaddedTruncatedPromise) {
const char kFrameData[] = {
'\x00', '\x00', 4,
'\x05',
'\x08',
'\x00', '\x00', '\x00', '\x01',
'\x00',
'\x00', '\x00', '\x00',
};
Http2FrameHeader header(4, Http2FrameType::PUSH_PROMISE,
Http2FrameFlag::PADDED, 1);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, PingTooShort) {
const char kFrameData[] = {
'\x00', '\x00', '\x07',
'\x06',
'\xfe',
'\x00', '\x00', '\x00', '\x00',
's', 'o', 'm', 'e',
'd', 'a', 't',
};
Http2FrameHeader header(7, Http2FrameType::PING, 0, 0);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, GoAwayTooShort) {
const char kFrameData[] = {
'\x00', '\x00', '\x00',
'\x07',
'\xff',
'\x00', '\x00', '\x00', '\x00',
};
Http2FrameHeader header(0, Http2FrameType::GOAWAY, 0, 0);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, WindowUpdateTooShort) {
const char kFrameData[] = {
'\x00', '\x00', '\x03',
'\x08',
'\x0f',
'\x00', '\x00', '\x00', '\x01',
'\x80', '\x00', '\x04',
};
Http2FrameHeader header(3, Http2FrameType::WINDOW_UPDATE, 0, 1);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, AltSvcTruncatedOriginLength) {
const char kFrameData[] = {
'\x00', '\x00', '\x01',
'\x0a',
'\x00',
'\x00', '\x00', '\x00', '\x02',
'\x00',
};
Http2FrameHeader header(1, Http2FrameType::ALTSVC, 0, 2);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, AltSvcTruncatedOrigin) {
const char kFrameData[] = {
'\x00', '\x00', '\x05',
'\x0a',
'\x00',
'\x00', '\x00', '\x00', '\x02',
'\x00', '\x04',
'a', 'b', 'c',
};
Http2FrameHeader header(5, Http2FrameType::ALTSVC, 0, 2);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
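// Lowering the decoder's maximum payload size to 2 forces a frame size
// error as soon as the 9-byte header has been decoded.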
TEST_F(Http2FrameDecoderTest, BeyondMaximum) {
maximum_payload_size_ = 2;
const char kFrameData[] = {
'\x00', '\x00', '\x07',
'\x00',
'\x09',
'\x00', '\x00', '\x00', '\x02',
'\x03',
'a', 'b', 'c',
'\x00', '\x00', '\x00',
};
Http2FrameHeader header(7, Http2FrameType::DATA,
Http2FrameFlag::END_STREAM | Http2FrameFlag::PADDED,
2);
FrameParts expected(header);
expected.SetHasFrameSizeError(true);
auto validator = [&expected, this](const DecodeBuffer& input,
DecodeStatus status) -> AssertionResult {
HTTP2_VERIFY_EQ(status, DecodeStatus::kDecodeError);
HTTP2_VERIFY_EQ(input.Offset(), Http2FrameHeader::EncodedSize());
return VerifyCollected(expected);
};
ResetDecodeSpeedCounters();
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(ToStringPiece(kFrameData),
validator));
EXPECT_GT(fast_decode_count_, 0u);
EXPECT_GT(slow_decode_count_, 0u);
}
TEST_F(Http2FrameDecoderTest, PriorityTooLong) {
const char kFrameData[] = {
'\x00', '\x00', '\x06',
'\x02',
'\x00',
'\x00', '\x00', '\x00', '\x02',
'\x80', '\x00', '\x00', '\x01',
'\x10',
'\x00',
};
Http2FrameHeader header(6, Http2FrameType::PRIORITY, 0, 2);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, RstStreamTooLong) {
const char kFrameData[] = {
'\x00', '\x00', '\x05',
'\x03',
'\x00',
'\x00', '\x00', '\x00', '\x01',
'\x00', '\x00', '\x00', '\x01',
'\x00',
};
Http2FrameHeader header(5, Http2FrameType::RST_STREAM, 0, 1);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, SettingsAckTooLong) {
const char kFrameData[] = {
'\x00', '\x00', '\x06',
'\x04',
'\x01',
'\x00', '\x00', '\x00', '\x00',
'\x00', '\x00',
'\x00', '\x00', '\x00', '\x00',
};
Http2FrameHeader header(6, Http2FrameType::SETTINGS, Http2FrameFlag::ACK, 0);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, PingAckTooLong) {
const char kFrameData[] = {
'\x00', '\x00', '\x09',
'\x06',
'\xff',
'\x00', '\x00', '\x00', '\x00',
's', 'o', 'm', 'e',
'd', 'a', 't', 'a',
'\x00',
};
Http2FrameHeader header(9, Http2FrameType::PING, Http2FrameFlag::ACK, 0);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
TEST_F(Http2FrameDecoderTest, WindowUpdateTooLong) {
const char kFrameData[] = {
'\x00', '\x00', '\x05',
'\x08',
'\x0f',
'\x00', '\x00', '\x00', '\x01',
'\x80', '\x00', '\x04', '\x00',
'\x00',
};
Http2FrameHeader header(5, Http2FrameType::WINDOW_UPDATE, 0, 1);
EXPECT_TRUE(DecodePayloadExpectingFrameSizeError(kFrameData, header));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/http2_frame_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/http2_frame_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
23e3a29e-ec56-4902-a008-341d667cbec3 | cpp | google/quiche | connect_udp_tunnel | quiche/quic/tools/connect_udp_tunnel.cc | quiche/quic/tools/connect_udp_tunnel_test.cc | #include "quiche/quic/tools/connect_udp_tunnel.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_server_id.h"
#include "quiche/quic/core/socket_factory.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/tools/quic_backend_response.h"
#include "quiche/quic/tools/quic_name_lookup.h"
#include "quiche/quic/tools/quic_simple_server_backend.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/masque/connect_udp_datagram_payload.h"
#include "quiche/common/platform/api/quiche_googleurl.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
#include "quiche/common/platform/api/quiche_url_utils.h"
#include "quiche/common/structured_headers.h"
namespace quic {
namespace structured_headers = quiche::structured_headers;
namespace {
constexpr size_t kReadSize = 4 * 1024;
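// Parses the target host and port out of a canonicalized CONNECT-UDP path
// of the form "/.well-known/masque/udp/{target_host}/{target_port}/"
// (the default template from RFC 9298).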
std::optional<QuicServerId> ValidateAndParseTargetFromPath(
absl::string_view path) {
std::string canonicalized_path_str;
url::StdStringCanonOutput canon_output(&canonicalized_path_str);
url::Component path_component;
url::CanonicalizePath(path.data(), url::Component(0, path.size()),
&canon_output, &path_component);
if (!path_component.is_nonempty()) {
QUICHE_DVLOG(1) << "CONNECT-UDP request with non-canonicalizable path: "
<< path;
return std::nullopt;
}
canon_output.Complete();
absl::string_view canonicalized_path =
absl::string_view(canonicalized_path_str)
.substr(path_component.begin, path_component.len);
std::vector<absl::string_view> path_split =
absl::StrSplit(canonicalized_path, '/');
if (path_split.size() != 7 || !path_split[0].empty() ||
path_split[1] != ".well-known" || path_split[2] != "masque" ||
path_split[3] != "udp" || path_split[4].empty() ||
path_split[5].empty() || !path_split[6].empty()) {
QUICHE_DVLOG(1) << "CONNECT-UDP request with bad path: "
<< canonicalized_path;
return std::nullopt;
}
std::optional<std::string> decoded_host =
quiche::AsciiUrlDecode(path_split[4]);
if (!decoded_host.has_value()) {
QUICHE_DVLOG(1) << "CONNECT-UDP request with undecodable host: "
<< path_split[4];
return std::nullopt;
}
QUICHE_DCHECK(!decoded_host->empty());
std::optional<std::string> decoded_port =
quiche::AsciiUrlDecode(path_split[5]);
if (!decoded_port.has_value()) {
QUICHE_DVLOG(1) << "CONNECT-UDP request with undecodable port: "
<< path_split[5];
return std::nullopt;
}
QUICHE_DCHECK(!decoded_port->empty());
int parsed_port_number = url::ParsePort(
decoded_port->data(), url::Component(0, decoded_port->size()));
if (parsed_port_number <= 0) {
QUICHE_DVLOG(1) << "CONNECT-UDP request with bad port: " << *decoded_port;
return std::nullopt;
}
QUICHE_DCHECK_LE(parsed_port_number, std::numeric_limits<uint16_t>::max());
return QuicServerId(*decoded_host, static_cast<uint16_t>(parsed_port_number));
}
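// Checks the extended-CONNECT pseudo-headers (:authority, :scheme "https",
// :path) and extracts the tunnel target from the path.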
std::optional<QuicServerId> ValidateHeadersAndGetTarget(
const quiche::HttpHeaderBlock& request_headers) {
QUICHE_DCHECK(request_headers.contains(":method"));
QUICHE_DCHECK(request_headers.find(":method")->second == "CONNECT");
QUICHE_DCHECK(request_headers.contains(":protocol"));
QUICHE_DCHECK(request_headers.find(":protocol")->second == "connect-udp");
auto authority_it = request_headers.find(":authority");
if (authority_it == request_headers.end() || authority_it->second.empty()) {
QUICHE_DVLOG(1) << "CONNECT-UDP request missing authority";
return std::nullopt;
}
auto scheme_it = request_headers.find(":scheme");
if (scheme_it == request_headers.end() || scheme_it->second.empty()) {
QUICHE_DVLOG(1) << "CONNECT-UDP request missing scheme";
return std::nullopt;
} else if (scheme_it->second != "https") {
QUICHE_DVLOG(1) << "CONNECT-UDP request contains unexpected scheme: "
<< scheme_it->second;
return std::nullopt;
}
auto path_it = request_headers.find(":path");
if (path_it == request_headers.end() || path_it->second.empty()) {
QUICHE_DVLOG(1) << "CONNECT-UDP request missing path";
return std::nullopt;
}
std::optional<QuicServerId> target_server_id =
ValidateAndParseTargetFromPath(path_it->second);
return target_server_id;
}
bool ValidateTarget(
const QuicServerId& target,
const absl::flat_hash_set<QuicServerId>& acceptable_targets) {
if (acceptable_targets.contains(target)) {
return true;
}
QUICHE_DVLOG(1)
<< "CONNECT-UDP request target is not an acceptable allow-listed target: "
<< target.ToHostPortString();
return false;
}
}
ConnectUdpTunnel::ConnectUdpTunnel(
QuicSimpleServerBackend::RequestHandler* client_stream_request_handler,
SocketFactory* socket_factory, std::string server_label,
absl::flat_hash_set<QuicServerId> acceptable_targets)
: acceptable_targets_(std::move(acceptable_targets)),
socket_factory_(socket_factory),
server_label_(std::move(server_label)),
client_stream_request_handler_(client_stream_request_handler) {
QUICHE_DCHECK(client_stream_request_handler_);
QUICHE_DCHECK(socket_factory_);
QUICHE_DCHECK(!server_label_.empty());
}
ConnectUdpTunnel::~ConnectUdpTunnel() {
QUICHE_DCHECK(!IsTunnelOpenToTarget());
QUICHE_DCHECK(!receive_started_);
QUICHE_DCHECK(!datagram_visitor_registered_);
}
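// Validates the request, resolves and connects a UDP socket to the target,
// registers for HTTP/3 datagrams on the client stream, and answers with a
// 2xx response; any failure produces a stream reset or an error response.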
void ConnectUdpTunnel::OpenTunnel(
const quiche::HttpHeaderBlock& request_headers) {
QUICHE_DCHECK(!IsTunnelOpenToTarget());
std::optional<QuicServerId> target =
ValidateHeadersAndGetTarget(request_headers);
if (!target.has_value()) {
TerminateClientStream(
"invalid request headers",
QuicResetStreamError::FromIetf(QuicHttp3ErrorCode::MESSAGE_ERROR));
return;
}
if (!ValidateTarget(*target, acceptable_targets_)) {
SendErrorResponse("403", "destination_ip_prohibited",
"disallowed proxy target");
return;
}
QuicSocketAddress address = tools::LookupAddress(AF_UNSPEC, *target);
if (!address.IsInitialized()) {
SendErrorResponse("500", "dns_error", "host resolution error");
return;
}
target_socket_ = socket_factory_->CreateConnectingUdpClientSocket(
address,
/*receive_buffer_size=*/0,
/*send_buffer_size=*/0,
this);
QUICHE_DCHECK(target_socket_);
absl::Status connect_result = target_socket_->ConnectBlocking();
if (!connect_result.ok()) {
SendErrorResponse(
"502", "destination_ip_unroutable",
absl::StrCat("UDP socket error: ", connect_result.ToString()));
return;
}
QUICHE_DVLOG(1) << "CONNECT-UDP tunnel opened from stream "
<< client_stream_request_handler_->stream_id() << " to "
<< target->ToHostPortString();
client_stream_request_handler_->GetStream()->RegisterHttp3DatagramVisitor(
this);
datagram_visitor_registered_ = true;
SendConnectResponse();
BeginAsyncReadFromTarget();
}
bool ConnectUdpTunnel::IsTunnelOpenToTarget() const { return !!target_socket_; }
void ConnectUdpTunnel::OnClientStreamClose() {
QUICHE_CHECK(client_stream_request_handler_);
QUICHE_DVLOG(1) << "CONNECT-UDP stream "
<< client_stream_request_handler_->stream_id() << " closed";
if (datagram_visitor_registered_) {
client_stream_request_handler_->GetStream()
->UnregisterHttp3DatagramVisitor();
datagram_visitor_registered_ = false;
}
client_stream_request_handler_ = nullptr;
if (IsTunnelOpenToTarget()) {
target_socket_->Disconnect();
}
target_socket_.reset();
}
void ConnectUdpTunnel::ConnectComplete(absl::Status /*status*/) {
// Async connect is never initiated; OpenTunnel() uses ConnectBlocking().
QUICHE_NOTREACHED();
}
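// Target-to-client path: each datagram read from the UDP socket is wrapped
// as a CONNECT-UDP payload and forwarded as an HTTP/3 datagram, after which
// the next asynchronous read is armed.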
void ConnectUdpTunnel::ReceiveComplete(
absl::StatusOr<quiche::QuicheMemSlice> data) {
QUICHE_DCHECK(IsTunnelOpenToTarget());
QUICHE_DCHECK(receive_started_);
receive_started_ = false;
if (!data.ok()) {
if (client_stream_request_handler_) {
QUICHE_LOG(WARNING) << "Error receiving CONNECT-UDP data from target: "
<< data.status();
} else {
QUICHE_DVLOG(1) << "Error receiving CONNECT-UDP data from target after "
"stream already closed.";
}
return;
}
QUICHE_DCHECK(client_stream_request_handler_);
quiche::ConnectUdpDatagramUdpPacketPayload payload(data->AsStringView());
client_stream_request_handler_->GetStream()->SendHttp3Datagram(
payload.Serialize());
BeginAsyncReadFromTarget();
}
void ConnectUdpTunnel::SendComplete(absl::Status /*status*/) {
// Packets are sent to the target with SendBlocking(), never asynchronously.
QUICHE_NOTREACHED();
}
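// Client-to-target path: HTTP/3 datagrams carrying a UDP-packet payload are
// unwrapped and sent to the target; payloads with unrecognized context IDs
// are ignored.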
void ConnectUdpTunnel::OnHttp3Datagram(QuicStreamId stream_id,
absl::string_view payload) {
QUICHE_DCHECK(IsTunnelOpenToTarget());
QUICHE_DCHECK_EQ(stream_id, client_stream_request_handler_->stream_id());
QUICHE_DCHECK(!payload.empty());
std::unique_ptr<quiche::ConnectUdpDatagramPayload> parsed_payload =
quiche::ConnectUdpDatagramPayload::Parse(payload);
if (!parsed_payload) {
QUICHE_DVLOG(1) << "Ignoring HTTP Datagram payload, due to inability to "
"parse as CONNECT-UDP payload.";
return;
}
switch (parsed_payload->GetType()) {
case quiche::ConnectUdpDatagramPayload::Type::kUdpPacket:
SendUdpPacketToTarget(parsed_payload->GetUdpProxyingPayload());
break;
case quiche::ConnectUdpDatagramPayload::Type::kUnknown:
QUICHE_DVLOG(1)
<< "Ignoring HTTP Datagram payload with unrecognized context ID.";
}
}
void ConnectUdpTunnel::BeginAsyncReadFromTarget() {
QUICHE_DCHECK(IsTunnelOpenToTarget());
QUICHE_DCHECK(client_stream_request_handler_);
QUICHE_DCHECK(!receive_started_);
receive_started_ = true;
target_socket_->ReceiveAsync(kReadSize);
}
void ConnectUdpTunnel::SendUdpPacketToTarget(absl::string_view packet) {
absl::Status send_result = target_socket_->SendBlocking(std::string(packet));
if (!send_result.ok()) {
QUICHE_LOG(WARNING) << "Error sending CONNECT-UDP datagram to target: "
<< send_result;
}
}
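// Sends the successful tunnel response: a 200 with "Capsule-Protocol: ?1"
// (RFC 9297), marked incomplete so the stream stays open for datagrams.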
void ConnectUdpTunnel::SendConnectResponse() {
QUICHE_DCHECK(IsTunnelOpenToTarget());
QUICHE_DCHECK(client_stream_request_handler_);
quiche::HttpHeaderBlock response_headers;
response_headers[":status"] = "200";
std::optional<std::string> capsule_protocol_value =
structured_headers::SerializeItem(structured_headers::Item(true));
QUICHE_CHECK(capsule_protocol_value.has_value());
response_headers["Capsule-Protocol"] = *capsule_protocol_value;
QuicBackendResponse response;
response.set_headers(std::move(response_headers));
response.set_response_type(QuicBackendResponse::INCOMPLETE_RESPONSE);
client_stream_request_handler_->OnResponseBackendComplete(&response);
}
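// Builds a non-2xx response whose Proxy-Status header (RFC 9209) carries
// the server label plus "error" and "details" parameters.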
void ConnectUdpTunnel::SendErrorResponse(absl::string_view status,
absl::string_view proxy_status_error,
absl::string_view error_details) {
QUICHE_DCHECK(!status.empty());
QUICHE_DCHECK(!proxy_status_error.empty());
QUICHE_DCHECK(!error_details.empty());
QUICHE_DCHECK(client_stream_request_handler_);
#ifndef NDEBUG
int status_num = 0;
bool is_num = absl::SimpleAtoi(status, &status_num);
QUICHE_DCHECK(is_num);
QUICHE_DCHECK_GE(status_num, 100);
QUICHE_DCHECK_LT(status_num, 600);
QUICHE_DCHECK(status_num < 200 || status_num >= 300);
#endif
quiche::HttpHeaderBlock headers;
headers[":status"] = status;
structured_headers::Item proxy_status_item(server_label_);
structured_headers::Item proxy_status_error_item(
std::string{proxy_status_error});
structured_headers::Item proxy_status_details_item(
std::string{error_details});
structured_headers::ParameterizedMember proxy_status_member(
std::move(proxy_status_item),
{{"error", std::move(proxy_status_error_item)},
{"details", std::move(proxy_status_details_item)}});
std::optional<std::string> proxy_status_value =
structured_headers::SerializeList({proxy_status_member});
QUICHE_CHECK(proxy_status_value.has_value());
headers["Proxy-Status"] = *proxy_status_value;
QuicBackendResponse response;
response.set_headers(std::move(headers));
client_stream_request_handler_->OnResponseBackendComplete(&response);
}
void ConnectUdpTunnel::TerminateClientStream(
absl::string_view error_description, QuicResetStreamError error_code) {
QUICHE_DCHECK(client_stream_request_handler_);
std::string error_description_str =
error_description.empty() ? ""
: absl::StrCat(" due to ", error_description);
QUICHE_DVLOG(1) << "Terminating CONNECT stream "
<< client_stream_request_handler_->stream_id()
<< " with error code " << error_code.ietf_application_code()
<< error_description_str;
client_stream_request_handler_->TerminateStreamWithError(error_code);
}
} | #include "quiche/quic/tools/connect_udp_tunnel.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/connecting_client_socket.h"
#include "quiche/quic/core/http/quic_spdy_stream.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/socket_factory.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test_loopback.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/tools/quic_simple_server_backend.h"
#include "quiche/common/masque/connect_udp_datagram_payload.h"
#include "quiche/common/platform/api/quiche_googleurl.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/platform/api/quiche_url_utils.h"
namespace quic::test {
namespace {
using ::testing::_;
using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::Ge;
using ::testing::Gt;
using ::testing::HasSubstr;
using ::testing::InvokeWithoutArgs;
using ::testing::IsEmpty;
using ::testing::Matcher;
using ::testing::NiceMock;
using ::testing::Pair;
using ::testing::Property;
using ::testing::Return;
using ::testing::StrictMock;
using ::testing::UnorderedElementsAre;
constexpr QuicStreamId kStreamId = 100;
class MockStream : public QuicSpdyStream {
public:
explicit MockStream(QuicSpdySession* spdy_session)
: QuicSpdyStream(kStreamId, spdy_session, BIDIRECTIONAL) {}
void OnBodyAvailable() override {}
MOCK_METHOD(MessageStatus, SendHttp3Datagram, (absl::string_view data),
(override));
};
class MockRequestHandler : public QuicSimpleServerBackend::RequestHandler {
public:
QuicConnectionId connection_id() const override {
return TestConnectionId(41212);
}
QuicStreamId stream_id() const override { return kStreamId; }
std::string peer_host() const override { return "127.0.0.1"; }
MOCK_METHOD(QuicSpdyStream*, GetStream, (), (override));
MOCK_METHOD(void, OnResponseBackendComplete,
(const QuicBackendResponse* response), (override));
MOCK_METHOD(void, SendStreamData, (absl::string_view data, bool close_stream),
(override));
MOCK_METHOD(void, TerminateStreamWithError, (QuicResetStreamError error),
(override));
};
class MockSocketFactory : public SocketFactory {
public:
MOCK_METHOD(std::unique_ptr<ConnectingClientSocket>, CreateTcpClientSocket,
(const QuicSocketAddress& peer_address,
QuicByteCount receive_buffer_size,
QuicByteCount send_buffer_size,
ConnectingClientSocket::AsyncVisitor* async_visitor),
(override));
MOCK_METHOD(std::unique_ptr<ConnectingClientSocket>,
CreateConnectingUdpClientSocket,
(const QuicSocketAddress& peer_address,
QuicByteCount receive_buffer_size,
QuicByteCount send_buffer_size,
ConnectingClientSocket::AsyncVisitor* async_visitor),
(override));
};
class MockSocket : public ConnectingClientSocket {
public:
MOCK_METHOD(absl::Status, ConnectBlocking, (), (override));
MOCK_METHOD(void, ConnectAsync, (), (override));
MOCK_METHOD(void, Disconnect, (), (override));
MOCK_METHOD(absl::StatusOr<QuicSocketAddress>, GetLocalAddress, (),
(override));
MOCK_METHOD(absl::StatusOr<quiche::QuicheMemSlice>, ReceiveBlocking,
(QuicByteCount max_size), (override));
MOCK_METHOD(void, ReceiveAsync, (QuicByteCount max_size), (override));
MOCK_METHOD(absl::Status, SendBlocking, (std::string data), (override));
MOCK_METHOD(absl::Status, SendBlocking, (quiche::QuicheMemSlice data),
(override));
MOCK_METHOD(void, SendAsync, (std::string data), (override));
MOCK_METHOD(void, SendAsync, (quiche::QuicheMemSlice data), (override));
};
class ConnectUdpTunnelTest : public quiche::test::QuicheTest {
public:
void SetUp() override {
#if defined(_WIN32)
WSADATA wsa_data;
const WORD version_required = MAKEWORD(2, 2);
ASSERT_EQ(WSAStartup(version_required, &wsa_data), 0);
#endif
auto socket = std::make_unique<StrictMock<MockSocket>>();
socket_ = socket.get();
ON_CALL(socket_factory_,
CreateConnectingUdpClientSocket(
AnyOf(QuicSocketAddress(TestLoopback4(), kAcceptablePort),
QuicSocketAddress(TestLoopback6(), kAcceptablePort)),
_, _, &tunnel_))
.WillByDefault(Return(ByMove(std::move(socket))));
EXPECT_CALL(request_handler_, GetStream()).WillRepeatedly(Return(&stream_));
}
protected:
static constexpr absl::string_view kAcceptableTarget = "localhost";
static constexpr uint16_t kAcceptablePort = 977;
NiceMock<MockQuicConnectionHelper> connection_helper_;
NiceMock<MockAlarmFactory> alarm_factory_;
NiceMock<MockQuicSpdySession> session_{new NiceMock<MockQuicConnection>(
&connection_helper_, &alarm_factory_, Perspective::IS_SERVER)};
StrictMock<MockStream> stream_{&session_};
StrictMock<MockRequestHandler> request_handler_;
NiceMock<MockSocketFactory> socket_factory_;
StrictMock<MockSocket>* socket_;
ConnectUdpTunnel tunnel_{
&request_handler_,
&socket_factory_,
"server_label",
{{std::string(kAcceptableTarget), kAcceptablePort},
{TestLoopback4().ToString(), kAcceptablePort},
{absl::StrCat("[", TestLoopback6().ToString(), "]"), kAcceptablePort}}};
};
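// Happy path: a well-formed CONNECT-UDP request to an allow-listed target
// opens the tunnel and yields a 200 with "Capsule-Protocol: ?1".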
TEST_F(ConnectUdpTunnelTest, OpenTunnel) {
EXPECT_CALL(*socket_, ConnectBlocking()).WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*socket_, ReceiveAsync(Gt(0)));
EXPECT_CALL(*socket_, Disconnect()).WillOnce(InvokeWithoutArgs([this]() {
tunnel_.ReceiveComplete(absl::CancelledError());
}));
EXPECT_CALL(
request_handler_,
OnResponseBackendComplete(
AllOf(Property(&QuicBackendResponse::response_type,
QuicBackendResponse::INCOMPLETE_RESPONSE),
Property(&QuicBackendResponse::headers,
UnorderedElementsAre(Pair(":status", "200"),
Pair("Capsule-Protocol", "?1"))),
Property(&QuicBackendResponse::trailers, IsEmpty()),
Property(&QuicBackendResponse::body, IsEmpty()))));
quiche::HttpHeaderBlock request_headers;
request_headers[":method"] = "CONNECT";
request_headers[":protocol"] = "connect-udp";
request_headers[":authority"] = "proxy.test";
request_headers[":scheme"] = "https";
request_headers[":path"] = absl::StrCat(
"/.well-known/masque/udp/", kAcceptableTarget, "/", kAcceptablePort, "/");
tunnel_.OpenTunnel(request_headers);
EXPECT_TRUE(tunnel_.IsTunnelOpenToTarget());
tunnel_.OnClientStreamClose();
EXPECT_FALSE(tunnel_.IsTunnelOpenToTarget());
}
TEST_F(ConnectUdpTunnelTest, OpenTunnelToIpv4LiteralTarget) {
EXPECT_CALL(*socket_, ConnectBlocking()).WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*socket_, ReceiveAsync(Gt(0)));
EXPECT_CALL(*socket_, Disconnect()).WillOnce(InvokeWithoutArgs([this]() {
tunnel_.ReceiveComplete(absl::CancelledError());
}));
EXPECT_CALL(
request_handler_,
OnResponseBackendComplete(
AllOf(Property(&QuicBackendResponse::response_type,
QuicBackendResponse::INCOMPLETE_RESPONSE),
Property(&QuicBackendResponse::headers,
UnorderedElementsAre(Pair(":status", "200"),
Pair("Capsule-Protocol", "?1"))),
Property(&QuicBackendResponse::trailers, IsEmpty()),
Property(&QuicBackendResponse::body, IsEmpty()))));
quiche::HttpHeaderBlock request_headers;
request_headers[":method"] = "CONNECT";
request_headers[":protocol"] = "connect-udp";
request_headers[":authority"] = "proxy.test";
request_headers[":scheme"] = "https";
request_headers[":path"] =
absl::StrCat("/.well-known/masque/udp/", TestLoopback4().ToString(), "/",
kAcceptablePort, "/");
tunnel_.OpenTunnel(request_headers);
EXPECT_TRUE(tunnel_.IsTunnelOpenToTarget());
tunnel_.OnClientStreamClose();
EXPECT_FALSE(tunnel_.IsTunnelOpenToTarget());
}
TEST_F(ConnectUdpTunnelTest, OpenTunnelToIpv6LiteralTarget) {
EXPECT_CALL(*socket_, ConnectBlocking()).WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*socket_, ReceiveAsync(Gt(0)));
EXPECT_CALL(*socket_, Disconnect()).WillOnce(InvokeWithoutArgs([this]() {
tunnel_.ReceiveComplete(absl::CancelledError());
}));
EXPECT_CALL(
request_handler_,
OnResponseBackendComplete(
AllOf(Property(&QuicBackendResponse::response_type,
QuicBackendResponse::INCOMPLETE_RESPONSE),
Property(&QuicBackendResponse::headers,
UnorderedElementsAre(Pair(":status", "200"),
Pair("Capsule-Protocol", "?1"))),
Property(&QuicBackendResponse::trailers, IsEmpty()),
Property(&QuicBackendResponse::body, IsEmpty()))));
std::string path;
ASSERT_TRUE(quiche::ExpandURITemplate(
"/.well-known/masque/udp/{target_host}/{target_port}/",
{{"target_host", absl::StrCat("[", TestLoopback6().ToString(), "]")},
{"target_port", absl::StrCat(kAcceptablePort)}},
&path));
quiche::HttpHeaderBlock request_headers;
request_headers[":method"] = "CONNECT";
request_headers[":protocol"] = "connect-udp";
request_headers[":authority"] = "proxy.test";
request_headers[":scheme"] = "https";
request_headers[":path"] = path;
tunnel_.OpenTunnel(request_headers);
EXPECT_TRUE(tunnel_.IsTunnelOpenToTarget());
tunnel_.OnClientStreamClose();
EXPECT_FALSE(tunnel_.IsTunnelOpenToTarget());
}
TEST_F(ConnectUdpTunnelTest, OpenTunnelWithMalformedRequest) {
EXPECT_CALL(request_handler_,
TerminateStreamWithError(Property(
&QuicResetStreamError::ietf_application_code,
static_cast<uint64_t>(QuicHttp3ErrorCode::MESSAGE_ERROR))));
quiche::HttpHeaderBlock request_headers;
request_headers[":method"] = "CONNECT";
request_headers[":protocol"] = "connect-udp";
request_headers[":authority"] = "proxy.test";
request_headers[":scheme"] = "https";
tunnel_.OpenTunnel(request_headers);
EXPECT_FALSE(tunnel_.IsTunnelOpenToTarget());
tunnel_.OnClientStreamClose();
}
TEST_F(ConnectUdpTunnelTest, OpenTunnelWithUnacceptableTarget) {
EXPECT_CALL(request_handler_,
OnResponseBackendComplete(AllOf(
Property(&QuicBackendResponse::response_type,
QuicBackendResponse::REGULAR_RESPONSE),
Property(&QuicBackendResponse::headers,
UnorderedElementsAre(
Pair(":status", "403"),
Pair("Proxy-Status",
HasSubstr("destination_ip_prohibited")))),
Property(&QuicBackendResponse::trailers, IsEmpty()))));
quiche::HttpHeaderBlock request_headers;
request_headers[":method"] = "CONNECT";
request_headers[":protocol"] = "connect-udp";
request_headers[":authority"] = "proxy.test";
request_headers[":scheme"] = "https";
request_headers[":path"] = "/.well-known/masque/udp/unacceptable.test/100/";
tunnel_.OpenTunnel(request_headers);
EXPECT_FALSE(tunnel_.IsTunnelOpenToTarget());
tunnel_.OnClientStreamClose();
}
TEST_F(ConnectUdpTunnelTest, ReceiveFromTarget) {
static constexpr absl::string_view kData = "\x11\x22\x33\x44\x55";
EXPECT_CALL(*socket_, ConnectBlocking()).WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*socket_, ReceiveAsync(Ge(kData.size()))).Times(2);
EXPECT_CALL(*socket_, Disconnect()).WillOnce(InvokeWithoutArgs([this]() {
tunnel_.ReceiveComplete(absl::CancelledError());
}));
EXPECT_CALL(request_handler_, OnResponseBackendComplete(_));
EXPECT_CALL(
stream_,
SendHttp3Datagram(
quiche::ConnectUdpDatagramUdpPacketPayload(kData).Serialize()))
.WillOnce(Return(MESSAGE_STATUS_SUCCESS));
quiche::HttpHeaderBlock request_headers;
request_headers[":method"] = "CONNECT";
request_headers[":protocol"] = "connect-udp";
request_headers[":authority"] = "proxy.test";
request_headers[":scheme"] = "https";
request_headers[":path"] = absl::StrCat(
"/.well-known/masque/udp/", kAcceptableTarget, "/", kAcceptablePort, "/");
tunnel_.OpenTunnel(request_headers);
tunnel_.ReceiveComplete(MemSliceFromString(kData));
tunnel_.OnClientStreamClose();
}
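// ConnectUdpDatagramUdpPacketPayload applies the HTTP Datagram framing from
// RFC 9298: Serialize() prepends the UDP_PACKET context ID (the varint 0,
// i.e. a single 0x00 byte) to the raw packet. A sketch of the bytes the
// SendHttp3Datagram expectation above matches, assuming this test's kData:
//
//   std::string expected = absl::StrCat(absl::string_view("\0", 1), kData);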
TEST_F(ConnectUdpTunnelTest, SendToTarget) {
static constexpr absl::string_view kData = "\x11\x22\x33\x44\x55";
EXPECT_CALL(*socket_, ConnectBlocking()).WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*socket_, ReceiveAsync(Gt(0)));
EXPECT_CALL(*socket_, SendBlocking(Matcher<std::string>(Eq(kData))))
.WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*socket_, Disconnect()).WillOnce(InvokeWithoutArgs([this]() {
tunnel_.ReceiveComplete(absl::CancelledError());
}));
EXPECT_CALL(request_handler_, OnResponseBackendComplete(_));
quiche::HttpHeaderBlock request_headers;
request_headers[":method"] = "CONNECT";
request_headers[":protocol"] = "connect-udp";
request_headers[":authority"] = "proxy.test";
request_headers[":scheme"] = "https";
request_headers[":path"] = absl::StrCat(
"/.well-known/masque/udp/", kAcceptableTarget, "/", kAcceptablePort, "/");
tunnel_.OpenTunnel(request_headers);
tunnel_.OnHttp3Datagram(
kStreamId, quiche::ConnectUdpDatagramUdpPacketPayload(kData).Serialize());
tunnel_.OnClientStreamClose();
}
}  // namespace
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/connect_udp_tunnel.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/connect_udp_tunnel_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
79d42034-7362-4f03-af6b-9524fcede270 | cpp | tensorflow/tensorflow | evaluation_delegate_provider | tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc | tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc | #include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include <string>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kNnapiDelegate[] = "nnapi";
constexpr char kGpuDelegate[] = "gpu";
constexpr char kHexagonDelegate[] = "hexagon";
constexpr char kXnnpackDelegate[] = "xnnpack";
constexpr char kCoremlDelegate[] = "coreml";
}  // namespace
TfliteInferenceParams::Delegate ParseStringToDelegateType(
const std::string& val) {
if (val == kNnapiDelegate) return TfliteInferenceParams::NNAPI;
if (val == kGpuDelegate) return TfliteInferenceParams::GPU;
if (val == kHexagonDelegate) return TfliteInferenceParams::HEXAGON;
if (val == kXnnpackDelegate) return TfliteInferenceParams::XNNPACK;
if (val == kCoremlDelegate) return TfliteInferenceParams::COREML;
return TfliteInferenceParams::NONE;
}
TfLiteDelegatePtr CreateTfLiteDelegate(const TfliteInferenceParams& params,
std::string* error_msg) {
const auto type = params.delegate();
switch (type) {
case TfliteInferenceParams::NNAPI: {
auto p = CreateNNAPIDelegate();
if (!p && error_msg) *error_msg = "NNAPI not supported";
return p;
}
case TfliteInferenceParams::GPU: {
auto p = CreateGPUDelegate();
if (!p && error_msg) *error_msg = "GPU delegate not supported.";
return p;
}
case TfliteInferenceParams::HEXAGON: {
      auto p = CreateHexagonDelegate(/*library_directory_path=*/"",
                                     /*profiling=*/false);
if (!p && error_msg) {
*error_msg =
"Hexagon delegate is not supported on the platform or required "
"libraries are missing.";
}
return p;
}
case TfliteInferenceParams::XNNPACK: {
      auto p = CreateXNNPACKDelegate(params.num_threads(),
                                     /*force_fp16=*/false);
if (!p && error_msg) *error_msg = "XNNPACK delegate not supported.";
return p;
}
case TfliteInferenceParams::COREML: {
auto p = CreateCoreMlDelegate();
if (!p && error_msg) *error_msg = "CoreML delegate not supported.";
return p;
}
case TfliteInferenceParams::NONE:
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
default:
if (error_msg) {
*error_msg = "Creation of delegate type: " +
TfliteInferenceParams::Delegate_Name(type) +
" not supported yet.";
}
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
}
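// Typical call site (a minimal sketch, not part of this file): build the
// delegate from the parsed params and fall back to CPU execution when
// creation fails. `interpreter` is a hypothetical tflite::Interpreter the
// caller already owns.
//
//   std::string error;
//   TfLiteDelegatePtr delegate = CreateTfLiteDelegate(params, &error);
//   if (delegate == nullptr) {
//     TFLITE_LOG(WARN) << "Running on CPU: " << error;
//   } else if (interpreter->ModifyGraphWithDelegate(delegate.get()) !=
//              kTfLiteOk) {
//     TFLITE_LOG(ERROR) << "Failed to apply delegate.";
//   }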
DelegateProviders::DelegateProviders()
: delegate_list_util_(¶ms_),
      delegates_map_([this]() -> std::unordered_map<std::string, int> {
std::unordered_map<std::string, int> delegates_map;
const auto& providers = delegate_list_util_.providers();
for (int i = 0; i < providers.size(); ++i) {
delegates_map[providers[i]->GetName()] = i;
}
return delegates_map;
}()) {
delegate_list_util_.AddAllDelegateParams();
}
std::vector<Flag> DelegateProviders::GetFlags() {
std::vector<Flag> flags;
delegate_list_util_.AppendCmdlineFlags(flags);
return flags;
}
bool DelegateProviders::InitFromCmdlineArgs(int* argc, const char** argv) {
std::vector<Flag> flags = GetFlags();
bool parse_result = Flags::Parse(argc, argv, flags);
if (!parse_result || params_.Get<bool>("help")) {
std::string usage = Flags::Usage(argv[0], flags);
TFLITE_LOG(ERROR) << usage;
parse_result = false;
}
return parse_result;
}
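// Expected invocation (a sketch mirroring the accompanying unit test):
// recognized flags are consumed from argv, unknown flags are left in place.
//
//   int argc = 3;
//   const char* argv[] = {"binary", "--use_gpu=true", "--not_a_flag=1"};
//   DelegateProviders providers;
//   if (!providers.InitFromCmdlineArgs(&argc, argv)) return 1;
//   // argc is now 2 and argv[1] == "--not_a_flag=1".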
TfLiteDelegatePtr DelegateProviders::CreateDelegate(
const std::string& name) const {
const auto it = delegates_map_.find(name);
if (it == delegates_map_.end()) {
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
const auto& providers = delegate_list_util_.providers();
return providers[it->second]->CreateTfLiteDelegate(params_);
}
tools::ToolParams DelegateProviders::GetAllParams(
const TfliteInferenceParams& params) const {
tools::ToolParams tool_params;
tool_params.Merge(params_, false);
if (params.has_num_threads()) {
tool_params.Set<int32_t>("num_threads", params.num_threads());
}
const auto type = params.delegate();
switch (type) {
case TfliteInferenceParams::NNAPI:
if (tool_params.HasParam("use_nnapi")) {
tool_params.Set<bool>("use_nnapi", true);
}
break;
case TfliteInferenceParams::GPU:
if (tool_params.HasParam("use_gpu")) {
tool_params.Set<bool>("use_gpu", true);
}
break;
case TfliteInferenceParams::HEXAGON:
if (tool_params.HasParam("use_hexagon")) {
tool_params.Set<bool>("use_hexagon", true);
}
break;
case TfliteInferenceParams::XNNPACK:
if (tool_params.HasParam("use_xnnpack")) {
tool_params.Set<bool>("use_xnnpack", true);
}
if (tool_params.HasParam("xnnpack_force_fp16")) {
tool_params.Set<bool>("xnnpack_force_fp16", true);
}
break;
case TfliteInferenceParams::COREML:
if (tool_params.HasParam("use_coreml")) {
tool_params.Set<bool>("use_coreml", true);
}
break;
default:
break;
}
return tool_params;
}
}  // namespace evaluation
} | #include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace evaluation {
namespace {
TEST(EvaluationDelegateProviderTest, ParseStringToDelegateType) {
EXPECT_EQ(TfliteInferenceParams::NNAPI, ParseStringToDelegateType("nnapi"));
EXPECT_EQ(TfliteInferenceParams::GPU, ParseStringToDelegateType("gpu"));
EXPECT_EQ(TfliteInferenceParams::HEXAGON,
ParseStringToDelegateType("hexagon"));
EXPECT_EQ(TfliteInferenceParams::XNNPACK,
ParseStringToDelegateType("xnnpack"));
EXPECT_EQ(TfliteInferenceParams::NONE, ParseStringToDelegateType("Gpu"));
EXPECT_EQ(TfliteInferenceParams::NONE, ParseStringToDelegateType("Testing"));
}
TEST(EvaluationDelegateProviderTest, CreateTfLiteDelegate) {
TfliteInferenceParams params;
params.set_delegate(TfliteInferenceParams::NONE);
EXPECT_TRUE(!CreateTfLiteDelegate(params));
}
TEST(EvaluationDelegateProviderTest, DelegateProvidersParams) {
DelegateProviders providers;
const auto& params = providers.GetAllParams();
EXPECT_TRUE(params.HasParam("use_nnapi"));
EXPECT_TRUE(params.HasParam("use_gpu"));
int argc = 3;
const char* argv[] = {"program_name", "--use_gpu=true",
"--other_undefined_flag=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
EXPECT_TRUE(params.Get<bool>("use_gpu"));
EXPECT_EQ(2, argc);
EXPECT_EQ("--other_undefined_flag=1", argv[1]);
}
TEST(EvaluationDelegateProviderTest, GetAllParamsWithTfliteInferenceParams) {
DelegateProviders providers;
int argc = 2;
const char* argv[] = {"program_name", "--num_threads=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
const auto& default_params = providers.GetAllParams();
EXPECT_EQ(1, default_params.Get<int>("num_threads"));
TfliteInferenceParams params;
params.set_delegate(TfliteInferenceParams::NONE);
params.set_num_threads(4);
tools::ToolParams tool_params = providers.GetAllParams(params);
EXPECT_EQ(4, tool_params.Get<int>("num_threads"));
EXPECT_EQ(1, argc);
}
}  // namespace
}  // namespace evaluation
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9133bc2a-1e12-41ac-bfc8-f5cd94adb14d | cpp | google/arolla | annotation_expr_operators | arolla/expr/annotation_expr_operators.cc | arolla/expr/annotation_expr_operators_test.cc | #include "arolla/expr/annotation_expr_operators.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
ExprOperatorPtr QTypeAnnotation::Make() {
static const absl::NoDestructor<ExprOperatorPtr> result(
std::make_shared<QTypeAnnotation>(""));
return *result;
}
QTypeAnnotation::QTypeAnnotation(std::string aux_policy)
: ExprOperatorWithFixedSignature(
"annotation.qtype",
ExprOperatorSignature(
{{"expr"}, {"qtype"}},
std::move(aux_policy)),
"QType annotation.",
FingerprintHasher("::arolla::expr::QTypeAnnotation").Finish()) {}
absl::StatusOr<ExprAttributes> QTypeAnnotation::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
if (!inputs[1].qtype()) {
return inputs[0];
}
if (inputs[1].qtype() != GetQTypeQType()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected QTYPE, got qtype: %s", inputs[1].qtype()->name()));
}
if (!inputs[1].qvalue()) {
return absl::InvalidArgumentError("`qtype` must be a literal");
}
const QTypePtr output_qtype = inputs[1].qvalue()->UnsafeAs<QTypePtr>();
if (inputs[0].qtype() && inputs[0].qtype() != output_qtype) {
return absl::InvalidArgumentError(
absl::StrFormat("inconsistent annotation.qtype(expr: %s, qtype=%s)",
inputs[0].qtype()->name(), output_qtype->name()));
}
return ExprAttributes(output_qtype, inputs[0].qvalue());
}
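// A sketch of attaching this annotation when building an expression (uses
// CallOp/Leaf/Literal from the arolla expr API; the leaf name "x" is a
// hypothetical example):
//
//   ASSIGN_OR_RETURN(
//       auto annotated,
//       CallOp(QTypeAnnotation::Make(),
//              {Leaf("x"), Literal(GetQType<int64_t>())}));
//   // `annotated` now infers INT64 even though Leaf("x") alone carries no
//   // type information.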
ExprOperatorPtr NameAnnotation::Make() {
static const absl::NoDestructor result(std::make_shared<NameAnnotation>(""));
return *result;
}
NameAnnotation::NameAnnotation(std::string aux_policy)
: ExprOperatorWithFixedSignature(
"annotation.name",
ExprOperatorSignature(
{{"expr"}, {"name"}},
std::move(aux_policy)),
"Name annotation.",
FingerprintHasher("::arolla::expr::NameAnnotation").Finish()) {}
absl::StatusOr<ExprAttributes> NameAnnotation::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
if (inputs[1].qtype() && inputs[1].qtype() != GetQType<Text>()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected a TEXT literal, got name: %s", inputs[1].qtype()->name()));
}
if (!inputs[1].qvalue()) {
return absl::InvalidArgumentError("`name` must be a TEXT literal");
}
return inputs[0];
}
ExprOperatorPtr ExportAnnotation::Make() {
static const absl::NoDestructor result(std::make_shared<ExportAnnotation>());
return *result;
}
ExportAnnotation::ExportAnnotation()
: ExprOperatorWithFixedSignature(
"annotation.export", ExprOperatorSignature{{"expr"}, {"export_tag"}},
"Side-channel output annotation.",
FingerprintHasher("::arolla::expr::ExportAnnotation").Finish()) {}
absl::StatusOr<ExprAttributes> ExportAnnotation::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
if (inputs[1].qtype() && inputs[1].qtype() != GetQType<Text>()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected TEXT, got export_tag: %s", inputs[1].qtype()->name()));
}
if (!inputs[1].qvalue()) {
return absl::InvalidArgumentError("`export_tag` must be a TEXT literal");
}
if (inputs[1].qvalue()->UnsafeAs<Text>().view().empty()) {
return absl::InvalidArgumentError("`export_tag` must be non-empty");
}
return inputs[0];
}
ExprOperatorPtr ExportValueAnnotation::Make() {
static const absl::NoDestructor result(
std::make_shared<ExportValueAnnotation>());
return *result;
}
ExportValueAnnotation::ExportValueAnnotation()
: ExprOperatorWithFixedSignature(
"annotation.export_value",
ExprOperatorSignature{{"expr"}, {"export_tag"}, {"value"}},
"Side-channel output annotation.",
FingerprintHasher("::arolla::expr::ExportValueAnnotation").Finish()) {
}
absl::StatusOr<ExprAttributes> ExportValueAnnotation::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
if (inputs[1].qtype() && inputs[1].qtype() != GetQType<Text>()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected TEXT, got export_tag: %s", inputs[1].qtype()->name()));
}
if (!inputs[1].qvalue()) {
return absl::InvalidArgumentError("`export_tag` must be a TEXT literal");
}
if (inputs[1].qvalue()->UnsafeAs<Text>().view().empty()) {
return absl::InvalidArgumentError("`export_tag` must be non-empty");
}
return inputs[0];
}
} | #include "arolla/expr/annotation_expr_operators.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/text.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsAttr;
TEST(AnnotationExprOperatorsTest, QTypeAnnotation) {
auto annotation_qtype = QTypeAnnotation::Make();
EXPECT_THAT(annotation_qtype->InferAttributes({}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incorrect number of dependencies passed to an operator "
"node: expected 2 but got 0"));
EXPECT_THAT(
annotation_qtype->InferAttributes({ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<int64_t>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected QTYPE, got qtype: INT64"));
EXPECT_THAT(
annotation_qtype->InferAttributes({ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<QTypePtr>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`qtype` must be a literal"));
EXPECT_THAT(annotation_qtype->InferAttributes(
{ExprAttributes{},
ExprAttributes{TypedValue::FromValue(GetQType<int64_t>())}}),
IsOkAndHolds(EqualsAttr(GetQType<int64_t>())));
EXPECT_THAT(annotation_qtype->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{TypedValue::FromValue(GetQType<int64_t>())}}),
IsOkAndHolds(EqualsAttr(GetQType<int64_t>())));
EXPECT_THAT(
annotation_qtype->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{TypedValue::FromValue(GetQType<Text>())}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"inconsistent annotation.qtype(expr: INT64, qtype=TEXT)"));
}
TEST(AnnotationExprOperatorsTest, NameAnnotation) {
auto annotation_name = NameAnnotation::Make();
EXPECT_THAT(annotation_name->InferAttributes({}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incorrect number of dependencies passed to an operator "
"node: expected 2 but got 0"));
EXPECT_THAT(
annotation_name->InferAttributes({ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<int64_t>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected a TEXT literal, got name: INT64"));
EXPECT_THAT(annotation_name->InferAttributes(
{ExprAttributes{GetQType<int64_t>()}, ExprAttributes{}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`name` must be a TEXT literal"));
EXPECT_THAT(
annotation_name->InferAttributes({ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<Text>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`name` must be a TEXT literal"));
EXPECT_THAT(annotation_name->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{TypedValue::FromValue(Text("foo"))}}),
IsOkAndHolds(EqualsAttr(GetQType<int64_t>())));
}
TEST(AnnotationExprOperatorsTest, ExportAnnotation) {
auto annotation_export = ExportAnnotation::Make();
EXPECT_THAT(annotation_export->InferAttributes({}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incorrect number of dependencies passed to an operator "
"node: expected 2 but got 0"));
EXPECT_THAT(
annotation_export->InferAttributes({ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<int64_t>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected TEXT, got export_tag: INT64"));
EXPECT_THAT(
annotation_export->InferAttributes({ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<Text>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`export_tag` must be a TEXT literal"));
EXPECT_THAT(annotation_export->InferAttributes(
{ExprAttributes{GetQType<int64_t>()}, ExprAttributes{}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`export_tag` must be a TEXT literal"));
EXPECT_THAT(annotation_export->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{TypedValue::FromValue(Text(""))}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`export_tag` must be non-empty"));
EXPECT_THAT(annotation_export->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{TypedValue::FromValue(Text("foo"))}}),
IsOkAndHolds(EqualsAttr(GetQType<int64_t>())));
}
TEST(AnnotationExprOperatorsTest, ExportValueAnnotation) {
auto annotation_export_value = ExportValueAnnotation::Make();
EXPECT_THAT(annotation_export_value->InferAttributes({}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incorrect number of dependencies passed to an operator "
"node: expected 3 but got 0"));
EXPECT_THAT(annotation_export_value->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<int64_t>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected TEXT, got export_tag: INT64"));
EXPECT_THAT(annotation_export_value->InferAttributes(
{ExprAttributes{GetQType<int64_t>()}, ExprAttributes{},
ExprAttributes{GetQType<int64_t>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`export_tag` must be a TEXT literal"));
EXPECT_THAT(annotation_export_value->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{GetQType<Text>()},
ExprAttributes{GetQType<int64_t>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`export_tag` must be a TEXT literal"));
EXPECT_THAT(annotation_export_value->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{TypedValue::FromValue(Text(""))},
ExprAttributes{GetQType<int64_t>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"`export_tag` must be non-empty"));
EXPECT_THAT(annotation_export_value->InferAttributes(
{ExprAttributes{GetQType<int64_t>()},
ExprAttributes{TypedValue::FromValue(Text("foo"))},
ExprAttributes{GetQType<int64_t>()}}),
IsOkAndHolds(EqualsAttr(GetQType<int64_t>())));
}
}  // namespace
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/annotation_expr_operators.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/annotation_expr_operators_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9660c787-ae03-4bd7-9c11-73f07e914f24 | cpp | tensorflow/tensorflow | unbounded_work_queue | third_party/xla/third_party/tsl/tsl/platform/default/unbounded_work_queue.cc | third_party/xla/third_party/tsl/tsl/platform/unbounded_work_queue_test.cc | #include "tsl/platform/default/unbounded_work_queue.h"
#include "absl/memory/memory.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/numa.h"
namespace tsl {
UnboundedWorkQueue::UnboundedWorkQueue(Env* env, const string& thread_name,
const ThreadOptions& thread_options)
: env_(env), thread_name_(thread_name), thread_options_(thread_options) {}
UnboundedWorkQueue::~UnboundedWorkQueue() {
{
mutex_lock l(work_queue_mu_);
cancelled_ = true;
work_queue_cv_.notify_all();
if (!work_queue_.empty()) {
LOG(ERROR) << "UnboundedWorkQueue named \"" << thread_name_ << "\" was "
<< "deleted with pending work in its queue. This may indicate "
<< "a potential use-after-free bug.";
}
}
{
mutex_lock l(thread_pool_mu_);
thread_pool_.clear();
}
}
void UnboundedWorkQueue::Schedule(WorkFunction fn) {
mutex_lock l(work_queue_mu_);
work_queue_.push_back(std::move(fn));
work_queue_cv_.notify_one();
if (work_queue_.size() > num_idle_threads_) {
Thread* new_thread =
env_->StartThread({}, thread_name_, [this]() { PooledThreadFunc(); });
mutex_lock l(thread_pool_mu_);
thread_pool_.emplace_back(new_thread);
}
}
void UnboundedWorkQueue::PooledThreadFunc() {
if (thread_options_.numa_node != tsl::port::kNUMANoAffinity) {
tsl::port::NUMASetThreadNodeAffinity(thread_options_.numa_node);
}
while (true) {
WorkFunction fn;
{
mutex_lock l(work_queue_mu_);
++num_idle_threads_;
while (!cancelled_ && work_queue_.empty()) {
work_queue_cv_.wait(l);
}
if (cancelled_) {
return;
}
fn = std::move(work_queue_.front());
work_queue_.pop_front();
--num_idle_threads_;
}
fn();
}
}
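// Example usage (a sketch, not part of this file): work may be scheduled
// from any thread, including from a closure already running on the queue.
//
//   UnboundedWorkQueue queue(Env::Default(), "example_queue");
//   BlockingCounter counter(1);
//   queue.Schedule([&counter] { counter.DecrementCount(); });
//   counter.Wait();  // Blocks until the closure has run.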
} | #include "tsl/platform/unbounded_work_queue.h"
#include "absl/memory/memory.h"
#include "tsl/platform/random.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
class UnboundedWorkQueueTest : public ::testing::Test {
protected:
UnboundedWorkQueueTest()
: work_queue_(
absl::make_unique<UnboundedWorkQueue>(Env::Default(), "test")) {}
~UnboundedWorkQueueTest() override = default;
void RunMultipleCopiesOfClosure(const int num_closures,
std::function<void()> fn) {
for (int i = 0; i < num_closures; ++i) {
work_queue_->Schedule([this, fn]() {
fn();
mutex_lock l(mu_);
++closure_count_;
cond_var_.notify_all();
});
}
}
void BlockUntilClosuresDone(const int num_closures) {
mutex_lock l(mu_);
while (closure_count_ < num_closures) {
cond_var_.wait(l);
}
}
void ResetQueue() { work_queue_.reset(); }
int NumClosuresExecuted() {
mutex_lock l(mu_);
return closure_count_;
}
private:
mutex mu_;
int closure_count_ TF_GUARDED_BY(mu_) = 0;
condition_variable cond_var_;
std::unique_ptr<UnboundedWorkQueue> work_queue_;
};
TEST_F(UnboundedWorkQueueTest, SingleClosure) {
constexpr int num_closures = 1;
RunMultipleCopiesOfClosure(num_closures, []() {});
BlockUntilClosuresDone(num_closures);
}
TEST_F(UnboundedWorkQueueTest, MultipleClosures) {
constexpr int num_closures = 10;
RunMultipleCopiesOfClosure(num_closures, []() {});
BlockUntilClosuresDone(num_closures);
}
TEST_F(UnboundedWorkQueueTest, MultipleClosuresSleepingRandomly) {
constexpr int num_closures = 1000;
RunMultipleCopiesOfClosure(num_closures, []() {
Env::Default()->SleepForMicroseconds(random::New64() % 10);
});
BlockUntilClosuresDone(num_closures);
}
TEST_F(UnboundedWorkQueueTest, NestedClosures) {
constexpr int num_closures = 10;
RunMultipleCopiesOfClosure(num_closures, [=]() {
RunMultipleCopiesOfClosure(num_closures, []() {});
});
BlockUntilClosuresDone(num_closures * num_closures + num_closures);
}
TEST_F(UnboundedWorkQueueTest, RacyDestructor) {
constexpr int num_closures = 100;
RunMultipleCopiesOfClosure(num_closures, []() {});
ResetQueue();
EXPECT_LE(NumClosuresExecuted(), num_closures);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/unbounded_work_queue.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/unbounded_work_queue_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8113d2ce-c1e9-467e-9112-386cd6cca64a | cpp | google/quiche | quiche_intrusive_list | quiche/common/quiche_intrusive_list.h | quiche/common/quiche_intrusive_list_test.cc | #ifndef QUICHE_COMMON_QUICHE_INTRUSIVE_LIST_H_
#define QUICHE_COMMON_QUICHE_INTRUSIVE_LIST_H_
#include <stddef.h>
#include <cstddef>
#include <iterator>
#include "quiche/common/platform/api/quiche_export.h"
namespace quiche {
template <typename T, typename ListID>
class QuicheIntrusiveList;
template <typename T, typename ListID = void>
class QUICHE_EXPORT QuicheIntrusiveLink {
protected:
QuicheIntrusiveLink() : next_(nullptr), prev_(nullptr) {}
#ifndef SWIG
QuicheIntrusiveLink(const QuicheIntrusiveLink&) = delete;
QuicheIntrusiveLink& operator=(const QuicheIntrusiveLink&) = delete;
#endif
private:
friend class QuicheIntrusiveList<T, ListID>;
T* cast_to_derived() { return static_cast<T*>(this); }
const T* cast_to_derived() const { return static_cast<const T*>(this); }
QuicheIntrusiveLink* next_;
QuicheIntrusiveLink* prev_;
};
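// Example (a sketch mirroring the accompanying test): one element can live
// on several lists simultaneously by inheriting one link per ListID tag.
//
//   struct ListA {};
//   struct ListB {};
//   struct Session : QuicheIntrusiveLink<Session, ListA>,
//                    QuicheIntrusiveLink<Session, ListB> {};
//
//   QuicheIntrusiveList<Session, ListA> active;
//   QuicheIntrusiveList<Session, ListB> pending;
//   Session s;
//   active.push_back(&s);   // Lists never own their elements.
//   pending.push_back(&s);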
template <typename T, typename ListID = void>
class QUICHE_EXPORT QuicheIntrusiveList {
template <typename QualifiedT, typename QualifiedLinkT>
class iterator_impl;
public:
typedef T value_type;
typedef value_type* pointer;
typedef const value_type* const_pointer;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef QuicheIntrusiveLink<T, ListID> link_type;
typedef iterator_impl<T, link_type> iterator;
typedef iterator_impl<const T, const link_type> const_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
QuicheIntrusiveList() { clear(); }
#ifndef SWIG
QuicheIntrusiveList(QuicheIntrusiveList&& src) noexcept {
clear();
if (src.empty()) return;
sentinel_link_.next_ = src.sentinel_link_.next_;
sentinel_link_.prev_ = src.sentinel_link_.prev_;
sentinel_link_.prev_->next_ = &sentinel_link_;
sentinel_link_.next_->prev_ = &sentinel_link_;
src.clear();
}
#endif
iterator begin() { return iterator(sentinel_link_.next_); }
const_iterator begin() const { return const_iterator(sentinel_link_.next_); }
iterator end() { return iterator(&sentinel_link_); }
const_iterator end() const { return const_iterator(&sentinel_link_); }
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
bool empty() const { return (sentinel_link_.next_ == &sentinel_link_); }
size_type size() const { return std::distance(begin(), end()); }
size_type max_size() const { return size_type(-1); }
reference front() { return *begin(); }
const_reference front() const { return *begin(); }
reference back() { return *(--end()); }
const_reference back() const { return *(--end()); }
static iterator insert(iterator position, T* obj) {
return insert_link(position.link(), obj);
}
void push_front(T* obj) { insert(begin(), obj); }
void push_back(T* obj) { insert(end(), obj); }
static iterator erase(T* obj) {
link_type* obj_link = obj;
obj_link->next_->prev_ = obj_link->prev_;
obj_link->prev_->next_ = obj_link->next_;
link_type* next_link = obj_link->next_;
obj_link->next_ = nullptr;
obj_link->prev_ = nullptr;
return iterator(next_link);
}
static iterator erase(iterator position) {
return erase(position.operator->());
}
void pop_front() { erase(begin()); }
void pop_back() { erase(--end()); }
static bool is_linked(const T* obj) {
return obj->link_type::next_ != nullptr;
}
void clear() {
sentinel_link_.next_ = sentinel_link_.prev_ = &sentinel_link_;
}
void swap(QuicheIntrusiveList& x) {
QuicheIntrusiveList tmp;
tmp.splice(tmp.begin(), *this);
this->splice(this->begin(), x);
x.splice(x.begin(), tmp);
}
void splice(iterator pos, QuicheIntrusiveList& src) {
splice(pos, src.begin(), src.end());
}
void splice(iterator pos, iterator i) { splice(pos, i, std::next(i)); }
void splice(iterator pos, iterator first, iterator last) {
if (first == last) return;
link_type* const last_prev = last.link()->prev_;
first.link()->prev_->next_ = last.operator->();
last.link()->prev_ = first.link()->prev_;
first.link()->prev_ = pos.link()->prev_;
pos.link()->prev_->next_ = first.operator->();
last_prev->next_ = pos.operator->();
pos.link()->prev_ = last_prev;
}
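  // splice() is pure pointer surgery: nothing is copied, allocated, or
  // destroyed, so moving any number of elements is O(1). A sketch with a
  // hypothetical element type:
  //
  //   struct Job : QuicheIntrusiveLink<Job> {};
  //   QuicheIntrusiveList<Job> todo, done;
  //   Job a, b;
  //   todo.push_back(&a);
  //   todo.push_back(&b);
  //   done.splice(done.end(), todo);  // todo is now empty.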
private:
static iterator insert_link(link_type* next_link, T* obj) {
link_type* obj_link = obj;
obj_link->next_ = next_link;
link_type* const initial_next_prev = next_link->prev_;
obj_link->prev_ = initial_next_prev;
initial_next_prev->next_ = obj_link;
next_link->prev_ = obj_link;
return iterator(obj_link);
}
template <typename QualifiedT, typename QualifiedLinkT>
class QUICHE_EXPORT iterator_impl {
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = QualifiedT;
using difference_type = std::ptrdiff_t;
using pointer = QualifiedT*;
using reference = QualifiedT&;
iterator_impl() = default;
iterator_impl(QualifiedLinkT* link) : link_(link) {}
iterator_impl(const iterator_impl& x) = default;
iterator_impl& operator=(const iterator_impl& x) = default;
template <typename U, typename V>
iterator_impl(const iterator_impl<U, V>& x) : link_(x.link_) {}
template <typename U, typename V>
bool operator==(const iterator_impl<U, V>& x) const {
return link_ == x.link_;
}
template <typename U, typename V>
bool operator!=(const iterator_impl<U, V>& x) const {
return link_ != x.link_;
}
reference operator*() const { return *operator->(); }
pointer operator->() const { return link_->cast_to_derived(); }
QualifiedLinkT* link() const { return link_; }
#ifndef SWIG
iterator_impl& operator++() {
link_ = link_->next_;
return *this;
}
    iterator_impl operator++(int /*unused*/) {
iterator_impl tmp = *this;
++*this;
return tmp;
}
iterator_impl& operator--() {
link_ = link_->prev_;
return *this;
}
    iterator_impl operator--(int /*unused*/) {
iterator_impl tmp = *this;
--*this;
return tmp;
}
#endif
private:
template <typename U, typename V>
friend class iterator_impl;
QualifiedLinkT* link_ = nullptr;
};
link_type sentinel_link_;
QuicheIntrusiveList(const QuicheIntrusiveList&);
void operator=(const QuicheIntrusiveList&);
};
}  // namespace quiche
#endif  // QUICHE_COMMON_QUICHE_INTRUSIVE_LIST_H_ | #include "quiche/common/quiche_intrusive_list.h"
#include <algorithm>
#include <iterator>
#include <list>
#include <string>
#include <utility>
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
struct ListId2 {};
struct TestItem : public QuicheIntrusiveLink<TestItem>,
public QuicheIntrusiveLink<TestItem, ListId2> {
int n;
};
typedef QuicheIntrusiveList<TestItem> TestList;
typedef std::list<TestItem *> CanonicalList;
void swap(TestItem &a, TestItem &b) {
using std::swap;
swap(a.n, b.n);
}
class IntrusiveListTest : public quiche::test::QuicheTest {
protected:
void CheckLists() {
CheckLists(l1, ll1);
if (quiche::test::QuicheTest::HasFailure()) return;
CheckLists(l2, ll2);
}
void CheckLists(const TestList &list_a, const CanonicalList &list_b) {
ASSERT_EQ(list_a.size(), list_b.size());
TestList::const_iterator it_a = list_a.begin();
CanonicalList::const_iterator it_b = list_b.begin();
while (it_a != list_a.end()) {
EXPECT_EQ(&*it_a++, *it_b++);
}
EXPECT_EQ(list_a.end(), it_a);
EXPECT_EQ(list_b.end(), it_b);
}
void PrepareLists(int num_elems_1, int num_elems_2 = 0) {
FillLists(&l1, &ll1, e, num_elems_1);
FillLists(&l2, &ll2, e + num_elems_1, num_elems_2);
}
void FillLists(TestList *list_a, CanonicalList *list_b, TestItem *elems,
int num_elems) {
list_a->clear();
list_b->clear();
for (int i = 0; i < num_elems; ++i) {
list_a->push_back(elems + i);
list_b->push_back(elems + i);
}
CheckLists(*list_a, *list_b);
}
TestItem e[10];
TestList l1, l2;
CanonicalList ll1, ll2;
};
TEST(NewIntrusiveListTest, Basic) {
TestList list1;
EXPECT_EQ(sizeof(QuicheIntrusiveLink<TestItem>), sizeof(void *) * 2);
for (int i = 0; i < 10; ++i) {
TestItem *e = new TestItem;
e->n = i;
list1.push_front(e);
}
EXPECT_EQ(list1.size(), 10u);
std::reverse(list1.begin(), list1.end());
EXPECT_EQ(list1.size(), 10u);
const TestList &clist1 = list1;
int i = 0;
TestList::iterator iter = list1.begin();
for (; iter != list1.end(); ++iter, ++i) {
EXPECT_EQ(iter->n, i);
}
EXPECT_EQ(iter, clist1.end());
EXPECT_NE(iter, clist1.begin());
i = 0;
iter = list1.begin();
for (; iter != list1.end(); ++iter, ++i) {
EXPECT_EQ(iter->n, i);
}
EXPECT_EQ(iter, clist1.end());
EXPECT_NE(iter, clist1.begin());
EXPECT_EQ(list1.front().n, 0);
EXPECT_EQ(list1.back().n, 9);
TestList list2;
list2.swap(list1);
EXPECT_EQ(list1.size(), 0u);
EXPECT_EQ(list2.size(), 10u);
const TestList &clist2 = list2;
TestList::reverse_iterator riter = list2.rbegin();
i = 9;
for (; riter != list2.rend(); ++riter, --i) {
EXPECT_EQ(riter->n, i);
}
EXPECT_EQ(riter, clist2.rend());
EXPECT_NE(riter, clist2.rbegin());
riter = list2.rbegin();
i = 9;
for (; riter != list2.rend(); ++riter, --i) {
EXPECT_EQ(riter->n, i);
}
EXPECT_EQ(riter, clist2.rend());
EXPECT_NE(riter, clist2.rbegin());
while (!list2.empty()) {
TestItem *e = &list2.front();
list2.pop_front();
delete e;
}
}
TEST(NewIntrusiveListTest, Erase) {
TestList l;
TestItem *e[10];
for (int i = 0; i < 10; ++i) {
e[i] = new TestItem;
l.push_front(e[i]);
}
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(l.size(), (10u - i));
TestList::iterator iter = l.erase(e[i]);
EXPECT_NE(iter, TestList::iterator(e[i]));
EXPECT_EQ(l.size(), (10u - i - 1));
delete e[i];
}
}
TEST(NewIntrusiveListTest, Insert) {
TestList l;
TestList::iterator iter = l.end();
TestItem *e[10];
for (int i = 9; i >= 0; --i) {
e[i] = new TestItem;
iter = l.insert(iter, e[i]);
EXPECT_EQ(&(*iter), e[i]);
}
EXPECT_EQ(l.size(), 10u);
iter = l.begin();
for (TestItem *item : e) {
EXPECT_EQ(&(*iter), item);
iter = l.erase(item);
delete item;
}
}
TEST(NewIntrusiveListTest, Move) {
{
TestList src;
TestList dest(std::move(src));
EXPECT_TRUE(dest.empty());
}
{
TestItem e;
TestList src;
src.push_front(&e);
TestList dest(std::move(src));
EXPECT_TRUE(src.empty());
ASSERT_THAT(dest.size(), 1);
EXPECT_THAT(&dest.front(), &e);
EXPECT_THAT(&dest.back(), &e);
}
{
TestItem items[10];
TestList src;
for (TestItem &e : items) src.push_back(&e);
TestList dest(std::move(src));
EXPECT_TRUE(src.empty());
ASSERT_THAT(dest.size(), 10);
int i = 0;
for (TestItem &e : dest) {
EXPECT_THAT(&e, &items[i++]) << " for index " << i;
}
}
}
TEST(NewIntrusiveListTest, StaticInsertErase) {
TestList l;
TestItem e[2];
TestList::iterator i = l.begin();
TestList::insert(i, &e[0]);
TestList::insert(&e[0], &e[1]);
TestList::erase(&e[0]);
TestList::erase(TestList::iterator(&e[1]));
EXPECT_TRUE(l.empty());
}
TEST_F(IntrusiveListTest, Splice) {
QuicheIntrusiveList<TestItem, ListId2> secondary_list;
for (int i = 0; i < 3; ++i) {
secondary_list.push_back(&e[i]);
}
for (int l1_count = 0; l1_count < 3; ++l1_count) {
for (int l2_count = 0; l2_count < 3; ++l2_count) {
for (int pos = 0; pos <= l1_count; ++pos) {
for (int first = 0; first <= l2_count; ++first) {
for (int last = first; last <= l2_count; ++last) {
PrepareLists(l1_count, l2_count);
l1.splice(std::next(l1.begin(), pos), std::next(l2.begin(), first),
std::next(l2.begin(), last));
ll1.splice(std::next(ll1.begin(), pos), ll2,
std::next(ll2.begin(), first),
std::next(ll2.begin(), last));
CheckLists();
ASSERT_EQ(3u, secondary_list.size());
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(&e[i], &*std::next(secondary_list.begin(), i));
}
}
}
}
}
}
}
struct BaseLinkId {};
struct DerivedLinkId {};
struct AbstractBase : public QuicheIntrusiveLink<AbstractBase, BaseLinkId> {
virtual ~AbstractBase() = 0;
virtual std::string name() { return "AbstractBase"; }
};
AbstractBase::~AbstractBase() {}
struct DerivedClass : public QuicheIntrusiveLink<DerivedClass, DerivedLinkId>,
public AbstractBase {
~DerivedClass() override {}
std::string name() override { return "DerivedClass"; }
};
struct VirtuallyDerivedBaseClass : public virtual AbstractBase {
~VirtuallyDerivedBaseClass() override = 0;
std::string name() override { return "VirtuallyDerivedBaseClass"; }
};
VirtuallyDerivedBaseClass::~VirtuallyDerivedBaseClass() {}
struct VirtuallyDerivedClassA
: public QuicheIntrusiveLink<VirtuallyDerivedClassA, DerivedLinkId>,
public virtual VirtuallyDerivedBaseClass {
~VirtuallyDerivedClassA() override {}
std::string name() override { return "VirtuallyDerivedClassA"; }
};
struct NonceClass {
virtual ~NonceClass() {}
int data_;
};
struct VirtuallyDerivedClassB
: public QuicheIntrusiveLink<VirtuallyDerivedClassB, DerivedLinkId>,
public virtual NonceClass,
public virtual VirtuallyDerivedBaseClass {
~VirtuallyDerivedClassB() override {}
std::string name() override { return "VirtuallyDerivedClassB"; }
};
struct VirtuallyDerivedClassC
: public QuicheIntrusiveLink<VirtuallyDerivedClassC, DerivedLinkId>,
public virtual AbstractBase,
public virtual NonceClass,
public virtual VirtuallyDerivedBaseClass {
~VirtuallyDerivedClassC() override {}
std::string name() override { return "VirtuallyDerivedClassC"; }
};
namespace templated_base_link {
template <typename T>
struct AbstractBase : public QuicheIntrusiveLink<T> {
virtual ~AbstractBase() = 0;
};
template <typename T>
AbstractBase<T>::~AbstractBase() {}
struct DerivedClass : public AbstractBase<DerivedClass> {
int n;
};
}  // namespace templated_base_link
TEST(NewIntrusiveListTest, HandleInheritanceHierarchies) {
{
QuicheIntrusiveList<DerivedClass, DerivedLinkId> list;
DerivedClass elements[2];
EXPECT_TRUE(list.empty());
list.push_back(&elements[0]);
EXPECT_EQ(1u, list.size());
list.push_back(&elements[1]);
EXPECT_EQ(2u, list.size());
list.pop_back();
EXPECT_EQ(1u, list.size());
list.pop_back();
EXPECT_TRUE(list.empty());
}
{
QuicheIntrusiveList<VirtuallyDerivedClassA, DerivedLinkId> list;
VirtuallyDerivedClassA elements[2];
EXPECT_TRUE(list.empty());
list.push_back(&elements[0]);
EXPECT_EQ(1u, list.size());
list.push_back(&elements[1]);
EXPECT_EQ(2u, list.size());
list.pop_back();
EXPECT_EQ(1u, list.size());
list.pop_back();
EXPECT_TRUE(list.empty());
}
{
QuicheIntrusiveList<VirtuallyDerivedClassC, DerivedLinkId> list;
VirtuallyDerivedClassC elements[2];
EXPECT_TRUE(list.empty());
list.push_back(&elements[0]);
EXPECT_EQ(1u, list.size());
list.push_back(&elements[1]);
EXPECT_EQ(2u, list.size());
list.pop_back();
EXPECT_EQ(1u, list.size());
list.pop_back();
EXPECT_TRUE(list.empty());
}
{
QuicheIntrusiveList<AbstractBase, BaseLinkId> list;
DerivedClass d1;
VirtuallyDerivedClassA d2;
VirtuallyDerivedClassB d3;
VirtuallyDerivedClassC d4;
EXPECT_TRUE(list.empty());
list.push_back(&d1);
EXPECT_EQ(1u, list.size());
list.push_back(&d2);
EXPECT_EQ(2u, list.size());
list.push_back(&d3);
EXPECT_EQ(3u, list.size());
list.push_back(&d4);
EXPECT_EQ(4u, list.size());
QuicheIntrusiveList<AbstractBase, BaseLinkId>::iterator it = list.begin();
EXPECT_EQ("DerivedClass", (it++)->name());
EXPECT_EQ("VirtuallyDerivedClassA", (it++)->name());
EXPECT_EQ("VirtuallyDerivedClassB", (it++)->name());
EXPECT_EQ("VirtuallyDerivedClassC", (it++)->name());
}
{
QuicheIntrusiveList<templated_base_link::DerivedClass> list;
templated_base_link::DerivedClass elements[2];
EXPECT_TRUE(list.empty());
list.push_back(&elements[0]);
EXPECT_EQ(1u, list.size());
list.push_back(&elements[1]);
EXPECT_EQ(2u, list.size());
list.pop_back();
EXPECT_EQ(1u, list.size());
list.pop_back();
EXPECT_TRUE(list.empty());
}
}
class IntrusiveListTagTypeTest : public quiche::test::QuicheTest {
protected:
struct Tag {};
class Element : public QuicheIntrusiveLink<Element, Tag> {};
};
TEST_F(IntrusiveListTagTypeTest, TagTypeListID) {
QuicheIntrusiveList<Element, Tag> list;
{
Element e;
list.push_back(&e);
}
}
}  // namespace test
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_intrusive_list.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_intrusive_list_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
12d35529-f1f7-471b-bef8-c74ef2f32ec8 | cpp | google/quiche | quic_config | quiche/quic/core/quic_config.cc | quiche/quic/core/quic_config_test.cc | #include "quiche/quic/core/quic_config.h"
#include <algorithm>
#include <cstring>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_socket_address_coder.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
namespace quic {
QuicErrorCode ReadUint32(const CryptoHandshakeMessage& msg, QuicTag tag,
QuicConfigPresence presence, uint32_t default_value,
uint32_t* out, std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
QuicErrorCode error = msg.GetUint32(tag, out);
switch (error) {
case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND:
if (presence == PRESENCE_REQUIRED) {
*error_details = "Missing " + QuicTagToString(tag);
break;
}
error = QUIC_NO_ERROR;
*out = default_value;
break;
case QUIC_NO_ERROR:
break;
default:
*error_details = "Bad " + QuicTagToString(tag);
break;
}
return error;
}
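// Example call (a sketch; `msg` is a hypothetical handshake message): an
// optional tag that is absent yields the supplied default instead of an
// error.
//
//   uint32_t irtt = 0;
//   std::string error;
//   QuicErrorCode e = ReadUint32(msg, kIRTT, PRESENCE_OPTIONAL,
//                                /*default_value=*/100, &irtt, &error);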
QuicConfigValue::QuicConfigValue(QuicTag tag, QuicConfigPresence presence)
: tag_(tag), presence_(presence) {}
QuicConfigValue::~QuicConfigValue() {}
QuicFixedUint32::QuicFixedUint32(QuicTag tag, QuicConfigPresence presence)
: QuicConfigValue(tag, presence),
has_send_value_(false),
has_receive_value_(false) {}
QuicFixedUint32::~QuicFixedUint32() {}
bool QuicFixedUint32::HasSendValue() const { return has_send_value_; }
uint32_t QuicFixedUint32::GetSendValue() const {
QUIC_BUG_IF(quic_bug_12743_1, !has_send_value_)
<< "No send value to get for tag:" << QuicTagToString(tag_);
return send_value_;
}
void QuicFixedUint32::SetSendValue(uint32_t value) {
has_send_value_ = true;
send_value_ = value;
}
bool QuicFixedUint32::HasReceivedValue() const { return has_receive_value_; }
uint32_t QuicFixedUint32::GetReceivedValue() const {
QUIC_BUG_IF(quic_bug_12743_2, !has_receive_value_)
<< "No receive value to get for tag:" << QuicTagToString(tag_);
return receive_value_;
}
void QuicFixedUint32::SetReceivedValue(uint32_t value) {
has_receive_value_ = true;
receive_value_ = value;
}
void QuicFixedUint32::ToHandshakeMessage(CryptoHandshakeMessage* out) const {
if (tag_ == 0) {
QUIC_BUG(quic_bug_12743_3)
<< "This parameter does not support writing to CryptoHandshakeMessage";
return;
}
if (has_send_value_) {
out->SetValue(tag_, send_value_);
}
}
QuicErrorCode QuicFixedUint32::ProcessPeerHello(
    const CryptoHandshakeMessage& peer_hello, HelloType /*hello_type*/,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
if (tag_ == 0) {
*error_details =
"This parameter does not support reading from CryptoHandshakeMessage";
QUIC_BUG(quic_bug_10575_1) << *error_details;
return QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND;
}
QuicErrorCode error = peer_hello.GetUint32(tag_, &receive_value_);
switch (error) {
case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND:
if (presence_ == PRESENCE_OPTIONAL) {
return QUIC_NO_ERROR;
}
*error_details = "Missing " + QuicTagToString(tag_);
break;
case QUIC_NO_ERROR:
has_receive_value_ = true;
break;
default:
*error_details = "Bad " + QuicTagToString(tag_);
break;
}
return error;
}
QuicFixedUint62::QuicFixedUint62(QuicTag name, QuicConfigPresence presence)
: QuicConfigValue(name, presence),
has_send_value_(false),
has_receive_value_(false) {}
QuicFixedUint62::~QuicFixedUint62() {}
bool QuicFixedUint62::HasSendValue() const { return has_send_value_; }
uint64_t QuicFixedUint62::GetSendValue() const {
if (!has_send_value_) {
QUIC_BUG(quic_bug_10575_2)
<< "No send value to get for tag:" << QuicTagToString(tag_);
return 0;
}
return send_value_;
}
void QuicFixedUint62::SetSendValue(uint64_t value) {
if (value > quiche::kVarInt62MaxValue) {
QUIC_BUG(quic_bug_10575_3) << "QuicFixedUint62 invalid value " << value;
value = quiche::kVarInt62MaxValue;
}
has_send_value_ = true;
send_value_ = value;
}
bool QuicFixedUint62::HasReceivedValue() const { return has_receive_value_; }
uint64_t QuicFixedUint62::GetReceivedValue() const {
if (!has_receive_value_) {
QUIC_BUG(quic_bug_10575_4)
<< "No receive value to get for tag:" << QuicTagToString(tag_);
return 0;
}
return receive_value_;
}
void QuicFixedUint62::SetReceivedValue(uint64_t value) {
has_receive_value_ = true;
receive_value_ = value;
}
void QuicFixedUint62::ToHandshakeMessage(CryptoHandshakeMessage* out) const {
if (!has_send_value_) {
return;
}
uint32_t send_value32;
if (send_value_ > std::numeric_limits<uint32_t>::max()) {
QUIC_BUG(quic_bug_10575_5) << "Attempting to send " << send_value_
<< " for tag:" << QuicTagToString(tag_);
send_value32 = std::numeric_limits<uint32_t>::max();
} else {
send_value32 = static_cast<uint32_t>(send_value_);
}
out->SetValue(tag_, send_value32);
}
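// Note the asymmetry: the value is stored as a 62-bit integer, but the
// crypto handshake wire format is 32-bit, so larger values are clamped
// here (with a QUIC_BUG). Sketch:
//
//   QuicFixedUint62 value(kMIBS, PRESENCE_OPTIONAL);
//   value.SetSendValue(uint64_t{1} << 40);  // fits in 62 bits
//   // ToHandshakeMessage() would write 0xFFFFFFFF for kMIBS.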
QuicErrorCode QuicFixedUint62::ProcessPeerHello(
    const CryptoHandshakeMessage& peer_hello, HelloType /*hello_type*/,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
uint32_t receive_value32;
QuicErrorCode error = peer_hello.GetUint32(tag_, &receive_value32);
receive_value_ = receive_value32;
switch (error) {
case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND:
if (presence_ == PRESENCE_OPTIONAL) {
return QUIC_NO_ERROR;
}
*error_details = "Missing " + QuicTagToString(tag_);
break;
case QUIC_NO_ERROR:
has_receive_value_ = true;
break;
default:
*error_details = "Bad " + QuicTagToString(tag_);
break;
}
return error;
}
QuicFixedStatelessResetToken::QuicFixedStatelessResetToken(
QuicTag tag, QuicConfigPresence presence)
: QuicConfigValue(tag, presence),
has_send_value_(false),
has_receive_value_(false) {}
QuicFixedStatelessResetToken::~QuicFixedStatelessResetToken() {}
bool QuicFixedStatelessResetToken::HasSendValue() const {
return has_send_value_;
}
const StatelessResetToken& QuicFixedStatelessResetToken::GetSendValue() const {
QUIC_BUG_IF(quic_bug_12743_4, !has_send_value_)
<< "No send value to get for tag:" << QuicTagToString(tag_);
return send_value_;
}
void QuicFixedStatelessResetToken::SetSendValue(
const StatelessResetToken& value) {
has_send_value_ = true;
send_value_ = value;
}
bool QuicFixedStatelessResetToken::HasReceivedValue() const {
return has_receive_value_;
}
const StatelessResetToken& QuicFixedStatelessResetToken::GetReceivedValue()
const {
QUIC_BUG_IF(quic_bug_12743_5, !has_receive_value_)
<< "No receive value to get for tag:" << QuicTagToString(tag_);
return receive_value_;
}
void QuicFixedStatelessResetToken::SetReceivedValue(
const StatelessResetToken& value) {
has_receive_value_ = true;
receive_value_ = value;
}
void QuicFixedStatelessResetToken::ToHandshakeMessage(
CryptoHandshakeMessage* out) const {
if (has_send_value_) {
out->SetValue(tag_, send_value_);
}
}
QuicErrorCode QuicFixedStatelessResetToken::ProcessPeerHello(
    const CryptoHandshakeMessage& peer_hello, HelloType /*hello_type*/,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
QuicErrorCode error =
peer_hello.GetStatelessResetToken(tag_, &receive_value_);
switch (error) {
case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND:
if (presence_ == PRESENCE_OPTIONAL) {
return QUIC_NO_ERROR;
}
*error_details = "Missing " + QuicTagToString(tag_);
break;
case QUIC_NO_ERROR:
has_receive_value_ = true;
break;
default:
*error_details = "Bad " + QuicTagToString(tag_);
break;
}
return error;
}
QuicFixedTagVector::QuicFixedTagVector(QuicTag name,
QuicConfigPresence presence)
: QuicConfigValue(name, presence),
has_send_values_(false),
has_receive_values_(false) {}
QuicFixedTagVector::QuicFixedTagVector(const QuicFixedTagVector& other) =
default;
QuicFixedTagVector::~QuicFixedTagVector() {}
bool QuicFixedTagVector::HasSendValues() const { return has_send_values_; }
const QuicTagVector& QuicFixedTagVector::GetSendValues() const {
QUIC_BUG_IF(quic_bug_12743_6, !has_send_values_)
<< "No send values to get for tag:" << QuicTagToString(tag_);
return send_values_;
}
void QuicFixedTagVector::SetSendValues(const QuicTagVector& values) {
has_send_values_ = true;
send_values_ = values;
}
bool QuicFixedTagVector::HasReceivedValues() const {
return has_receive_values_;
}
const QuicTagVector& QuicFixedTagVector::GetReceivedValues() const {
QUIC_BUG_IF(quic_bug_12743_7, !has_receive_values_)
<< "No receive value to get for tag:" << QuicTagToString(tag_);
return receive_values_;
}
void QuicFixedTagVector::SetReceivedValues(const QuicTagVector& values) {
has_receive_values_ = true;
receive_values_ = values;
}
void QuicFixedTagVector::ToHandshakeMessage(CryptoHandshakeMessage* out) const {
if (has_send_values_) {
out->SetVector(tag_, send_values_);
}
}
QuicErrorCode QuicFixedTagVector::ProcessPeerHello(
    const CryptoHandshakeMessage& peer_hello, HelloType /*hello_type*/,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
QuicTagVector values;
QuicErrorCode error = peer_hello.GetTaglist(tag_, &values);
switch (error) {
case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND:
if (presence_ == PRESENCE_OPTIONAL) {
return QUIC_NO_ERROR;
}
*error_details = "Missing " + QuicTagToString(tag_);
break;
case QUIC_NO_ERROR:
QUIC_DVLOG(1) << "Received Connection Option tags from receiver.";
has_receive_values_ = true;
receive_values_.insert(receive_values_.end(), values.begin(),
values.end());
break;
default:
*error_details = "Bad " + QuicTagToString(tag_);
break;
}
return error;
}
QuicFixedSocketAddress::QuicFixedSocketAddress(QuicTag tag,
QuicConfigPresence presence)
: QuicConfigValue(tag, presence),
has_send_value_(false),
has_receive_value_(false) {}
QuicFixedSocketAddress::~QuicFixedSocketAddress() {}
bool QuicFixedSocketAddress::HasSendValue() const { return has_send_value_; }
const QuicSocketAddress& QuicFixedSocketAddress::GetSendValue() const {
QUIC_BUG_IF(quic_bug_12743_8, !has_send_value_)
<< "No send value to get for tag:" << QuicTagToString(tag_);
return send_value_;
}
void QuicFixedSocketAddress::SetSendValue(const QuicSocketAddress& value) {
has_send_value_ = true;
send_value_ = value;
}
void QuicFixedSocketAddress::ClearSendValue() {
has_send_value_ = false;
send_value_ = QuicSocketAddress();
}
bool QuicFixedSocketAddress::HasReceivedValue() const {
return has_receive_value_;
}
const QuicSocketAddress& QuicFixedSocketAddress::GetReceivedValue() const {
QUIC_BUG_IF(quic_bug_12743_9, !has_receive_value_)
<< "No receive value to get for tag:" << QuicTagToString(tag_);
return receive_value_;
}
void QuicFixedSocketAddress::SetReceivedValue(const QuicSocketAddress& value) {
has_receive_value_ = true;
receive_value_ = value;
}
void QuicFixedSocketAddress::ToHandshakeMessage(
CryptoHandshakeMessage* out) const {
if (has_send_value_) {
QuicSocketAddressCoder address_coder(send_value_);
out->SetStringPiece(tag_, address_coder.Encode());
}
}
QuicErrorCode QuicFixedSocketAddress::ProcessPeerHello(
    const CryptoHandshakeMessage& peer_hello, HelloType /*hello_type*/,
std::string* error_details) {
absl::string_view address;
if (!peer_hello.GetStringPiece(tag_, &address)) {
if (presence_ == PRESENCE_REQUIRED) {
*error_details = "Missing " + QuicTagToString(tag_);
return QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND;
}
} else {
QuicSocketAddressCoder address_coder;
if (address_coder.Decode(address.data(), address.length())) {
SetReceivedValue(
QuicSocketAddress(address_coder.ip(), address_coder.port()));
}
}
return QUIC_NO_ERROR;
}
QuicConfig::QuicConfig()
: negotiated_(false),
max_time_before_crypto_handshake_(QuicTime::Delta::Zero()),
max_idle_time_before_crypto_handshake_(QuicTime::Delta::Zero()),
max_undecryptable_packets_(0),
connection_options_(kCOPT, PRESENCE_OPTIONAL),
client_connection_options_(kCLOP, PRESENCE_OPTIONAL),
max_idle_timeout_to_send_(QuicTime::Delta::Infinite()),
max_bidirectional_streams_(kMIBS, PRESENCE_REQUIRED),
max_unidirectional_streams_(kMIUS, PRESENCE_OPTIONAL),
bytes_for_connection_id_(kTCID, PRESENCE_OPTIONAL),
initial_round_trip_time_us_(kIRTT, PRESENCE_OPTIONAL),
initial_max_stream_data_bytes_incoming_bidirectional_(0,
PRESENCE_OPTIONAL),
initial_max_stream_data_bytes_outgoing_bidirectional_(0,
PRESENCE_OPTIONAL),
initial_max_stream_data_bytes_unidirectional_(0, PRESENCE_OPTIONAL),
initial_stream_flow_control_window_bytes_(kSFCW, PRESENCE_OPTIONAL),
initial_session_flow_control_window_bytes_(kCFCW, PRESENCE_OPTIONAL),
connection_migration_disabled_(kNCMR, PRESENCE_OPTIONAL),
alternate_server_address_ipv6_(kASAD, PRESENCE_OPTIONAL),
alternate_server_address_ipv4_(kASAD, PRESENCE_OPTIONAL),
stateless_reset_token_(kSRST, PRESENCE_OPTIONAL),
max_ack_delay_ms_(kMAD, PRESENCE_OPTIONAL),
min_ack_delay_ms_(0, PRESENCE_OPTIONAL),
ack_delay_exponent_(kADE, PRESENCE_OPTIONAL),
max_udp_payload_size_(0, PRESENCE_OPTIONAL),
max_datagram_frame_size_(0, PRESENCE_OPTIONAL),
active_connection_id_limit_(0, PRESENCE_OPTIONAL) {
SetDefaults();
}
QuicConfig::QuicConfig(const QuicConfig& other) = default;
QuicConfig::~QuicConfig() {}
bool QuicConfig::SetInitialReceivedConnectionOptions(
const QuicTagVector& tags) {
if (HasReceivedConnectionOptions()) {
return false;
}
connection_options_.SetReceivedValues(tags);
return true;
}
void QuicConfig::SetConnectionOptionsToSend(
const QuicTagVector& connection_options) {
connection_options_.SetSendValues(connection_options);
}
void QuicConfig::AddConnectionOptionsToSend(
const QuicTagVector& connection_options) {
if (!connection_options_.HasSendValues()) {
SetConnectionOptionsToSend(connection_options);
return;
}
const QuicTagVector& existing_connection_options = SendConnectionOptions();
QuicTagVector connection_options_to_send;
connection_options_to_send.reserve(existing_connection_options.size() +
connection_options.size());
connection_options_to_send.assign(existing_connection_options.begin(),
existing_connection_options.end());
connection_options_to_send.insert(connection_options_to_send.end(),
connection_options.begin(),
connection_options.end());
SetConnectionOptionsToSend(connection_options_to_send);
}
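// For example, after SetConnectionOptionsToSend({kIW50}) a subsequent
// AddConnectionOptionsToSend({kTBBR}) leaves the send list as
// {kIW50, kTBBR}: existing options are kept and the new ones appended.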
void QuicConfig::SetGoogleHandshakeMessageToSend(std::string message) {
google_handshake_message_to_send_ = std::move(message);
}
const std::optional<std::string>&
QuicConfig::GetReceivedGoogleHandshakeMessage() const {
return received_google_handshake_message_;
}
bool QuicConfig::HasReceivedConnectionOptions() const {
return connection_options_.HasReceivedValues();
}
const QuicTagVector& QuicConfig::ReceivedConnectionOptions() const {
return connection_options_.GetReceivedValues();
}
bool QuicConfig::HasSendConnectionOptions() const {
return connection_options_.HasSendValues();
}
const QuicTagVector& QuicConfig::SendConnectionOptions() const {
return connection_options_.GetSendValues();
}
bool QuicConfig::HasClientSentConnectionOption(QuicTag tag,
Perspective perspective) const {
if (perspective == Perspective::IS_SERVER) {
if (HasReceivedConnectionOptions() &&
ContainsQuicTag(ReceivedConnectionOptions(), tag)) {
return true;
}
} else if (HasSendConnectionOptions() &&
ContainsQuicTag(SendConnectionOptions(), tag)) {
return true;
}
return false;
}
void QuicConfig::SetClientConnectionOptions(
const QuicTagVector& client_connection_options) {
client_connection_options_.SetSendValues(client_connection_options);
}
bool QuicConfig::HasClientRequestedIndependentOption(
QuicTag tag, Perspective perspective) const {
if (perspective == Perspective::IS_SERVER) {
return (HasReceivedConnectionOptions() &&
ContainsQuicTag(ReceivedConnectionOptions(), tag));
}
return (client_connection_options_.HasSendValues() &&
ContainsQuicTag(client_connection_options_.GetSendValues(), tag));
}
const QuicTagVector& QuicConfig::ClientRequestedIndependentOptions(
Perspective perspective) const {
static const QuicTagVector* no_options = new QuicTagVector;
if (perspective == Perspective::IS_SERVER) {
return HasReceivedConnectionOptions() ? ReceivedConnectionOptions()
: *no_options;
}
return client_connection_options_.HasSendValues()
? client_connection_options_.GetSendValues()
: *no_options;
}
void QuicConfig::SetIdleNetworkTimeout(QuicTime::Delta idle_network_timeout) {
if (idle_network_timeout.ToMicroseconds() <= 0) {
QUIC_BUG(quic_bug_10575_6)
<< "Invalid idle network timeout " << idle_network_timeout;
return;
}
max_idle_timeout_to_send_ = idle_network_timeout;
}
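// IdleNetworkTimeout() below prefers the received value when one exists;
// since a received value is only recorded when it does not exceed what we
// offered, the effective timeout is the minimum of the two sides.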
QuicTime::Delta QuicConfig::IdleNetworkTimeout() const {
if (!received_max_idle_timeout_.has_value()) {
return max_idle_timeout_to_send_;
}
return *received_max_idle_timeout_;
}
void QuicConfig::SetMaxBidirectionalStreamsToSend(uint32_t max_streams) {
max_bidirectional_streams_.SetSendValue(max_streams);
}
uint32_t QuicConfig::GetMaxBidirectionalStreamsToSend() const {
return max_bidirectional_streams_.GetSendValue();
}
bool QuicConfig::HasReceivedMaxBidirectionalStreams() const {
return max_bidirectional_streams_.HasReceivedValue();
}
uint32_t QuicConfig::ReceivedMaxBidirectionalStreams() const {
return max_bidirectional_streams_.GetReceivedValue();
}
void QuicConfig::SetMaxUnidirectionalStreamsToSend(uint32_t max_streams) {
max_unidirectional_streams_.SetSendValue(max_streams);
}
uint32_t QuicConfig::GetMaxUnidirectionalStreamsToSend() const {
return max_unidirectional_streams_.GetSendValue();
}
bool QuicConfig::HasReceivedMaxUnidirectionalStreams() const {
return max_unidirectional_streams_.HasReceivedValue();
}
uint32_t QuicConfig::ReceivedMaxUnidirectionalStreams() const {
return max_unidirectional_streams_.GetReceivedValue();
}
void QuicConfig::SetMaxAckDelayToSendMs(uint32_t max_ack_delay_ms) {
max_ack_delay_ms_.SetSendValue(max_ack_delay_ms);
}
uint32_t QuicConfig::GetMaxAckDelayToSendMs() const {
return max_ack_delay_ms_.GetSendValue();
}
bool QuicConfig::HasReceivedMaxAckDelayMs() const {
return max_ack_delay_ms_.HasReceivedValue();
}
uint32_t QuicConfig::ReceivedMaxAckDelayMs() const {
return max_ack_delay_ms_.GetReceivedValue();
}
void QuicConfig::SetMinAckDelayMs(uint32_t min_ack_delay_ms) {
min_ack_delay_ms_.SetSendValue(min_ack_delay_ms);
}
uint32_t QuicConfig::GetMinAckDelayToSendMs() const {
return min_ack_delay_ms_.GetSendValue();
}
bool QuicConfig::HasReceivedMinAckDelayMs() const {
return min_ack_delay_ms_.HasReceivedValue();
}
uint32_t QuicConfig::ReceivedMinAckDelayMs() const {
return min_ack_delay_ms_.GetReceivedValue();
}
void QuicConfig::SetAckDelayExponentToSend(uint32_t exponent) {
ack_delay_exponent_.SetSendValue(exponent);
}
uint32_t QuicConfig::GetAckDelayExponentToSend() const {
return ack_delay_exponent_.GetSendValue();
}
bool QuicConfig::HasReceivedAckDelayExponent() const {
return ack_delay_exponent_.HasReceivedValue();
}
uint32_t QuicConfig::ReceivedAckDelayExponent() const {
return ack_delay_exponent_.GetReceivedValue();
}
void QuicConfig::SetMaxPacketSizeToSend(uint64_t max_udp_payload_size) {
max_udp_payload_size_.SetSendValue(max_udp_payload_size);
}
uint64_t QuicConfig::GetMaxPacketSizeToSend() const {
return max_udp_payload_size_.GetSendValue();
}
bool QuicConfig::HasReceivedMaxPacketSize() const {
return max_udp_payload_size_.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedMaxPacketSize() const {
return max_udp_payload_size_.GetReceivedValue();
}
void QuicConfig::SetMaxDatagramFrameSizeToSend(
uint64_t max_datagram_frame_size) {
max_datagram_frame_size_.SetSendValue(max_datagram_frame_size);
}
uint64_t QuicConfig::GetMaxDatagramFrameSizeToSend() const {
return max_datagram_frame_size_.GetSendValue();
}
bool QuicConfig::HasReceivedMaxDatagramFrameSize() const {
return max_datagram_frame_size_.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedMaxDatagramFrameSize() const {
return max_datagram_frame_size_.GetReceivedValue();
}
void QuicConfig::SetActiveConnectionIdLimitToSend(
uint64_t active_connection_id_limit) {
active_connection_id_limit_.SetSendValue(active_connection_id_limit);
}
uint64_t QuicConfig::GetActiveConnectionIdLimitToSend() const {
return active_connection_id_limit_.GetSendValue();
}
bool QuicConfig::HasReceivedActiveConnectionIdLimit() const {
return active_connection_id_limit_.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedActiveConnectionIdLimit() const {
return active_connection_id_limit_.GetReceivedValue();
}
bool QuicConfig::HasSetBytesForConnectionIdToSend() const {
return bytes_for_connection_id_.HasSendValue();
}
void QuicConfig::SetBytesForConnectionIdToSend(uint32_t bytes) {
bytes_for_connection_id_.SetSendValue(bytes);
}
bool QuicConfig::HasReceivedBytesForConnectionId() const {
return bytes_for_connection_id_.HasReceivedValue();
}
uint32_t QuicConfig::ReceivedBytesForConnectionId() const {
return bytes_for_connection_id_.GetReceivedValue();
}
void QuicConfig::SetInitialRoundTripTimeUsToSend(uint64_t rtt) {
initial_round_trip_time_us_.SetSendValue(rtt);
}
bool QuicConfig::HasReceivedInitialRoundTripTimeUs() const {
return initial_round_trip_time_us_.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedInitialRoundTripTimeUs() const {
return initial_round_trip_time_us_.GetReceivedValue();
}
bool QuicConfig::HasInitialRoundTripTimeUsToSend() const {
return initial_round_trip_time_us_.HasSendValue();
}
uint64_t QuicConfig::GetInitialRoundTripTimeUsToSend() const {
return initial_round_trip_time_us_.GetSendValue();
}
void QuicConfig::SetInitialStreamFlowControlWindowToSend(
uint64_t window_bytes) {
if (window_bytes < kMinimumFlowControlSendWindow) {
QUIC_BUG(quic_bug_10575_7)
<< "Initial stream flow control receive window (" << window_bytes
<< ") cannot be set lower than minimum ("
<< kMinimumFlowControlSendWindow << ").";
window_bytes = kMinimumFlowControlSendWindow;
}
initial_stream_flow_control_window_bytes_.SetSendValue(window_bytes);
}
uint64_t QuicConfig::GetInitialStreamFlowControlWindowToSend() const {
return initial_stream_flow_control_window_bytes_.GetSendValue();
}
bool QuicConfig::HasReceivedInitialStreamFlowControlWindowBytes() const {
return initial_stream_flow_control_window_bytes_.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedInitialStreamFlowControlWindowBytes() const {
return initial_stream_flow_control_window_bytes_.GetReceivedValue();
}
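// The three IETF per-stream limits below fall back to the Google-QUIC
// stream flow control window while they are unset, which is what lets
// SetInitialStreamFlowControlWindowToSend() configure all of them at once.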
void QuicConfig::SetInitialMaxStreamDataBytesIncomingBidirectionalToSend(
uint64_t window_bytes) {
initial_max_stream_data_bytes_incoming_bidirectional_.SetSendValue(
window_bytes);
}
uint64_t QuicConfig::GetInitialMaxStreamDataBytesIncomingBidirectionalToSend()
const {
if (initial_max_stream_data_bytes_incoming_bidirectional_.HasSendValue()) {
return initial_max_stream_data_bytes_incoming_bidirectional_.GetSendValue();
}
return initial_stream_flow_control_window_bytes_.GetSendValue();
}
bool QuicConfig::HasReceivedInitialMaxStreamDataBytesIncomingBidirectional()
const {
return initial_max_stream_data_bytes_incoming_bidirectional_
.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedInitialMaxStreamDataBytesIncomingBidirectional()
const {
return initial_max_stream_data_bytes_incoming_bidirectional_
.GetReceivedValue();
}
void QuicConfig::SetInitialMaxStreamDataBytesOutgoingBidirectionalToSend(
uint64_t window_bytes) {
initial_max_stream_data_bytes_outgoing_bidirectional_.SetSendValue(
window_bytes);
}
uint64_t QuicConfig::GetInitialMaxStreamDataBytesOutgoingBidirectionalToSend()
const {
if (initial_max_stream_data_bytes_outgoing_bidirectional_.HasSendValue()) {
return initial_max_stream_data_bytes_outgoing_bidirectional_.GetSendValue();
}
return initial_stream_flow_control_window_bytes_.GetSendValue();
}
bool QuicConfig::HasReceivedInitialMaxStreamDataBytesOutgoingBidirectional()
const {
return initial_max_stream_data_bytes_outgoing_bidirectional_
.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedInitialMaxStreamDataBytesOutgoingBidirectional()
const {
return initial_max_stream_data_bytes_outgoing_bidirectional_
.GetReceivedValue();
}
void QuicConfig::SetInitialMaxStreamDataBytesUnidirectionalToSend(
uint64_t window_bytes) {
initial_max_stream_data_bytes_unidirectional_.SetSendValue(window_bytes);
}
uint64_t QuicConfig::GetInitialMaxStreamDataBytesUnidirectionalToSend() const {
if (initial_max_stream_data_bytes_unidirectional_.HasSendValue()) {
return initial_max_stream_data_bytes_unidirectional_.GetSendValue();
}
return initial_stream_flow_control_window_bytes_.GetSendValue();
}
bool QuicConfig::HasReceivedInitialMaxStreamDataBytesUnidirectional() const {
return initial_max_stream_data_bytes_unidirectional_.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedInitialMaxStreamDataBytesUnidirectional() const {
return initial_max_stream_data_bytes_unidirectional_.GetReceivedValue();
}
void QuicConfig::SetInitialSessionFlowControlWindowToSend(
uint64_t window_bytes) {
if (window_bytes < kMinimumFlowControlSendWindow) {
QUIC_BUG(quic_bug_10575_8)
<< "Initial session flow control receive window (" << window_bytes
<< ") cannot be set lower than default ("
<< kMinimumFlowControlSendWindow << ").";
window_bytes = kMinimumFlowControlSendWindow;
}
initial_session_flow_control_window_bytes_.SetSendValue(window_bytes);
}
uint64_t QuicConfig::GetInitialSessionFlowControlWindowToSend() const {
return initial_session_flow_control_window_bytes_.GetSendValue();
}
bool QuicConfig::HasReceivedInitialSessionFlowControlWindowBytes() const {
return initial_session_flow_control_window_bytes_.HasReceivedValue();
}
uint64_t QuicConfig::ReceivedInitialSessionFlowControlWindowBytes() const {
return initial_session_flow_control_window_bytes_.GetReceivedValue();
}
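// Note the asymmetry: SetDisableConnectionMigration() controls the value we
// send, while DisableConnectionMigration() reports whether the *peer* asked
// to disable migration.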
void QuicConfig::SetDisableConnectionMigration() {
connection_migration_disabled_.SetSendValue(1);
}
bool QuicConfig::DisableConnectionMigration() const {
return connection_migration_disabled_.HasReceivedValue();
}
void QuicConfig::SetIPv6AlternateServerAddressToSend(
const QuicSocketAddress& alternate_server_address_ipv6) {
if (!alternate_server_address_ipv6.Normalized().host().IsIPv6()) {
QUIC_BUG(quic_bug_10575_9)
<< "Cannot use SetIPv6AlternateServerAddressToSend with "
<< alternate_server_address_ipv6;
return;
}
alternate_server_address_ipv6_.SetSendValue(alternate_server_address_ipv6);
}
bool QuicConfig::HasReceivedIPv6AlternateServerAddress() const {
return alternate_server_address_ipv6_.HasReceivedValue();
}
const QuicSocketAddress& QuicConfig::ReceivedIPv6AlternateServerAddress()
const {
return alternate_server_address_ipv6_.GetReceivedValue();
}
void QuicConfig::SetIPv4AlternateServerAddressToSend(
const QuicSocketAddress& alternate_server_address_ipv4) {
if (!alternate_server_address_ipv4.host().IsIPv4()) {
QUIC_BUG(quic_bug_10575_11)
<< "Cannot use SetIPv4AlternateServerAddressToSend with "
<< alternate_server_address_ipv4;
return;
}
alternate_server_address_ipv4_.SetSendValue(alternate_server_address_ipv4);
}
bool QuicConfig::HasReceivedIPv4AlternateServerAddress() const {
return alternate_server_address_ipv4_.HasReceivedValue();
}
const QuicSocketAddress& QuicConfig::ReceivedIPv4AlternateServerAddress()
const {
return alternate_server_address_ipv4_.GetReceivedValue();
}
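// A preferred-address connection ID and reset token are only meaningful
// once at least one alternate server address has been set to send, and may
// only be set once; both preconditions are enforced with a QUIC_BUG.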
void QuicConfig::SetPreferredAddressConnectionIdAndTokenToSend(
const QuicConnectionId& connection_id,
const StatelessResetToken& stateless_reset_token) {
if ((!alternate_server_address_ipv4_.HasSendValue() &&
!alternate_server_address_ipv6_.HasSendValue()) ||
preferred_address_connection_id_and_token_.has_value()) {
QUIC_BUG(quic_bug_10575_17)
<< "Can not send connection ID and token for preferred address";
return;
}
preferred_address_connection_id_and_token_ =
std::make_pair(connection_id, stateless_reset_token);
}
bool QuicConfig::HasReceivedPreferredAddressConnectionIdAndToken() const {
return (HasReceivedIPv6AlternateServerAddress() ||
HasReceivedIPv4AlternateServerAddress()) &&
preferred_address_connection_id_and_token_.has_value();
}
const std::pair<QuicConnectionId, StatelessResetToken>&
QuicConfig::ReceivedPreferredAddressConnectionIdAndToken() const {
QUICHE_DCHECK(HasReceivedPreferredAddressConnectionIdAndToken());
return *preferred_address_connection_id_and_token_;
}
void QuicConfig::SetOriginalConnectionIdToSend(
const QuicConnectionId& original_destination_connection_id) {
original_destination_connection_id_to_send_ =
original_destination_connection_id;
}
bool QuicConfig::HasReceivedOriginalConnectionId() const {
return received_original_destination_connection_id_.has_value();
}
QuicConnectionId QuicConfig::ReceivedOriginalConnectionId() const {
if (!HasReceivedOriginalConnectionId()) {
QUIC_BUG(quic_bug_10575_13) << "No received original connection ID";
return EmptyQuicConnectionId();
}
return *received_original_destination_connection_id_;
}
void QuicConfig::SetInitialSourceConnectionIdToSend(
const QuicConnectionId& initial_source_connection_id) {
initial_source_connection_id_to_send_ = initial_source_connection_id;
}
bool QuicConfig::HasReceivedInitialSourceConnectionId() const {
return received_initial_source_connection_id_.has_value();
}
QuicConnectionId QuicConfig::ReceivedInitialSourceConnectionId() const {
if (!HasReceivedInitialSourceConnectionId()) {
QUIC_BUG(quic_bug_10575_14) << "No received initial source connection ID";
return EmptyQuicConnectionId();
}
return *received_initial_source_connection_id_;
}
void QuicConfig::SetRetrySourceConnectionIdToSend(
const QuicConnectionId& retry_source_connection_id) {
retry_source_connection_id_to_send_ = retry_source_connection_id;
}
bool QuicConfig::HasReceivedRetrySourceConnectionId() const {
return received_retry_source_connection_id_.has_value();
}
QuicConnectionId QuicConfig::ReceivedRetrySourceConnectionId() const {
if (!HasReceivedRetrySourceConnectionId()) {
QUIC_BUG(quic_bug_10575_15) << "No received retry source connection ID";
return EmptyQuicConnectionId();
}
return *received_retry_source_connection_id_;
}
void QuicConfig::SetStatelessResetTokenToSend(
const StatelessResetToken& stateless_reset_token) {
stateless_reset_token_.SetSendValue(stateless_reset_token);
}
bool QuicConfig::HasStatelessResetTokenToSend() const {
return stateless_reset_token_.HasSendValue();
}
bool QuicConfig::HasReceivedStatelessResetToken() const {
return stateless_reset_token_.HasReceivedValue();
}
const StatelessResetToken& QuicConfig::ReceivedStatelessResetToken() const {
return stateless_reset_token_.GetReceivedValue();
}
bool QuicConfig::negotiated() const { return negotiated_; }
void QuicConfig::SetCreateSessionTagIndicators(QuicTagVector tags) {
create_session_tag_indicators_ = std::move(tags);
}
const QuicTagVector& QuicConfig::create_session_tag_indicators() const {
return create_session_tag_indicators_;
}
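// Populates what a fresh QuicConfig advertises: the maximum idle timeout,
// default stream limits, minimum flow control windows, and default
// ack-delay parameters. All of these may still be overridden before the
// handshake.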
void QuicConfig::SetDefaults() {
SetIdleNetworkTimeout(QuicTime::Delta::FromSeconds(kMaximumIdleTimeoutSecs));
SetMaxBidirectionalStreamsToSend(kDefaultMaxStreamsPerConnection);
SetMaxUnidirectionalStreamsToSend(kDefaultMaxStreamsPerConnection);
max_time_before_crypto_handshake_ =
QuicTime::Delta::FromSeconds(kMaxTimeForCryptoHandshakeSecs);
max_idle_time_before_crypto_handshake_ =
QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs);
max_undecryptable_packets_ = kDefaultMaxUndecryptablePackets;
SetInitialStreamFlowControlWindowToSend(kMinimumFlowControlSendWindow);
SetInitialSessionFlowControlWindowToSend(kMinimumFlowControlSendWindow);
SetMaxAckDelayToSendMs(GetDefaultDelayedAckTimeMs());
SetAckDelayExponentToSend(kDefaultAckDelayExponent);
SetMaxPacketSizeToSend(kMaxIncomingPacketSize);
SetMaxDatagramFrameSizeToSend(kMaxAcceptedDatagramFrameSize);
SetReliableStreamReset(false);
}
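// Serializes the config for the legacy (non-TLS) QUIC crypto handshake.
// The idle timeout sent is clamped to the peer's value when one has been
// received, and only one alternate server address (IPv6 preferred) is
// included.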
void QuicConfig::ToHandshakeMessage(
CryptoHandshakeMessage* out, QuicTransportVersion transport_version) const {
QuicFixedUint32 max_idle_timeout_seconds(kICSL, PRESENCE_REQUIRED);
uint32_t max_idle_timeout_to_send_seconds =
max_idle_timeout_to_send_.ToSeconds();
if (received_max_idle_timeout_.has_value() &&
received_max_idle_timeout_->ToSeconds() <
max_idle_timeout_to_send_seconds) {
max_idle_timeout_to_send_seconds = received_max_idle_timeout_->ToSeconds();
}
max_idle_timeout_seconds.SetSendValue(max_idle_timeout_to_send_seconds);
max_idle_timeout_seconds.ToHandshakeMessage(out);
max_bidirectional_streams_.ToHandshakeMessage(out);
if (VersionHasIetfQuicFrames(transport_version)) {
max_unidirectional_streams_.ToHandshakeMessage(out);
ack_delay_exponent_.ToHandshakeMessage(out);
}
if (max_ack_delay_ms_.GetSendValue() != GetDefaultDelayedAckTimeMs()) {
max_ack_delay_ms_.ToHandshakeMessage(out);
}
bytes_for_connection_id_.ToHandshakeMessage(out);
initial_round_trip_time_us_.ToHandshakeMessage(out);
initial_stream_flow_control_window_bytes_.ToHandshakeMessage(out);
initial_session_flow_control_window_bytes_.ToHandshakeMessage(out);
connection_migration_disabled_.ToHandshakeMessage(out);
connection_options_.ToHandshakeMessage(out);
if (alternate_server_address_ipv6_.HasSendValue()) {
alternate_server_address_ipv6_.ToHandshakeMessage(out);
} else {
alternate_server_address_ipv4_.ToHandshakeMessage(out);
}
stateless_reset_token_.ToHandshakeMessage(out);
}
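// Applies a peer hello one fixed value at a time, stopping at the first
// error. The kICSL handling is asymmetric: a server-sent idle timeout
// larger than our own fails negotiation, while an oversized client value is
// simply ignored in favor of our own.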
QuicErrorCode QuicConfig::ProcessPeerHello(
const CryptoHandshakeMessage& peer_hello, HelloType hello_type,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
QuicErrorCode error = QUIC_NO_ERROR;
if (error == QUIC_NO_ERROR) {
QuicFixedUint32 max_idle_timeout_seconds(kICSL, PRESENCE_REQUIRED);
error = max_idle_timeout_seconds.ProcessPeerHello(peer_hello, hello_type,
error_details);
if (error == QUIC_NO_ERROR) {
if (max_idle_timeout_seconds.GetReceivedValue() >
max_idle_timeout_to_send_.ToSeconds()) {
if (hello_type == SERVER) {
error = QUIC_INVALID_NEGOTIATED_VALUE;
*error_details =
"Invalid value received for " + QuicTagToString(kICSL);
}
} else {
received_max_idle_timeout_ = QuicTime::Delta::FromSeconds(
max_idle_timeout_seconds.GetReceivedValue());
}
}
}
if (error == QUIC_NO_ERROR) {
error = max_bidirectional_streams_.ProcessPeerHello(peer_hello, hello_type,
error_details);
}
if (error == QUIC_NO_ERROR) {
error = max_unidirectional_streams_.ProcessPeerHello(peer_hello, hello_type,
error_details);
}
if (error == QUIC_NO_ERROR) {
error = bytes_for_connection_id_.ProcessPeerHello(peer_hello, hello_type,
error_details);
}
if (error == QUIC_NO_ERROR) {
error = initial_round_trip_time_us_.ProcessPeerHello(peer_hello, hello_type,
error_details);
}
if (error == QUIC_NO_ERROR) {
error = initial_stream_flow_control_window_bytes_.ProcessPeerHello(
peer_hello, hello_type, error_details);
}
if (error == QUIC_NO_ERROR) {
error = initial_session_flow_control_window_bytes_.ProcessPeerHello(
peer_hello, hello_type, error_details);
}
if (error == QUIC_NO_ERROR) {
error = connection_migration_disabled_.ProcessPeerHello(
peer_hello, hello_type, error_details);
}
if (error == QUIC_NO_ERROR) {
error = connection_options_.ProcessPeerHello(peer_hello, hello_type,
error_details);
}
if (error == QUIC_NO_ERROR) {
QuicFixedSocketAddress alternate_server_address(kASAD, PRESENCE_OPTIONAL);
error = alternate_server_address.ProcessPeerHello(peer_hello, hello_type,
error_details);
if (error == QUIC_NO_ERROR && alternate_server_address.HasReceivedValue()) {
const QuicSocketAddress& received_address =
alternate_server_address.GetReceivedValue();
if (received_address.host().IsIPv6()) {
alternate_server_address_ipv6_.SetReceivedValue(received_address);
} else if (received_address.host().IsIPv4()) {
alternate_server_address_ipv4_.SetReceivedValue(received_address);
}
}
}
if (error == QUIC_NO_ERROR) {
error = stateless_reset_token_.ProcessPeerHello(peer_hello, hello_type,
error_details);
}
if (error == QUIC_NO_ERROR) {
error = max_ack_delay_ms_.ProcessPeerHello(peer_hello, hello_type,
error_details);
}
if (error == QUIC_NO_ERROR) {
error = ack_delay_exponent_.ProcessPeerHello(peer_hello, hello_type,
error_details);
}
if (error == QUIC_NO_ERROR) {
negotiated_ = true;
}
return error;
}
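// Translates this config into IETF transport parameters. Connection IDs,
// the stateless reset token, and the preferred-address token are copied
// byte for byte, and min_ack_delay is converted from milliseconds to
// microseconds on the way out.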
bool QuicConfig::FillTransportParameters(TransportParameters* params) const {
if (original_destination_connection_id_to_send_.has_value()) {
params->original_destination_connection_id =
*original_destination_connection_id_to_send_;
}
params->max_idle_timeout_ms.set_value(
max_idle_timeout_to_send_.ToMilliseconds());
if (stateless_reset_token_.HasSendValue()) {
StatelessResetToken stateless_reset_token =
stateless_reset_token_.GetSendValue();
params->stateless_reset_token.assign(
reinterpret_cast<const char*>(&stateless_reset_token),
reinterpret_cast<const char*>(&stateless_reset_token) +
sizeof(stateless_reset_token));
}
params->max_udp_payload_size.set_value(GetMaxPacketSizeToSend());
params->max_datagram_frame_size.set_value(GetMaxDatagramFrameSizeToSend());
params->initial_max_data.set_value(
GetInitialSessionFlowControlWindowToSend());
params->initial_max_stream_data_bidi_local.set_value(
GetInitialMaxStreamDataBytesOutgoingBidirectionalToSend());
params->initial_max_stream_data_bidi_remote.set_value(
GetInitialMaxStreamDataBytesIncomingBidirectionalToSend());
params->initial_max_stream_data_uni.set_value(
GetInitialMaxStreamDataBytesUnidirectionalToSend());
params->initial_max_streams_bidi.set_value(
GetMaxBidirectionalStreamsToSend());
params->initial_max_streams_uni.set_value(
GetMaxUnidirectionalStreamsToSend());
params->max_ack_delay.set_value(GetMaxAckDelayToSendMs());
if (min_ack_delay_ms_.HasSendValue()) {
params->min_ack_delay_us.set_value(min_ack_delay_ms_.GetSendValue() *
kNumMicrosPerMilli);
}
params->ack_delay_exponent.set_value(GetAckDelayExponentToSend());
params->disable_active_migration =
connection_migration_disabled_.HasSendValue() &&
connection_migration_disabled_.GetSendValue() != 0;
if (alternate_server_address_ipv6_.HasSendValue() ||
alternate_server_address_ipv4_.HasSendValue()) {
TransportParameters::PreferredAddress preferred_address;
if (alternate_server_address_ipv6_.HasSendValue()) {
preferred_address.ipv6_socket_address =
alternate_server_address_ipv6_.GetSendValue();
}
if (alternate_server_address_ipv4_.HasSendValue()) {
preferred_address.ipv4_socket_address =
alternate_server_address_ipv4_.GetSendValue();
}
if (preferred_address_connection_id_and_token_) {
preferred_address.connection_id =
preferred_address_connection_id_and_token_->first;
auto* begin = reinterpret_cast<const char*>(
&preferred_address_connection_id_and_token_->second);
auto* end =
begin + sizeof(preferred_address_connection_id_and_token_->second);
preferred_address.stateless_reset_token.assign(begin, end);
}
params->preferred_address =
std::make_unique<TransportParameters::PreferredAddress>(
preferred_address);
}
if (active_connection_id_limit_.HasSendValue()) {
params->active_connection_id_limit.set_value(
active_connection_id_limit_.GetSendValue());
}
if (initial_source_connection_id_to_send_.has_value()) {
params->initial_source_connection_id =
*initial_source_connection_id_to_send_;
}
if (retry_source_connection_id_to_send_.has_value()) {
params->retry_source_connection_id = *retry_source_connection_id_to_send_;
}
if (initial_round_trip_time_us_.HasSendValue()) {
params->initial_round_trip_time_us.set_value(
initial_round_trip_time_us_.GetSendValue());
}
if (connection_options_.HasSendValues() &&
!connection_options_.GetSendValues().empty()) {
params->google_connection_options = connection_options_.GetSendValues();
}
if (google_handshake_message_to_send_.has_value()) {
params->google_handshake_message = google_handshake_message_to_send_;
}
params->reliable_stream_reset = reliable_stream_reset_;
params->custom_parameters = custom_transport_parameters_to_send_;
return true;
}
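// Applies the peer's transport parameters. When |is_resumption| is true
// (parameters remembered from an earlier connection), values that must not
// be carried across connections (connection IDs, reset tokens, ack-delay
// settings, and the preferred address) are skipped and the config is not
// marked negotiated.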
QuicErrorCode QuicConfig::ProcessTransportParameters(
const TransportParameters& params, bool is_resumption,
std::string* error_details) {
if (!is_resumption && params.original_destination_connection_id.has_value()) {
received_original_destination_connection_id_ =
*params.original_destination_connection_id;
}
if (params.max_idle_timeout_ms.value() > 0 &&
params.max_idle_timeout_ms.value() <
static_cast<uint64_t>(max_idle_timeout_to_send_.ToMilliseconds())) {
received_max_idle_timeout_ =
QuicTime::Delta::FromMilliseconds(params.max_idle_timeout_ms.value());
}
if (!is_resumption && !params.stateless_reset_token.empty()) {
StatelessResetToken stateless_reset_token;
if (params.stateless_reset_token.size() != sizeof(stateless_reset_token)) {
QUIC_BUG(quic_bug_10575_16) << "Bad stateless reset token length "
<< params.stateless_reset_token.size();
*error_details = "Bad stateless reset token length";
return QUIC_INTERNAL_ERROR;
}
memcpy(&stateless_reset_token, params.stateless_reset_token.data(),
params.stateless_reset_token.size());
stateless_reset_token_.SetReceivedValue(stateless_reset_token);
}
if (params.max_udp_payload_size.IsValid()) {
max_udp_payload_size_.SetReceivedValue(params.max_udp_payload_size.value());
}
if (params.max_datagram_frame_size.IsValid()) {
max_datagram_frame_size_.SetReceivedValue(
params.max_datagram_frame_size.value());
}
initial_session_flow_control_window_bytes_.SetReceivedValue(
params.initial_max_data.value());
max_bidirectional_streams_.SetReceivedValue(
std::min<uint64_t>(params.initial_max_streams_bidi.value(),
std::numeric_limits<uint32_t>::max()));
max_unidirectional_streams_.SetReceivedValue(
std::min<uint64_t>(params.initial_max_streams_uni.value(),
std::numeric_limits<uint32_t>::max()));
initial_max_stream_data_bytes_incoming_bidirectional_.SetReceivedValue(
params.initial_max_stream_data_bidi_local.value());
initial_max_stream_data_bytes_outgoing_bidirectional_.SetReceivedValue(
params.initial_max_stream_data_bidi_remote.value());
initial_max_stream_data_bytes_unidirectional_.SetReceivedValue(
params.initial_max_stream_data_uni.value());
if (!is_resumption) {
max_ack_delay_ms_.SetReceivedValue(params.max_ack_delay.value());
if (params.ack_delay_exponent.IsValid()) {
ack_delay_exponent_.SetReceivedValue(params.ack_delay_exponent.value());
}
if (params.preferred_address != nullptr) {
if (params.preferred_address->ipv6_socket_address.port() != 0) {
alternate_server_address_ipv6_.SetReceivedValue(
params.preferred_address->ipv6_socket_address);
}
if (params.preferred_address->ipv4_socket_address.port() != 0) {
alternate_server_address_ipv4_.SetReceivedValue(
params.preferred_address->ipv4_socket_address);
}
if (!params.preferred_address->connection_id.IsEmpty()) {
preferred_address_connection_id_and_token_ = std::make_pair(
params.preferred_address->connection_id,
*reinterpret_cast<const StatelessResetToken*>(
¶ms.preferred_address->stateless_reset_token.front()));
}
}
if (params.min_ack_delay_us.value() != 0) {
if (params.min_ack_delay_us.value() >
params.max_ack_delay.value() * kNumMicrosPerMilli) {
*error_details = "MinAckDelay is greater than MaxAckDelay.";
return IETF_QUIC_PROTOCOL_VIOLATION;
}
min_ack_delay_ms_.SetReceivedValue(params.min_ack_delay_us.value() /
kNumMicrosPerMilli);
}
}
if (params.disable_active_migration) {
connection_migration_disabled_.SetReceivedValue(1u);
}
active_connection_id_limit_.SetReceivedValue(
params.active_connection_id_limit.value());
if (!is_resumption) {
if (params.initial_source_connection_id.has_value()) {
received_initial_source_connection_id_ =
*params.initial_source_connection_id;
}
if (params.retry_source_connection_id.has_value()) {
received_retry_source_connection_id_ = *params.retry_source_connection_id;
}
}
if (params.initial_round_trip_time_us.value() > 0) {
initial_round_trip_time_us_.SetReceivedValue(
params.initial_round_trip_time_us.value());
}
if (params.google_connection_options.has_value()) {
connection_options_.SetReceivedValues(*params.google_connection_options);
}
if (params.google_handshake_message.has_value()) {
received_google_handshake_message_ = params.google_handshake_message;
}
received_custom_transport_parameters_ = params.custom_parameters;
if (reliable_stream_reset_) {
reliable_stream_reset_ = params.reliable_stream_reset;
}
if (!is_resumption) {
negotiated_ = true;
}
*error_details = "";
return QUIC_NO_ERROR;
}
void QuicConfig::ClearGoogleHandshakeMessage() {
google_handshake_message_to_send_.reset();
received_google_handshake_message_.reset();
}
std::optional<QuicSocketAddress> QuicConfig::GetPreferredAddressToSend(
quiche::IpAddressFamily address_family) const {
if (alternate_server_address_ipv6_.HasSendValue() &&
address_family == quiche::IpAddressFamily::IP_V6) {
return alternate_server_address_ipv6_.GetSendValue();
}
if (alternate_server_address_ipv4_.HasSendValue() &&
address_family == quiche::IpAddressFamily::IP_V4) {
return alternate_server_address_ipv4_.GetSendValue();
}
return std::nullopt;
}
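// The DNat variants support deployments where the address advertised to the
// client differs from the address the server actually listens on
// (destination NAT): the first argument is what gets sent to the peer, the
// second is what GetMappedAlternativeServerAddress() returns locally.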
void QuicConfig::SetIPv4AlternateServerAddressForDNat(
const QuicSocketAddress& alternate_server_address_ipv4_to_send,
const QuicSocketAddress& mapped_alternate_server_address_ipv4) {
SetIPv4AlternateServerAddressToSend(alternate_server_address_ipv4_to_send);
mapped_alternate_server_address_ipv4_ = mapped_alternate_server_address_ipv4;
}
void QuicConfig::SetIPv6AlternateServerAddressForDNat(
const QuicSocketAddress& alternate_server_address_ipv6_to_send,
const QuicSocketAddress& mapped_alternate_server_address_ipv6) {
SetIPv6AlternateServerAddressToSend(alternate_server_address_ipv6_to_send);
mapped_alternate_server_address_ipv6_ = mapped_alternate_server_address_ipv6;
}
std::optional<QuicSocketAddress> QuicConfig::GetMappedAlternativeServerAddress(
quiche::IpAddressFamily address_family) const {
if (mapped_alternate_server_address_ipv6_.has_value() &&
address_family == quiche::IpAddressFamily::IP_V6) {
return *mapped_alternate_server_address_ipv6_;
}
if (mapped_alternate_server_address_ipv4_.has_value() &&
address_family == quiche::IpAddressFamily::IP_V4) {
return *mapped_alternate_server_address_ipv4_;
}
return GetPreferredAddressToSend(address_family);
}
void QuicConfig::ClearAlternateServerAddressToSend(
quiche::IpAddressFamily address_family) {
if (address_family == quiche::IpAddressFamily::IP_V4) {
alternate_server_address_ipv4_.ClearSendValue();
} else if (address_family == quiche::IpAddressFamily::IP_V6) {
alternate_server_address_ipv6_.ClearSendValue();
}
}
bool QuicConfig::SupportsServerPreferredAddress(Perspective perspective) const {
return HasClientSentConnectionOption(kSPAD, perspective) ||
GetQuicFlag(quic_always_support_server_preferred_address);
}
void QuicConfig::SetReliableStreamReset(bool reliable_stream_reset) {
reliable_stream_reset_ = reliable_stream_reset;
}
bool QuicConfig::SupportsReliableStreamReset() const {
return reliable_stream_reset_;
}
} | #include "quiche/quic/core/quic_config.h"
#include <memory>
#include <string>
#include <utility>
#include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/transport_parameters.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
namespace {
class QuicConfigTest : public QuicTestWithParam<ParsedQuicVersion> {
public:
QuicConfigTest() : version_(GetParam()) {}
protected:
ParsedQuicVersion version_;
QuicConfig config_;
};
INSTANTIATE_TEST_SUITE_P(QuicConfigTests, QuicConfigTest,
::testing::ValuesIn(AllSupportedVersions()),
::testing::PrintToStringParamName());
TEST_P(QuicConfigTest, SetDefaults) {
EXPECT_EQ(kMinimumFlowControlSendWindow,
config_.GetInitialStreamFlowControlWindowToSend());
EXPECT_EQ(kMinimumFlowControlSendWindow,
config_.GetInitialMaxStreamDataBytesIncomingBidirectionalToSend());
EXPECT_EQ(kMinimumFlowControlSendWindow,
config_.GetInitialMaxStreamDataBytesOutgoingBidirectionalToSend());
EXPECT_EQ(kMinimumFlowControlSendWindow,
config_.GetInitialMaxStreamDataBytesUnidirectionalToSend());
EXPECT_FALSE(config_.HasReceivedInitialStreamFlowControlWindowBytes());
EXPECT_FALSE(
config_.HasReceivedInitialMaxStreamDataBytesIncomingBidirectional());
EXPECT_FALSE(
config_.HasReceivedInitialMaxStreamDataBytesOutgoingBidirectional());
EXPECT_FALSE(config_.HasReceivedInitialMaxStreamDataBytesUnidirectional());
EXPECT_EQ(kMaxIncomingPacketSize, config_.GetMaxPacketSizeToSend());
EXPECT_FALSE(config_.HasReceivedMaxPacketSize());
}
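// Setting the Google-QUIC stream window should implicitly set all three
// IETF per-stream limits until one of them is set explicitly.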
TEST_P(QuicConfigTest, AutoSetIetfFlowControl) {
EXPECT_EQ(kMinimumFlowControlSendWindow,
config_.GetInitialStreamFlowControlWindowToSend());
EXPECT_EQ(kMinimumFlowControlSendWindow,
config_.GetInitialMaxStreamDataBytesIncomingBidirectionalToSend());
EXPECT_EQ(kMinimumFlowControlSendWindow,
config_.GetInitialMaxStreamDataBytesOutgoingBidirectionalToSend());
EXPECT_EQ(kMinimumFlowControlSendWindow,
config_.GetInitialMaxStreamDataBytesUnidirectionalToSend());
static const uint32_t kTestWindowSize = 1234567;
config_.SetInitialStreamFlowControlWindowToSend(kTestWindowSize);
EXPECT_EQ(kTestWindowSize, config_.GetInitialStreamFlowControlWindowToSend());
EXPECT_EQ(kTestWindowSize,
config_.GetInitialMaxStreamDataBytesIncomingBidirectionalToSend());
EXPECT_EQ(kTestWindowSize,
config_.GetInitialMaxStreamDataBytesOutgoingBidirectionalToSend());
EXPECT_EQ(kTestWindowSize,
config_.GetInitialMaxStreamDataBytesUnidirectionalToSend());
static const uint32_t kTestWindowSizeTwo = 2345678;
config_.SetInitialMaxStreamDataBytesIncomingBidirectionalToSend(
kTestWindowSizeTwo);
EXPECT_EQ(kTestWindowSize, config_.GetInitialStreamFlowControlWindowToSend());
EXPECT_EQ(kTestWindowSizeTwo,
config_.GetInitialMaxStreamDataBytesIncomingBidirectionalToSend());
EXPECT_EQ(kTestWindowSize,
config_.GetInitialMaxStreamDataBytesOutgoingBidirectionalToSend());
EXPECT_EQ(kTestWindowSize,
config_.GetInitialMaxStreamDataBytesUnidirectionalToSend());
}
TEST_P(QuicConfigTest, ToHandshakeMessage) {
if (version_.UsesTls()) {
return;
}
config_.SetInitialStreamFlowControlWindowToSend(
kInitialStreamFlowControlWindowForTest);
config_.SetInitialSessionFlowControlWindowToSend(
kInitialSessionFlowControlWindowForTest);
config_.SetIdleNetworkTimeout(QuicTime::Delta::FromSeconds(5));
CryptoHandshakeMessage msg;
config_.ToHandshakeMessage(&msg, version_.transport_version);
uint32_t value;
QuicErrorCode error = msg.GetUint32(kICSL, &value);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_EQ(5u, value);
error = msg.GetUint32(kSFCW, &value);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_EQ(kInitialStreamFlowControlWindowForTest, value);
error = msg.GetUint32(kCFCW, &value);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_EQ(kInitialSessionFlowControlWindowForTest, value);
}
TEST_P(QuicConfigTest, ProcessClientHello) {
if (version_.UsesTls()) {
return;
}
const uint32_t kTestMaxAckDelayMs =
static_cast<uint32_t>(GetDefaultDelayedAckTimeMs() + 1);
QuicConfig client_config;
QuicTagVector cgst;
cgst.push_back(kQBIC);
client_config.SetIdleNetworkTimeout(
QuicTime::Delta::FromSeconds(2 * kMaximumIdleTimeoutSecs));
client_config.SetInitialRoundTripTimeUsToSend(10 * kNumMicrosPerMilli);
client_config.SetInitialStreamFlowControlWindowToSend(
2 * kInitialStreamFlowControlWindowForTest);
client_config.SetInitialSessionFlowControlWindowToSend(
2 * kInitialSessionFlowControlWindowForTest);
QuicTagVector copt;
copt.push_back(kTBBR);
client_config.SetConnectionOptionsToSend(copt);
client_config.SetMaxAckDelayToSendMs(kTestMaxAckDelayMs);
CryptoHandshakeMessage msg;
client_config.ToHandshakeMessage(&msg, version_.transport_version);
std::string error_details;
QuicTagVector initial_received_options;
initial_received_options.push_back(kIW50);
EXPECT_TRUE(
config_.SetInitialReceivedConnectionOptions(initial_received_options));
EXPECT_FALSE(
config_.SetInitialReceivedConnectionOptions(initial_received_options))
<< "You can only set initial options once.";
const QuicErrorCode error =
config_.ProcessPeerHello(msg, CLIENT, &error_details);
EXPECT_FALSE(
config_.SetInitialReceivedConnectionOptions(initial_received_options))
<< "You cannot set initial options after the hello.";
EXPECT_THAT(error, IsQuicNoError());
EXPECT_TRUE(config_.negotiated());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kMaximumIdleTimeoutSecs),
config_.IdleNetworkTimeout());
EXPECT_EQ(10 * kNumMicrosPerMilli, config_.ReceivedInitialRoundTripTimeUs());
EXPECT_TRUE(config_.HasReceivedConnectionOptions());
EXPECT_EQ(2u, config_.ReceivedConnectionOptions().size());
EXPECT_EQ(config_.ReceivedConnectionOptions()[0], kIW50);
EXPECT_EQ(config_.ReceivedConnectionOptions()[1], kTBBR);
EXPECT_EQ(config_.ReceivedInitialStreamFlowControlWindowBytes(),
2 * kInitialStreamFlowControlWindowForTest);
EXPECT_EQ(config_.ReceivedInitialSessionFlowControlWindowBytes(),
2 * kInitialSessionFlowControlWindowForTest);
EXPECT_TRUE(config_.HasReceivedMaxAckDelayMs());
EXPECT_EQ(kTestMaxAckDelayMs, config_.ReceivedMaxAckDelayMs());
EXPECT_FALSE(
config_.HasReceivedInitialMaxStreamDataBytesIncomingBidirectional());
EXPECT_FALSE(
config_.HasReceivedInitialMaxStreamDataBytesOutgoingBidirectional());
EXPECT_FALSE(config_.HasReceivedInitialMaxStreamDataBytesUnidirectional());
}
TEST_P(QuicConfigTest, ProcessServerHello) {
if (version_.UsesTls()) {
return;
}
QuicIpAddress host;
host.FromString("127.0.3.1");
const QuicSocketAddress kTestServerAddress = QuicSocketAddress(host, 1234);
const StatelessResetToken kTestStatelessResetToken{
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f};
const uint32_t kTestMaxAckDelayMs =
static_cast<uint32_t>(GetDefaultDelayedAckTimeMs() + 1);
QuicConfig server_config;
QuicTagVector cgst;
cgst.push_back(kQBIC);
server_config.SetIdleNetworkTimeout(
QuicTime::Delta::FromSeconds(kMaximumIdleTimeoutSecs / 2));
server_config.SetInitialRoundTripTimeUsToSend(10 * kNumMicrosPerMilli);
server_config.SetInitialStreamFlowControlWindowToSend(
2 * kInitialStreamFlowControlWindowForTest);
server_config.SetInitialSessionFlowControlWindowToSend(
2 * kInitialSessionFlowControlWindowForTest);
server_config.SetIPv4AlternateServerAddressToSend(kTestServerAddress);
server_config.SetStatelessResetTokenToSend(kTestStatelessResetToken);
server_config.SetMaxAckDelayToSendMs(kTestMaxAckDelayMs);
CryptoHandshakeMessage msg;
server_config.ToHandshakeMessage(&msg, version_.transport_version);
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, SERVER, &error_details);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_TRUE(config_.negotiated());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kMaximumIdleTimeoutSecs / 2),
config_.IdleNetworkTimeout());
EXPECT_EQ(10 * kNumMicrosPerMilli, config_.ReceivedInitialRoundTripTimeUs());
EXPECT_EQ(config_.ReceivedInitialStreamFlowControlWindowBytes(),
2 * kInitialStreamFlowControlWindowForTest);
EXPECT_EQ(config_.ReceivedInitialSessionFlowControlWindowBytes(),
2 * kInitialSessionFlowControlWindowForTest);
EXPECT_TRUE(config_.HasReceivedIPv4AlternateServerAddress());
EXPECT_EQ(kTestServerAddress, config_.ReceivedIPv4AlternateServerAddress());
EXPECT_FALSE(config_.HasReceivedIPv6AlternateServerAddress());
EXPECT_TRUE(config_.HasReceivedStatelessResetToken());
EXPECT_EQ(kTestStatelessResetToken, config_.ReceivedStatelessResetToken());
EXPECT_TRUE(config_.HasReceivedMaxAckDelayMs());
EXPECT_EQ(kTestMaxAckDelayMs, config_.ReceivedMaxAckDelayMs());
EXPECT_FALSE(
config_.HasReceivedInitialMaxStreamDataBytesIncomingBidirectional());
EXPECT_FALSE(
config_.HasReceivedInitialMaxStreamDataBytesOutgoingBidirectional());
EXPECT_FALSE(config_.HasReceivedInitialMaxStreamDataBytesUnidirectional());
}
TEST_P(QuicConfigTest, MissingOptionalValuesInCHLO) {
if (version_.UsesTls()) {
return;
}
CryptoHandshakeMessage msg;
  // Set only the REQUIRED tags; all optional values are left missing.
  msg.SetValue(kICSL, 1);
  msg.SetValue(kMIBS, 1);
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, CLIENT, &error_details);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_TRUE(config_.negotiated());
}
TEST_P(QuicConfigTest, MissingOptionalValuesInSHLO) {
if (version_.UsesTls()) {
return;
}
CryptoHandshakeMessage msg;
msg.SetValue(kICSL, 1);
msg.SetValue(kMIBS, 1);
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, SERVER, &error_details);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_TRUE(config_.negotiated());
}
TEST_P(QuicConfigTest, MissingValueInCHLO) {
if (version_.UsesTls()) {
return;
}
CryptoHandshakeMessage msg;
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, CLIENT, &error_details);
EXPECT_THAT(error, IsError(QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND));
}
TEST_P(QuicConfigTest, MissingValueInSHLO) {
if (version_.UsesTls()) {
return;
}
CryptoHandshakeMessage msg;
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, SERVER, &error_details);
EXPECT_THAT(error, IsError(QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND));
}
TEST_P(QuicConfigTest, OutOfBoundSHLO) {
if (version_.UsesTls()) {
return;
}
QuicConfig server_config;
server_config.SetIdleNetworkTimeout(
QuicTime::Delta::FromSeconds(2 * kMaximumIdleTimeoutSecs));
CryptoHandshakeMessage msg;
server_config.ToHandshakeMessage(&msg, version_.transport_version);
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, SERVER, &error_details);
EXPECT_THAT(error, IsError(QUIC_INVALID_NEGOTIATED_VALUE));
}
TEST_P(QuicConfigTest, InvalidFlowControlWindow) {
QuicConfig config;
const uint64_t kInvalidWindow = kMinimumFlowControlSendWindow - 1;
EXPECT_QUIC_BUG(
config.SetInitialStreamFlowControlWindowToSend(kInvalidWindow),
"Initial stream flow control receive window");
EXPECT_EQ(kMinimumFlowControlSendWindow,
config.GetInitialStreamFlowControlWindowToSend());
}
TEST_P(QuicConfigTest, HasClientSentConnectionOption) {
if (version_.UsesTls()) {
return;
}
QuicConfig client_config;
QuicTagVector copt;
copt.push_back(kTBBR);
client_config.SetConnectionOptionsToSend(copt);
EXPECT_TRUE(client_config.HasClientSentConnectionOption(
kTBBR, Perspective::IS_CLIENT));
CryptoHandshakeMessage msg;
client_config.ToHandshakeMessage(&msg, version_.transport_version);
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, CLIENT, &error_details);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_TRUE(config_.negotiated());
EXPECT_TRUE(config_.HasReceivedConnectionOptions());
EXPECT_EQ(1u, config_.ReceivedConnectionOptions().size());
EXPECT_TRUE(
config_.HasClientSentConnectionOption(kTBBR, Perspective::IS_SERVER));
}
TEST_P(QuicConfigTest, DontSendClientConnectionOptions) {
if (version_.UsesTls()) {
return;
}
QuicConfig client_config;
QuicTagVector copt;
copt.push_back(kTBBR);
client_config.SetClientConnectionOptions(copt);
CryptoHandshakeMessage msg;
client_config.ToHandshakeMessage(&msg, version_.transport_version);
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, CLIENT, &error_details);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_TRUE(config_.negotiated());
EXPECT_FALSE(config_.HasReceivedConnectionOptions());
}
TEST_P(QuicConfigTest, HasClientRequestedIndependentOption) {
if (version_.UsesTls()) {
return;
}
QuicConfig client_config;
QuicTagVector client_opt;
client_opt.push_back(kRENO);
QuicTagVector copt;
copt.push_back(kTBBR);
client_config.SetClientConnectionOptions(client_opt);
client_config.SetConnectionOptionsToSend(copt);
EXPECT_TRUE(client_config.HasClientSentConnectionOption(
kTBBR, Perspective::IS_CLIENT));
EXPECT_TRUE(client_config.HasClientRequestedIndependentOption(
kRENO, Perspective::IS_CLIENT));
EXPECT_FALSE(client_config.HasClientRequestedIndependentOption(
kTBBR, Perspective::IS_CLIENT));
CryptoHandshakeMessage msg;
client_config.ToHandshakeMessage(&msg, version_.transport_version);
std::string error_details;
const QuicErrorCode error =
config_.ProcessPeerHello(msg, CLIENT, &error_details);
EXPECT_THAT(error, IsQuicNoError());
EXPECT_TRUE(config_.negotiated());
EXPECT_TRUE(config_.HasReceivedConnectionOptions());
EXPECT_EQ(1u, config_.ReceivedConnectionOptions().size());
EXPECT_FALSE(config_.HasClientRequestedIndependentOption(
kRENO, Perspective::IS_SERVER));
EXPECT_TRUE(config_.HasClientRequestedIndependentOption(
kTBBR, Perspective::IS_SERVER));
}
TEST_P(QuicConfigTest, IncomingLargeIdleTimeoutTransportParameter) {
if (!version_.UsesTls()) {
return;
}
config_.SetIdleNetworkTimeout(quic::QuicTime::Delta::FromSeconds(60));
TransportParameters params;
params.max_idle_timeout_ms.set_value(120000);
std::string error_details = "foobar";
EXPECT_THAT(config_.ProcessTransportParameters(
                  params, /*is_resumption=*/false, &error_details),
IsQuicNoError());
EXPECT_EQ("", error_details);
EXPECT_EQ(quic::QuicTime::Delta::FromSeconds(60),
config_.IdleNetworkTimeout());
}
TEST_P(QuicConfigTest, ReceivedInvalidMinAckDelayInTransportParameter) {
if (!version_.UsesTls()) {
return;
}
TransportParameters params;
  params.max_ack_delay.set_value(25 /*ms*/);
params.min_ack_delay_us.set_value(25 * kNumMicrosPerMilli + 1);
std::string error_details = "foobar";
EXPECT_THAT(config_.ProcessTransportParameters(
                  params, /*is_resumption=*/false, &error_details),
IsError(IETF_QUIC_PROTOCOL_VIOLATION));
EXPECT_EQ("MinAckDelay is greater than MaxAckDelay.", error_details);
  params.max_ack_delay.set_value(25 /*ms*/);
params.min_ack_delay_us.set_value(25 * kNumMicrosPerMilli);
EXPECT_THAT(config_.ProcessTransportParameters(
                  params, /*is_resumption=*/false, &error_details),
IsQuicNoError());
EXPECT_TRUE(error_details.empty());
}
TEST_P(QuicConfigTest, FillTransportParams) {
if (!version_.UsesTls()) {
return;
}
const std::string kFakeGoogleHandshakeMessage = "Fake handshake message";
config_.SetInitialMaxStreamDataBytesIncomingBidirectionalToSend(
2 * kMinimumFlowControlSendWindow);
config_.SetInitialMaxStreamDataBytesOutgoingBidirectionalToSend(
3 * kMinimumFlowControlSendWindow);
config_.SetInitialMaxStreamDataBytesUnidirectionalToSend(
4 * kMinimumFlowControlSendWindow);
config_.SetMaxPacketSizeToSend(kMaxPacketSizeForTest);
config_.SetMaxDatagramFrameSizeToSend(kMaxDatagramFrameSizeForTest);
config_.SetActiveConnectionIdLimitToSend(kActiveConnectionIdLimitForTest);
config_.SetOriginalConnectionIdToSend(TestConnectionId(0x1111));
config_.SetInitialSourceConnectionIdToSend(TestConnectionId(0x2222));
config_.SetRetrySourceConnectionIdToSend(TestConnectionId(0x3333));
config_.SetMinAckDelayMs(kDefaultMinAckDelayTimeMs);
config_.SetGoogleHandshakeMessageToSend(kFakeGoogleHandshakeMessage);
config_.SetReliableStreamReset(true);
QuicIpAddress host;
host.FromString("127.0.3.1");
QuicSocketAddress kTestServerAddress = QuicSocketAddress(host, 1234);
QuicConnectionId new_connection_id = TestConnectionId(5);
StatelessResetToken new_stateless_reset_token =
QuicUtils::GenerateStatelessResetToken(new_connection_id);
config_.SetIPv4AlternateServerAddressToSend(kTestServerAddress);
QuicSocketAddress kTestServerAddressV6 =
QuicSocketAddress(QuicIpAddress::Any6(), 1234);
config_.SetIPv6AlternateServerAddressToSend(kTestServerAddressV6);
config_.SetPreferredAddressConnectionIdAndTokenToSend(
new_connection_id, new_stateless_reset_token);
config_.ClearAlternateServerAddressToSend(quiche::IpAddressFamily::IP_V6);
EXPECT_TRUE(config_.GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V4)
.has_value());
EXPECT_FALSE(config_.GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V6)
.has_value());
TransportParameters params;
config_.FillTransportParameters(¶ms);
EXPECT_EQ(2 * kMinimumFlowControlSendWindow,
params.initial_max_stream_data_bidi_remote.value());
EXPECT_EQ(3 * kMinimumFlowControlSendWindow,
params.initial_max_stream_data_bidi_local.value());
EXPECT_EQ(4 * kMinimumFlowControlSendWindow,
params.initial_max_stream_data_uni.value());
EXPECT_EQ(static_cast<uint64_t>(kMaximumIdleTimeoutSecs * 1000),
params.max_idle_timeout_ms.value());
EXPECT_EQ(kMaxPacketSizeForTest, params.max_udp_payload_size.value());
EXPECT_EQ(kMaxDatagramFrameSizeForTest,
params.max_datagram_frame_size.value());
EXPECT_EQ(kActiveConnectionIdLimitForTest,
params.active_connection_id_limit.value());
ASSERT_TRUE(params.original_destination_connection_id.has_value());
EXPECT_EQ(TestConnectionId(0x1111),
params.original_destination_connection_id.value());
ASSERT_TRUE(params.initial_source_connection_id.has_value());
EXPECT_EQ(TestConnectionId(0x2222),
params.initial_source_connection_id.value());
ASSERT_TRUE(params.retry_source_connection_id.has_value());
EXPECT_EQ(TestConnectionId(0x3333),
params.retry_source_connection_id.value());
EXPECT_EQ(
static_cast<uint64_t>(kDefaultMinAckDelayTimeMs) * kNumMicrosPerMilli,
params.min_ack_delay_us.value());
EXPECT_EQ(params.preferred_address->ipv4_socket_address, kTestServerAddress);
EXPECT_EQ(params.preferred_address->ipv6_socket_address,
QuicSocketAddress(QuicIpAddress::Any6(), 0));
EXPECT_EQ(*reinterpret_cast<StatelessResetToken*>(
¶ms.preferred_address->stateless_reset_token.front()),
new_stateless_reset_token);
EXPECT_EQ(kFakeGoogleHandshakeMessage, params.google_handshake_message);
EXPECT_TRUE(params.reliable_stream_reset);
}
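// With the DNat setters, the preferred addresses advertised to the peer and
// the locally mapped addresses are allowed to differ.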
TEST_P(QuicConfigTest, DNATPreferredAddress) {
QuicIpAddress host_v4;
host_v4.FromString("127.0.3.1");
QuicSocketAddress server_address_v4 = QuicSocketAddress(host_v4, 1234);
QuicSocketAddress expected_server_address_v4 =
QuicSocketAddress(host_v4, 1235);
QuicIpAddress host_v6;
host_v6.FromString("2001:db8:0::1");
QuicSocketAddress server_address_v6 = QuicSocketAddress(host_v6, 1234);
QuicSocketAddress expected_server_address_v6 =
QuicSocketAddress(host_v6, 1235);
config_.SetIPv4AlternateServerAddressForDNat(server_address_v4,
expected_server_address_v4);
config_.SetIPv6AlternateServerAddressForDNat(server_address_v6,
expected_server_address_v6);
EXPECT_EQ(server_address_v4,
config_.GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V4));
EXPECT_EQ(server_address_v6,
config_.GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V6));
EXPECT_EQ(expected_server_address_v4,
config_.GetMappedAlternativeServerAddress(
quiche::IpAddressFamily::IP_V4));
EXPECT_EQ(expected_server_address_v6,
config_.GetMappedAlternativeServerAddress(
quiche::IpAddressFamily::IP_V6));
}
TEST_P(QuicConfigTest, FillTransportParamsNoV4PreferredAddress) {
if (!version_.UsesTls()) {
return;
}
QuicIpAddress host;
host.FromString("127.0.3.1");
QuicSocketAddress kTestServerAddress = QuicSocketAddress(host, 1234);
QuicConnectionId new_connection_id = TestConnectionId(5);
StatelessResetToken new_stateless_reset_token =
QuicUtils::GenerateStatelessResetToken(new_connection_id);
config_.SetIPv4AlternateServerAddressToSend(kTestServerAddress);
QuicSocketAddress kTestServerAddressV6 =
QuicSocketAddress(QuicIpAddress::Any6(), 1234);
config_.SetIPv6AlternateServerAddressToSend(kTestServerAddressV6);
config_.SetPreferredAddressConnectionIdAndTokenToSend(
new_connection_id, new_stateless_reset_token);
config_.ClearAlternateServerAddressToSend(quiche::IpAddressFamily::IP_V4);
EXPECT_FALSE(config_.GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V4)
.has_value());
config_.ClearAlternateServerAddressToSend(quiche::IpAddressFamily::IP_V4);
TransportParameters params;
config_.FillTransportParameters(¶ms);
EXPECT_EQ(params.preferred_address->ipv4_socket_address,
QuicSocketAddress(QuicIpAddress::Any4(), 0));
EXPECT_EQ(params.preferred_address->ipv6_socket_address,
kTestServerAddressV6);
}
TEST_P(QuicConfigTest, SupportsServerPreferredAddress) {
SetQuicFlag(quic_always_support_server_preferred_address, true);
EXPECT_TRUE(config_.SupportsServerPreferredAddress(Perspective::IS_CLIENT));
EXPECT_TRUE(config_.SupportsServerPreferredAddress(Perspective::IS_SERVER));
SetQuicFlag(quic_always_support_server_preferred_address, false);
EXPECT_FALSE(config_.SupportsServerPreferredAddress(Perspective::IS_CLIENT));
EXPECT_FALSE(config_.SupportsServerPreferredAddress(Perspective::IS_SERVER));
QuicTagVector copt;
copt.push_back(kSPAD);
config_.SetConnectionOptionsToSend(copt);
EXPECT_TRUE(config_.SupportsServerPreferredAddress(Perspective::IS_CLIENT));
EXPECT_FALSE(config_.SupportsServerPreferredAddress(Perspective::IS_SERVER));
config_.SetInitialReceivedConnectionOptions(copt);
EXPECT_TRUE(config_.SupportsServerPreferredAddress(Perspective::IS_CLIENT));
EXPECT_TRUE(config_.SupportsServerPreferredAddress(Perspective::IS_SERVER));
}
TEST_P(QuicConfigTest, AddConnectionOptionsToSend) {
QuicTagVector copt;
copt.push_back(kNOIP);
copt.push_back(kFPPE);
config_.AddConnectionOptionsToSend(copt);
ASSERT_TRUE(config_.HasSendConnectionOptions());
EXPECT_TRUE(quic::ContainsQuicTag(config_.SendConnectionOptions(), kNOIP));
EXPECT_TRUE(quic::ContainsQuicTag(config_.SendConnectionOptions(), kFPPE));
copt.clear();
copt.push_back(kSPAD);
copt.push_back(kSPA2);
config_.AddConnectionOptionsToSend(copt);
  ASSERT_EQ(4u, config_.SendConnectionOptions().size());
EXPECT_TRUE(quic::ContainsQuicTag(config_.SendConnectionOptions(), kNOIP));
EXPECT_TRUE(quic::ContainsQuicTag(config_.SendConnectionOptions(), kFPPE));
EXPECT_TRUE(quic::ContainsQuicTag(config_.SendConnectionOptions(), kSPAD));
EXPECT_TRUE(quic::ContainsQuicTag(config_.SendConnectionOptions(), kSPA2));
}
TEST_P(QuicConfigTest, ProcessTransportParametersServer) {
if (!version_.UsesTls()) {
return;
}
const std::string kFakeGoogleHandshakeMessage = "Fake handshake message";
TransportParameters params;
params.initial_max_stream_data_bidi_local.set_value(
2 * kMinimumFlowControlSendWindow);
params.initial_max_stream_data_bidi_remote.set_value(
3 * kMinimumFlowControlSendWindow);
params.initial_max_stream_data_uni.set_value(4 *
kMinimumFlowControlSendWindow);
params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
params.max_datagram_frame_size.set_value(kMaxDatagramFrameSizeForTest);
params.initial_max_streams_bidi.set_value(kDefaultMaxStreamsPerConnection);
params.stateless_reset_token = CreateStatelessResetTokenForTest();
params.max_ack_delay.set_value(kMaxAckDelayForTest);
params.min_ack_delay_us.set_value(kMinAckDelayUsForTest);
params.ack_delay_exponent.set_value(kAckDelayExponentForTest);
params.active_connection_id_limit.set_value(kActiveConnectionIdLimitForTest);
params.original_destination_connection_id = TestConnectionId(0x1111);
params.initial_source_connection_id = TestConnectionId(0x2222);
params.retry_source_connection_id = TestConnectionId(0x3333);
params.google_handshake_message = kFakeGoogleHandshakeMessage;
std::string error_details;
  EXPECT_THAT(config_.ProcessTransportParameters(
                  params, /*is_resumption=*/true, &error_details),
              IsQuicNoError())
      << error_details;
EXPECT_FALSE(config_.negotiated());
ASSERT_TRUE(
config_.HasReceivedInitialMaxStreamDataBytesIncomingBidirectional());
EXPECT_EQ(2 * kMinimumFlowControlSendWindow,
config_.ReceivedInitialMaxStreamDataBytesIncomingBidirectional());
ASSERT_TRUE(
config_.HasReceivedInitialMaxStreamDataBytesOutgoingBidirectional());
EXPECT_EQ(3 * kMinimumFlowControlSendWindow,
config_.ReceivedInitialMaxStreamDataBytesOutgoingBidirectional());
ASSERT_TRUE(config_.HasReceivedInitialMaxStreamDataBytesUnidirectional());
EXPECT_EQ(4 * kMinimumFlowControlSendWindow,
config_.ReceivedInitialMaxStreamDataBytesUnidirectional());
ASSERT_TRUE(config_.HasReceivedMaxPacketSize());
EXPECT_EQ(kMaxPacketSizeForTest, config_.ReceivedMaxPacketSize());
ASSERT_TRUE(config_.HasReceivedMaxDatagramFrameSize());
EXPECT_EQ(kMaxDatagramFrameSizeForTest,
config_.ReceivedMaxDatagramFrameSize());
ASSERT_TRUE(config_.HasReceivedMaxBidirectionalStreams());
EXPECT_EQ(kDefaultMaxStreamsPerConnection,
config_.ReceivedMaxBidirectionalStreams());
EXPECT_FALSE(config_.DisableConnectionMigration());
EXPECT_FALSE(config_.HasReceivedStatelessResetToken());
EXPECT_FALSE(config_.HasReceivedMaxAckDelayMs());
EXPECT_FALSE(config_.HasReceivedAckDelayExponent());
EXPECT_FALSE(config_.HasReceivedMinAckDelayMs());
EXPECT_FALSE(config_.HasReceivedOriginalConnectionId());
EXPECT_FALSE(config_.HasReceivedInitialSourceConnectionId());
EXPECT_FALSE(config_.HasReceivedRetrySourceConnectionId());
params.initial_max_stream_data_bidi_local.set_value(
2 * kMinimumFlowControlSendWindow + 1);
params.initial_max_stream_data_bidi_remote.set_value(
4 * kMinimumFlowControlSendWindow);
params.initial_max_stream_data_uni.set_value(5 *
kMinimumFlowControlSendWindow);
params.max_udp_payload_size.set_value(2 * kMaxPacketSizeForTest);
params.max_datagram_frame_size.set_value(2 * kMaxDatagramFrameSizeForTest);
params.initial_max_streams_bidi.set_value(2 *
kDefaultMaxStreamsPerConnection);
params.disable_active_migration = true;
  EXPECT_THAT(config_.ProcessTransportParameters(
                  params, /*is_resumption=*/false, &error_details),
              IsQuicNoError())
      << error_details;
EXPECT_TRUE(config_.negotiated());
ASSERT_TRUE(
config_.HasReceivedInitialMaxStreamDataBytesIncomingBidirectional());
EXPECT_EQ(2 * kMinimumFlowControlSendWindow + 1,
config_.ReceivedInitialMaxStreamDataBytesIncomingBidirectional());
ASSERT_TRUE(
config_.HasReceivedInitialMaxStreamDataBytesOutgoingBidirectional());
EXPECT_EQ(4 * kMinimumFlowControlSendWindow,
config_.ReceivedInitialMaxStreamDataBytesOutgoingBidirectional());
ASSERT_TRUE(config_.HasReceivedInitialMaxStreamDataBytesUnidirectional());
EXPECT_EQ(5 * kMinimumFlowControlSendWindow,
config_.ReceivedInitialMaxStreamDataBytesUnidirectional());
ASSERT_TRUE(config_.HasReceivedMaxPacketSize());
EXPECT_EQ(2 * kMaxPacketSizeForTest, config_.ReceivedMaxPacketSize());
ASSERT_TRUE(config_.HasReceivedMaxDatagramFrameSize());
EXPECT_EQ(2 * kMaxDatagramFrameSizeForTest,
config_.ReceivedMaxDatagramFrameSize());
ASSERT_TRUE(config_.HasReceivedMaxBidirectionalStreams());
EXPECT_EQ(2 * kDefaultMaxStreamsPerConnection,
config_.ReceivedMaxBidirectionalStreams());
EXPECT_TRUE(config_.DisableConnectionMigration());
ASSERT_TRUE(config_.HasReceivedStatelessResetToken());
ASSERT_TRUE(config_.HasReceivedMaxAckDelayMs());
EXPECT_EQ(config_.ReceivedMaxAckDelayMs(), kMaxAckDelayForTest);
ASSERT_TRUE(config_.HasReceivedMinAckDelayMs());
EXPECT_EQ(config_.ReceivedMinAckDelayMs(),
kMinAckDelayUsForTest / kNumMicrosPerMilli);
ASSERT_TRUE(config_.HasReceivedAckDelayExponent());
EXPECT_EQ(config_.ReceivedAckDelayExponent(), kAckDelayExponentForTest);
ASSERT_TRUE(config_.HasReceivedActiveConnectionIdLimit());
EXPECT_EQ(config_.ReceivedActiveConnectionIdLimit(),
kActiveConnectionIdLimitForTest);
ASSERT_TRUE(config_.HasReceivedOriginalConnectionId());
EXPECT_EQ(config_.ReceivedOriginalConnectionId(), TestConnectionId(0x1111));
ASSERT_TRUE(config_.HasReceivedInitialSourceConnectionId());
EXPECT_EQ(config_.ReceivedInitialSourceConnectionId(),
TestConnectionId(0x2222));
ASSERT_TRUE(config_.HasReceivedRetrySourceConnectionId());
EXPECT_EQ(config_.ReceivedRetrySourceConnectionId(),
TestConnectionId(0x3333));
EXPECT_EQ(kFakeGoogleHandshakeMessage,
config_.GetReceivedGoogleHandshakeMessage());
}
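// The disable_active_migration transport parameter maps directly onto
// DisableConnectionMigration().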
TEST_P(QuicConfigTest, DisableMigrationTransportParameter) {
if (!version_.UsesTls()) {
return;
}
TransportParameters params;
params.disable_active_migration = true;
std::string error_details;
  EXPECT_THAT(config_.ProcessTransportParameters(
                  params, /*is_resumption=*/false, &error_details),
              IsQuicNoError());
EXPECT_TRUE(config_.DisableConnectionMigration());
}
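// An IPv4-mapped IPv6 preferred address is surfaced through the IPv6
// accessors together with its connection ID and stateless reset token.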
TEST_P(QuicConfigTest, SendPreferredIPv4Address) {
if (!version_.UsesTls()) {
return;
}
EXPECT_FALSE(config_.HasReceivedPreferredAddressConnectionIdAndToken());
TransportParameters params;
QuicIpAddress host;
host.FromString("::ffff:192.0.2.128");
QuicSocketAddress kTestServerAddress = QuicSocketAddress(host, 1234);
QuicConnectionId new_connection_id = TestConnectionId(5);
StatelessResetToken new_stateless_reset_token =
QuicUtils::GenerateStatelessResetToken(new_connection_id);
auto preferred_address =
std::make_unique<TransportParameters::PreferredAddress>();
preferred_address->ipv6_socket_address = kTestServerAddress;
preferred_address->connection_id = new_connection_id;
preferred_address->stateless_reset_token.assign(
reinterpret_cast<const char*>(&new_stateless_reset_token),
reinterpret_cast<const char*>(&new_stateless_reset_token) +
sizeof(new_stateless_reset_token));
params.preferred_address = std::move(preferred_address);
std::string error_details;
  EXPECT_THAT(config_.ProcessTransportParameters(
                  params, /*is_resumption=*/false, &error_details),
              IsQuicNoError());
EXPECT_TRUE(config_.HasReceivedIPv6AlternateServerAddress());
EXPECT_EQ(config_.ReceivedIPv6AlternateServerAddress(), kTestServerAddress);
EXPECT_TRUE(config_.HasReceivedPreferredAddressConnectionIdAndToken());
const std::pair<QuicConnectionId, StatelessResetToken>&
preferred_address_connection_id_and_token =
config_.ReceivedPreferredAddressConnectionIdAndToken();
EXPECT_EQ(preferred_address_connection_id_and_token.first, new_connection_id);
EXPECT_EQ(preferred_address_connection_id_and_token.second,
new_stateless_reset_token);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_config.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_config_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
cf56abb7-7215-4d48-bfa7-541690f4acc7 | cpp | abseil/abseil-cpp | exception_safety_testing | absl/base/internal/exception_safety_testing.cc | absl/base/exception_safety_testing_test.cc | #include "absl/base/internal/exception_safety_testing.h"
#ifdef ABSL_HAVE_EXCEPTIONS
#include "gtest/gtest.h"
#include "absl/meta/type_traits.h"
namespace testing {
exceptions_internal::NoThrowTag nothrow_ctor;
exceptions_internal::StrongGuaranteeTagType strong_guarantee;
exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() {
return {};
}
namespace exceptions_internal {
int countdown = -1;
ConstructorTracker* ConstructorTracker::current_tracker_instance_ = nullptr;
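// Throws TestException (or TestBadAllocException when requested) once the
// global countdown reaches zero; a negative countdown never fires.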
void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) {
if (countdown-- == 0) {
if (throw_bad_alloc) throw TestBadAllocException(msg);
throw TestException(msg);
}
}
testing::AssertionResult FailureMessage(const TestException& e,
int countdown) noexcept {
return testing::AssertionFailure() << "Exception thrown from " << e.what();
}
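// Renders the TypeSpec bitmask as a human-readable " | "-separated list for
// use in test failure messages.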
std::string GetSpecString(TypeSpec spec) {
std::string out;
absl::string_view sep;
const auto append = [&](absl::string_view s) {
absl::StrAppend(&out, sep, s);
sep = " | ";
};
if (static_cast<bool>(TypeSpec::kNoThrowCopy & spec)) {
append("kNoThrowCopy");
}
if (static_cast<bool>(TypeSpec::kNoThrowMove & spec)) {
append("kNoThrowMove");
}
if (static_cast<bool>(TypeSpec::kNoThrowNew & spec)) {
append("kNoThrowNew");
}
return out;
}
std::string GetSpecString(AllocSpec spec) {
return static_cast<bool>(AllocSpec::kNoThrowAllocate & spec)
? "kNoThrowAllocate"
: "";
}
}
}
#endif | #include "absl/base/internal/exception_safety_testing.h"
#ifdef ABSL_HAVE_EXCEPTIONS
#include <cstddef>
#include <exception>
#include <iostream>
#include <list>
#include <type_traits>
#include <vector>
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#include "absl/memory/memory.h"
namespace testing {
namespace {
using ::testing::exceptions_internal::SetCountdown;
using ::testing::exceptions_internal::TestException;
using ::testing::exceptions_internal::UnsetCountdown;
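// Runs `f` and reports a non-fatal test failure if it throws a
// TestException.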
template <typename F>
void ExpectNoThrow(const F& f) {
try {
f();
} catch (const TestException& e) {
ADD_FAILURE() << "Unexpected exception thrown from " << e.what();
}
}
TEST(ThrowingValueTest, Throws) {
SetCountdown();
EXPECT_THROW(ThrowingValue<> bomb, TestException);
SetCountdown(2);
ExpectNoThrow([]() { ThrowingValue<> bomb; });
ExpectNoThrow([]() { ThrowingValue<> bomb; });
EXPECT_THROW(ThrowingValue<> bomb, TestException);
UnsetCountdown();
}
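// Verifies that `f` succeeds when no countdown is armed and throws exactly
// when one is.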
template <typename F>
void TestOp(const F& f) {
ExpectNoThrow(f);
SetCountdown();
EXPECT_THROW(f(), TestException);
UnsetCountdown();
}
TEST(ThrowingValueTest, ThrowingCtors) {
ThrowingValue<> bomb;
TestOp([]() { ThrowingValue<> bomb(1); });
TestOp([&]() { ThrowingValue<> bomb1 = bomb; });
TestOp([&]() { ThrowingValue<> bomb1 = std::move(bomb); });
}
TEST(ThrowingValueTest, ThrowingAssignment) {
ThrowingValue<> bomb, bomb1;
TestOp([&]() { bomb = bomb1; });
TestOp([&]() { bomb = std::move(bomb1); });
{
ThrowingValue<> lhs(39), rhs(42);
ThrowingValue<> lhs_copy(lhs);
SetCountdown();
EXPECT_THROW(lhs = rhs, TestException);
UnsetCountdown();
EXPECT_NE(lhs, rhs);
EXPECT_NE(lhs_copy, lhs);
}
{
ThrowingValue<> lhs(39), rhs(42);
ThrowingValue<> lhs_copy(lhs), rhs_copy(rhs);
SetCountdown();
EXPECT_THROW(lhs = std::move(rhs), TestException);
UnsetCountdown();
EXPECT_NE(lhs, rhs_copy);
EXPECT_NE(lhs_copy, lhs);
}
}
TEST(ThrowingValueTest, ThrowingComparisons) {
ThrowingValue<> bomb1, bomb2;
TestOp([&]() { return bomb1 == bomb2; });
TestOp([&]() { return bomb1 != bomb2; });
TestOp([&]() { return bomb1 < bomb2; });
TestOp([&]() { return bomb1 <= bomb2; });
TestOp([&]() { return bomb1 > bomb2; });
TestOp([&]() { return bomb1 >= bomb2; });
}
TEST(ThrowingValueTest, ThrowingArithmeticOps) {
ThrowingValue<> bomb1(1), bomb2(2);
TestOp([&bomb1]() { +bomb1; });
TestOp([&bomb1]() { -bomb1; });
TestOp([&bomb1]() { ++bomb1; });
TestOp([&bomb1]() { bomb1++; });
TestOp([&bomb1]() { --bomb1; });
TestOp([&bomb1]() { bomb1--; });
TestOp([&]() { bomb1 + bomb2; });
TestOp([&]() { bomb1 - bomb2; });
TestOp([&]() { bomb1* bomb2; });
TestOp([&]() { bomb1 / bomb2; });
TestOp([&]() { bomb1 << 1; });
TestOp([&]() { bomb1 >> 1; });
}
TEST(ThrowingValueTest, ThrowingLogicalOps) {
ThrowingValue<> bomb1, bomb2;
TestOp([&bomb1]() { !bomb1; });
TestOp([&]() { bomb1&& bomb2; });
TestOp([&]() { bomb1 || bomb2; });
}
TEST(ThrowingValueTest, ThrowingBitwiseOps) {
ThrowingValue<> bomb1, bomb2;
TestOp([&bomb1]() { ~bomb1; });
TestOp([&]() { bomb1 & bomb2; });
TestOp([&]() { bomb1 | bomb2; });
TestOp([&]() { bomb1 ^ bomb2; });
}
TEST(ThrowingValueTest, ThrowingCompoundAssignmentOps) {
ThrowingValue<> bomb1(1), bomb2(2);
TestOp([&]() { bomb1 += bomb2; });
TestOp([&]() { bomb1 -= bomb2; });
TestOp([&]() { bomb1 *= bomb2; });
TestOp([&]() { bomb1 /= bomb2; });
TestOp([&]() { bomb1 %= bomb2; });
TestOp([&]() { bomb1 &= bomb2; });
TestOp([&]() { bomb1 |= bomb2; });
TestOp([&]() { bomb1 ^= bomb2; });
TestOp([&]() { bomb1 *= bomb2; });
}
TEST(ThrowingValueTest, ThrowingStreamOps) {
ThrowingValue<> bomb;
TestOp([&]() {
std::istringstream stream;
stream >> bomb;
});
TestOp([&]() {
std::stringstream stream;
stream << bomb;
});
}
TEST(ThrowingValueTest, StreamOpsOutput) {
using ::testing::TypeSpec;
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
EXPECT_NONFATAL_FAILURE(
{
using Thrower = ThrowingValue<TypeSpec{}>;
auto thrower = Thrower(123);
thrower.~Thrower();
},
"ThrowingValue<>(123)");
EXPECT_NONFATAL_FAILURE(
{
using Thrower = ThrowingValue<TypeSpec::kNoThrowCopy>;
auto thrower = Thrower(234);
thrower.~Thrower();
},
"ThrowingValue<kNoThrowCopy>(234)");
EXPECT_NONFATAL_FAILURE(
{
using Thrower =
ThrowingValue<TypeSpec::kNoThrowMove | TypeSpec::kNoThrowNew>;
auto thrower = Thrower(345);
thrower.~Thrower();
},
"ThrowingValue<kNoThrowMove | kNoThrowNew>(345)");
EXPECT_NONFATAL_FAILURE(
{
using Thrower = ThrowingValue<static_cast<TypeSpec>(-1)>;
auto thrower = Thrower(456);
thrower.~Thrower();
},
"ThrowingValue<kNoThrowCopy | kNoThrowMove | kNoThrowNew>(456)");
}
template <typename F>
void TestAllocatingOp(const F& f) {
ExpectNoThrow(f);
SetCountdown();
EXPECT_THROW(f(), exceptions_internal::TestBadAllocException);
UnsetCountdown();
}
TEST(ThrowingValueTest, ThrowingAllocatingOps) {
TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>>(1); });
TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>[]>(2); });
}
TEST(ThrowingValueTest, NonThrowingMoveCtor) {
ThrowingValue<TypeSpec::kNoThrowMove> nothrow_ctor;
SetCountdown();
  ExpectNoThrow([&nothrow_ctor]() {
ThrowingValue<TypeSpec::kNoThrowMove> nothrow1 = std::move(nothrow_ctor);
});
UnsetCountdown();
}
TEST(ThrowingValueTest, NonThrowingMoveAssign) {
ThrowingValue<TypeSpec::kNoThrowMove> nothrow_assign1, nothrow_assign2;
SetCountdown();
  ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
nothrow_assign1 = std::move(nothrow_assign2);
});
UnsetCountdown();
}
TEST(ThrowingValueTest, ThrowingCopyCtor) {
ThrowingValue<> tv;
TestOp([&]() { ThrowingValue<> tv_copy(tv); });
}
TEST(ThrowingValueTest, ThrowingCopyAssign) {
ThrowingValue<> tv1, tv2;
TestOp([&]() { tv1 = tv2; });
}
TEST(ThrowingValueTest, NonThrowingCopyCtor) {
ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_ctor;
SetCountdown();
  ExpectNoThrow([&nothrow_ctor]() {
ThrowingValue<TypeSpec::kNoThrowCopy> nothrow1(nothrow_ctor);
});
UnsetCountdown();
}
TEST(ThrowingValueTest, NonThrowingCopyAssign) {
ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_assign1, nothrow_assign2;
SetCountdown();
  ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
nothrow_assign1 = nothrow_assign2;
});
UnsetCountdown();
}
TEST(ThrowingValueTest, ThrowingSwap) {
ThrowingValue<> bomb1, bomb2;
TestOp([&]() { std::swap(bomb1, bomb2); });
}
TEST(ThrowingValueTest, NonThrowingSwap) {
ThrowingValue<TypeSpec::kNoThrowMove> bomb1, bomb2;
ExpectNoThrow([&]() { std::swap(bomb1, bomb2); });
}
TEST(ThrowingValueTest, NonThrowingAllocation) {
ThrowingValue<TypeSpec::kNoThrowNew>* allocated;
ThrowingValue<TypeSpec::kNoThrowNew>* array;
ExpectNoThrow([&allocated]() {
allocated = new ThrowingValue<TypeSpec::kNoThrowNew>(1);
delete allocated;
});
ExpectNoThrow([&array]() {
array = new ThrowingValue<TypeSpec::kNoThrowNew>[2];
delete[] array;
});
}
TEST(ThrowingValueTest, NonThrowingDelete) {
auto* allocated = new ThrowingValue<>(1);
auto* array = new ThrowingValue<>[2];
SetCountdown();
ExpectNoThrow([allocated]() { delete allocated; });
SetCountdown();
ExpectNoThrow([array]() { delete[] array; });
UnsetCountdown();
}
TEST(ThrowingValueTest, NonThrowingPlacementDelete) {
constexpr int kArrayLen = 2;
constexpr size_t kExtraSpaceLen = sizeof(size_t) * 2;
alignas(ThrowingValue<>) unsigned char buf[sizeof(ThrowingValue<>)];
alignas(ThrowingValue<>) unsigned char
array_buf[kExtraSpaceLen + sizeof(ThrowingValue<>[kArrayLen])];
auto* placed = new (&buf) ThrowingValue<>(1);
auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen];
auto* placed_array_end = reinterpret_cast<unsigned char*>(placed_array) +
sizeof(ThrowingValue<>[kArrayLen]);
EXPECT_LE(placed_array_end, array_buf + sizeof(array_buf));
SetCountdown();
ExpectNoThrow([placed, &buf]() {
placed->~ThrowingValue<>();
ThrowingValue<>::operator delete(placed, &buf);
});
SetCountdown();
ExpectNoThrow([&, placed_array]() {
for (int i = 0; i < kArrayLen; ++i) placed_array[i].~ThrowingValue<>();
ThrowingValue<>::operator delete[](placed_array, &array_buf);
});
UnsetCountdown();
}
TEST(ThrowingValueTest, NonThrowingDestructor) {
auto* allocated = new ThrowingValue<>();
SetCountdown();
ExpectNoThrow([allocated]() { delete allocated; });
UnsetCountdown();
}
TEST(ThrowingBoolTest, ThrowingBool) {
ThrowingBool t = true;
if (t) {
}
EXPECT_TRUE(t);
TestOp([&]() { (void)!t; });
}
TEST(ThrowingAllocatorTest, MemoryManagement) {
ThrowingAllocator<int> int_alloc;
int* ip = int_alloc.allocate(1);
int_alloc.deallocate(ip, 1);
int* i_array = int_alloc.allocate(2);
int_alloc.deallocate(i_array, 2);
ThrowingAllocator<ThrowingValue<>> tv_alloc;
ThrowingValue<>* ptr = tv_alloc.allocate(1);
tv_alloc.deallocate(ptr, 1);
ThrowingValue<>* tv_array = tv_alloc.allocate(2);
tv_alloc.deallocate(tv_array, 2);
}
TEST(ThrowingAllocatorTest, CallsGlobalNew) {
ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate> nothrow_alloc;
ThrowingValue<>* ptr;
SetCountdown();
ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
nothrow_alloc.deallocate(ptr, 1);
UnsetCountdown();
}
TEST(ThrowingAllocatorTest, ThrowingConstructors) {
ThrowingAllocator<int> int_alloc;
int* ip = nullptr;
SetCountdown();
EXPECT_THROW(ip = int_alloc.allocate(1), TestException);
ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
*ip = 1;
SetCountdown();
EXPECT_THROW(int_alloc.construct(ip, 2), TestException);
EXPECT_EQ(*ip, 1);
int_alloc.deallocate(ip, 1);
UnsetCountdown();
}
TEST(ThrowingAllocatorTest, NonThrowingConstruction) {
{
ThrowingAllocator<int, AllocSpec::kNoThrowAllocate> int_alloc;
int* ip = nullptr;
SetCountdown();
ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
SetCountdown();
ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
EXPECT_EQ(*ip, 2);
int_alloc.deallocate(ip, 1);
UnsetCountdown();
}
{
ThrowingAllocator<int> int_alloc;
int* ip = nullptr;
ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
EXPECT_EQ(*ip, 2);
int_alloc.deallocate(ip, 1);
}
{
ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate>
nothrow_alloc;
ThrowingValue<>* ptr;
SetCountdown();
ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
SetCountdown();
ExpectNoThrow(
[&]() { nothrow_alloc.construct(ptr, 2, testing::nothrow_ctor); });
EXPECT_EQ(ptr->Get(), 2);
nothrow_alloc.destroy(ptr);
nothrow_alloc.deallocate(ptr, 1);
UnsetCountdown();
}
{
ThrowingAllocator<int> a;
SetCountdown();
ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = a; });
SetCountdown();
ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = std::move(a); });
UnsetCountdown();
}
}
TEST(ThrowingAllocatorTest, ThrowingAllocatorConstruction) {
ThrowingAllocator<int> a;
TestOp([]() { ThrowingAllocator<int> a; });
TestOp([&]() { a.select_on_container_copy_construction(); });
}
TEST(ThrowingAllocatorTest, State) {
ThrowingAllocator<int> a1, a2;
EXPECT_NE(a1, a2);
auto a3 = a1;
EXPECT_EQ(a3, a1);
int* ip = a1.allocate(1);
EXPECT_EQ(a3, a1);
a3.deallocate(ip, 1);
EXPECT_EQ(a3, a1);
}
TEST(ThrowingAllocatorTest, InVector) {
std::vector<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> v;
for (int i = 0; i < 20; ++i) v.push_back({});
for (int i = 0; i < 20; ++i) v.pop_back();
}
TEST(ThrowingAllocatorTest, InList) {
std::list<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> l;
for (int i = 0; i < 20; ++i) l.push_back({});
for (int i = 0; i < 20; ++i) l.pop_back();
for (int i = 0; i < 20; ++i) l.push_front({});
for (int i = 0; i < 20; ++i) l.pop_front();
}
template <typename TesterInstance, typename = void>
struct NullaryTestValidator : public std::false_type {};
template <typename TesterInstance>
struct NullaryTestValidator<
TesterInstance,
absl::void_t<decltype(std::declval<TesterInstance>().Test())>>
: public std::true_type {};
template <typename TesterInstance>
bool HasNullaryTest(const TesterInstance&) {
return NullaryTestValidator<TesterInstance>::value;
}
void DummyOp(void*) {}
template <typename TesterInstance, typename = void>
struct UnaryTestValidator : public std::false_type {};
template <typename TesterInstance>
struct UnaryTestValidator<
TesterInstance,
absl::void_t<decltype(std::declval<TesterInstance>().Test(DummyOp))>>
: public std::true_type {};
template <typename TesterInstance>
bool HasUnaryTest(const TesterInstance&) {
return UnaryTestValidator<TesterInstance>::value;
}
TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) {
using T = exceptions_internal::UninitializedT;
auto op = [](T* t) {};
auto inv = [](T*) { return testing::AssertionSuccess(); };
auto fac = []() { return absl::make_unique<T>(); };
auto without_fac =
testing::MakeExceptionSafetyTester().WithOperation(op).WithContracts(
inv, testing::strong_guarantee);
EXPECT_FALSE(HasNullaryTest(without_fac));
EXPECT_FALSE(HasUnaryTest(without_fac));
auto without_op = testing::MakeExceptionSafetyTester()
.WithContracts(inv, testing::strong_guarantee)
.WithFactory(fac);
EXPECT_FALSE(HasNullaryTest(without_op));
EXPECT_TRUE(HasUnaryTest(without_op));
auto without_inv =
testing::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac);
EXPECT_FALSE(HasNullaryTest(without_inv));
EXPECT_FALSE(HasUnaryTest(without_inv));
}
struct ExampleStruct {};
std::unique_ptr<ExampleStruct> ExampleFunctionFactory() {
return absl::make_unique<ExampleStruct>();
}
void ExampleFunctionOperation(ExampleStruct*) {}
testing::AssertionResult ExampleFunctionContract(ExampleStruct*) {
return testing::AssertionSuccess();
}
struct {
std::unique_ptr<ExampleStruct> operator()() const {
return ExampleFunctionFactory();
}
} example_struct_factory;
struct {
void operator()(ExampleStruct*) const {}
} example_struct_operation;
struct {
testing::AssertionResult operator()(ExampleStruct* example_struct) const {
return ExampleFunctionContract(example_struct);
}
} example_struct_contract;
auto example_lambda_factory = []() { return ExampleFunctionFactory(); };
auto example_lambda_operation = [](ExampleStruct*) {};
auto example_lambda_contract = [](ExampleStruct* example_struct) {
return ExampleFunctionContract(example_struct);
};
TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) {
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithFactory(ExampleFunctionFactory)
.WithOperation(ExampleFunctionOperation)
.WithContracts(ExampleFunctionContract)
.Test());
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithFactory(&ExampleFunctionFactory)
.WithOperation(&ExampleFunctionOperation)
.WithContracts(&ExampleFunctionContract)
.Test());
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithFactory(example_struct_factory)
.WithOperation(example_struct_operation)
.WithContracts(example_struct_contract)
.Test());
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithFactory(example_lambda_factory)
.WithOperation(example_lambda_operation)
.WithContracts(example_lambda_contract)
.Test());
}
struct NonNegative {
bool operator==(const NonNegative& other) const { return i == other.i; }
int i;
};
testing::AssertionResult CheckNonNegativeInvariants(NonNegative* g) {
if (g->i >= 0) {
return testing::AssertionSuccess();
}
return testing::AssertionFailure()
<< "i should be non-negative but is " << g->i;
}
struct {
template <typename T>
void operator()(T* t) const {
(*t)();
}
} invoker;
auto tester =
testing::MakeExceptionSafetyTester().WithOperation(invoker).WithContracts(
CheckNonNegativeInvariants);
auto strong_tester = tester.WithContracts(testing::strong_guarantee);
struct FailsBasicGuarantee : public NonNegative {
void operator()() {
--i;
ThrowingValue<> bomb;
++i;
}
};
TEST(ExceptionCheckTest, BasicGuaranteeFailure) {
EXPECT_FALSE(tester.WithInitialValue(FailsBasicGuarantee{}).Test());
}
struct FollowsBasicGuarantee : public NonNegative {
void operator()() {
++i;
ThrowingValue<> bomb;
}
};
TEST(ExceptionCheckTest, BasicGuarantee) {
EXPECT_TRUE(tester.WithInitialValue(FollowsBasicGuarantee{}).Test());
}
TEST(ExceptionCheckTest, StrongGuaranteeFailure) {
EXPECT_FALSE(strong_tester.WithInitialValue(FailsBasicGuarantee{}).Test());
EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test());
}
struct BasicGuaranteeWithExtraContracts : public NonNegative {
void operator()() {
int old_i = i;
i = kExceptionSentinel;
ThrowingValue<> bomb;
i = ++old_i;
}
static constexpr int kExceptionSentinel = 9999;
};
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel;
#endif
TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) {
auto tester_with_val =
tester.WithInitialValue(BasicGuaranteeWithExtraContracts{});
EXPECT_TRUE(tester_with_val.Test());
EXPECT_TRUE(
tester_with_val
.WithContracts([](BasicGuaranteeWithExtraContracts* o) {
if (o->i == BasicGuaranteeWithExtraContracts::kExceptionSentinel) {
return testing::AssertionSuccess();
}
return testing::AssertionFailure()
<< "i should be "
<< BasicGuaranteeWithExtraContracts::kExceptionSentinel
<< ", but is " << o->i;
})
.Test());
}
struct FollowsStrongGuarantee : public NonNegative {
void operator()() { ThrowingValue<> bomb; }
};
TEST(ExceptionCheckTest, StrongGuarantee) {
EXPECT_TRUE(tester.WithInitialValue(FollowsStrongGuarantee{}).Test());
EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}).Test());
}
struct HasReset : public NonNegative {
void operator()() {
i = -1;
ThrowingValue<> bomb;
i = 1;
}
void reset() { i = 0; }
};
testing::AssertionResult CheckHasResetContracts(HasReset* h) {
h->reset();
return testing::AssertionResult(h->i == 0);
}
TEST(ExceptionCheckTest, ModifyingChecker) {
auto set_to_1000 = [](FollowsBasicGuarantee* g) {
g->i = 1000;
return testing::AssertionSuccess();
};
auto is_1000 = [](FollowsBasicGuarantee* g) {
return testing::AssertionResult(g->i == 1000);
};
auto increment = [](FollowsStrongGuarantee* g) {
++g->i;
return testing::AssertionSuccess();
};
EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{})
.WithContracts(set_to_1000, is_1000)
.Test());
EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{})
.WithContracts(increment)
.Test());
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithInitialValue(HasReset{})
.WithContracts(CheckHasResetContracts)
.Test(invoker));
}
TEST(ExceptionSafetyTesterTest, ResetsCountdown) {
auto test =
testing::MakeExceptionSafetyTester()
.WithInitialValue(ThrowingValue<>())
.WithContracts([](ThrowingValue<>*) { return AssertionSuccess(); })
.WithOperation([](ThrowingValue<>*) {});
ASSERT_TRUE(test.Test());
EXPECT_TRUE(test.Test());
}
struct NonCopyable : public NonNegative {
NonCopyable(const NonCopyable&) = delete;
NonCopyable() : NonNegative{0} {}
void operator()() { ThrowingValue<> bomb; }
};
TEST(ExceptionCheckTest, NonCopyable) {
auto factory = []() { return absl::make_unique<NonCopyable>(); };
EXPECT_TRUE(tester.WithFactory(factory).Test());
EXPECT_TRUE(strong_tester.WithFactory(factory).Test());
}
struct NonEqualityComparable : public NonNegative {
void operator()() { ThrowingValue<> bomb; }
void ModifyOnThrow() {
++i;
ThrowingValue<> bomb;
static_cast<void>(bomb);
--i;
}
};
TEST(ExceptionCheckTest, NonEqualityComparable) {
auto nec_is_strong = [](NonEqualityComparable* nec) {
return testing::AssertionResult(nec->i == NonEqualityComparable().i);
};
auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{})
.WithContracts(nec_is_strong);
EXPECT_TRUE(strong_nec_tester.Test());
EXPECT_FALSE(strong_nec_tester.Test(
[](NonEqualityComparable* n) { n->ModifyOnThrow(); }));
}
template <typename T>
struct ExhaustivenessTester {
void operator()() {
successes |= 1;
T b1;
static_cast<void>(b1);
successes |= (1 << 1);
T b2;
static_cast<void>(b2);
successes |= (1 << 2);
T b3;
static_cast<void>(b3);
successes |= (1 << 3);
}
bool operator==(const ExhaustivenessTester<ThrowingValue<>>&) const {
return true;
}
static unsigned char successes;
};
struct {
template <typename T>
testing::AssertionResult operator()(ExhaustivenessTester<T>*) const {
return testing::AssertionSuccess();
}
} CheckExhaustivenessTesterContracts;
template <typename T>
unsigned char ExhaustivenessTester<T>::successes = 0;
TEST(ExceptionCheckTest, Exhaustiveness) {
auto exhaust_tester = testing::MakeExceptionSafetyTester()
.WithContracts(CheckExhaustivenessTesterContracts)
.WithOperation(invoker);
EXPECT_TRUE(
exhaust_tester.WithInitialValue(ExhaustivenessTester<int>{}).Test());
EXPECT_EQ(ExhaustivenessTester<int>::successes, 0xF);
EXPECT_TRUE(
exhaust_tester.WithInitialValue(ExhaustivenessTester<ThrowingValue<>>{})
.WithContracts(testing::strong_guarantee)
.Test());
EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF);
}
struct LeaksIfCtorThrows : private exceptions_internal::TrackedObject {
LeaksIfCtorThrows() : TrackedObject(ABSL_PRETTY_FUNCTION) {
++counter;
ThrowingValue<> v;
static_cast<void>(v);
--counter;
}
LeaksIfCtorThrows(const LeaksIfCtorThrows&) noexcept
: TrackedObject(ABSL_PRETTY_FUNCTION) {}
static int counter;
};
int LeaksIfCtorThrows::counter = 0;
TEST(ExceptionCheckTest, TestLeakyCtor) {
testing::TestThrowingCtor<LeaksIfCtorThrows>();
EXPECT_EQ(LeaksIfCtorThrows::counter, 1);
LeaksIfCtorThrows::counter = 0;
}
struct Tracked : private exceptions_internal::TrackedObject {
Tracked() : TrackedObject(ABSL_PRETTY_FUNCTION) {}
};
TEST(ConstructorTrackerTest, CreatedBefore) {
Tracked a, b, c;
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
}
TEST(ConstructorTrackerTest, CreatedAfter) {
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
Tracked a, b, c;
}
TEST(ConstructorTrackerTest, NotDestroyedAfter) {
alignas(Tracked) unsigned char storage[sizeof(Tracked)];
EXPECT_NONFATAL_FAILURE(
{
exceptions_internal::ConstructorTracker ct(
exceptions_internal::countdown);
new (&storage) Tracked();
},
"not destroyed");
}
TEST(ConstructorTrackerTest, DestroyedTwice) {
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
EXPECT_NONFATAL_FAILURE(
{
Tracked t;
t.~Tracked();
},
"re-destroyed");
}
TEST(ConstructorTrackerTest, ConstructedTwice) {
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
alignas(Tracked) unsigned char storage[sizeof(Tracked)];
EXPECT_NONFATAL_FAILURE(
{
new (&storage) Tracked();
new (&storage) Tracked();
reinterpret_cast<Tracked*>(&storage)->~Tracked();
},
"re-constructed");
}
TEST(ThrowingValueTraitsTest, RelationalOperators) {
ThrowingValue<> a, b;
EXPECT_TRUE((std::is_convertible<decltype(a == b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a != b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a < b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a <= b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a > b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a >= b), bool>::value));
}
TEST(ThrowingAllocatorTraitsTest, Assignability) {
EXPECT_TRUE(absl::is_move_assignable<ThrowingAllocator<int>>::value);
EXPECT_TRUE(absl::is_copy_assignable<ThrowingAllocator<int>>::value);
EXPECT_TRUE(std::is_nothrow_move_assignable<ThrowingAllocator<int>>::value);
EXPECT_TRUE(std::is_nothrow_copy_assignable<ThrowingAllocator<int>>::value);
}
}
}
#endif | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/exception_safety_testing.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/exception_safety_testing_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
cc39785c-7b2f-476e-a2fa-adde7d1d292a | cpp | abseil/abseil-cpp | kernel_timeout | absl/synchronization/internal/kernel_timeout.cc | absl/synchronization/internal/kernel_timeout_test.cc | #include "absl/synchronization/internal/kernel_timeout.h"
#ifndef _WIN32
#include <sys/types.h>
#endif
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <limits>
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr uint64_t KernelTimeout::kNoTimeout;
constexpr int64_t KernelTimeout::kMaxNanos;
#endif
int64_t KernelTimeout::SteadyClockNow() {
if (!SupportsSteadyClock()) {
return absl::GetCurrentTimeNanos();
}
return std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::steady_clock::now().time_since_epoch())
.count();
}
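// Deadlines are packed into rep_ as nanoseconds shifted left one bit;
// absolute (unix-clock) deadlines leave the low bit clear, relative
// (steady-clock) deadlines set it.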
KernelTimeout::KernelTimeout(absl::Time t) {
if (t == absl::InfiniteFuture()) {
rep_ = kNoTimeout;
return;
}
int64_t unix_nanos = absl::ToUnixNanos(t);
if (unix_nanos < 0) {
unix_nanos = 0;
}
if (unix_nanos >= kMaxNanos) {
rep_ = kNoTimeout;
return;
}
rep_ = static_cast<uint64_t>(unix_nanos) << 1;
}
KernelTimeout::KernelTimeout(absl::Duration d) {
if (d == absl::InfiniteDuration()) {
rep_ = kNoTimeout;
return;
}
int64_t nanos = absl::ToInt64Nanoseconds(d);
if (nanos < 0) {
nanos = 0;
}
int64_t now = SteadyClockNow();
if (nanos > kMaxNanos - now) {
rep_ = kNoTimeout;
return;
}
nanos += now;
rep_ = (static_cast<uint64_t>(nanos) << 1) | uint64_t{1};
}
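// Converts the deadline to absolute unix nanos: relative deadlines are
// re-based from the steady clock onto the realtime clock with overflow
// clamping, and an absolute deadline of exactly 0 is bumped to 1.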
int64_t KernelTimeout::MakeAbsNanos() const {
if (!has_timeout()) {
return kMaxNanos;
}
int64_t nanos = RawAbsNanos();
if (is_relative_timeout()) {
nanos = std::max<int64_t>(nanos - SteadyClockNow(), 0);
int64_t now = absl::GetCurrentTimeNanos();
if (nanos > kMaxNanos - now) {
nanos = kMaxNanos;
} else {
nanos += now;
}
} else if (nanos == 0) {
nanos = 1;
}
return nanos;
}
int64_t KernelTimeout::InNanosecondsFromNow() const {
if (!has_timeout()) {
return kMaxNanos;
}
int64_t nanos = RawAbsNanos();
if (is_absolute_timeout()) {
return std::max<int64_t>(nanos - absl::GetCurrentTimeNanos(), 0);
}
return std::max<int64_t>(nanos - SteadyClockNow(), 0);
}
struct timespec KernelTimeout::MakeAbsTimespec() const {
return absl::ToTimespec(absl::Nanoseconds(MakeAbsNanos()));
}
struct timespec KernelTimeout::MakeRelativeTimespec() const {
return absl::ToTimespec(absl::Nanoseconds(InNanosecondsFromNow()));
}
#ifndef _WIN32
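// Expresses the deadline on an arbitrary clock `c` by measuring the time
// remaining and adding it to that clock's current reading.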
struct timespec KernelTimeout::MakeClockAbsoluteTimespec(clockid_t c) const {
if (!has_timeout()) {
return absl::ToTimespec(absl::Nanoseconds(kMaxNanos));
}
int64_t nanos = RawAbsNanos();
if (is_absolute_timeout()) {
nanos -= absl::GetCurrentTimeNanos();
} else {
nanos -= SteadyClockNow();
}
struct timespec now;
ABSL_RAW_CHECK(clock_gettime(c, &now) == 0, "clock_gettime() failed");
absl::Duration from_clock_epoch =
absl::DurationFromTimespec(now) + absl::Nanoseconds(nanos);
if (from_clock_epoch <= absl::ZeroDuration()) {
return absl::ToTimespec(absl::Nanoseconds(1));
}
return absl::ToTimespec(from_clock_epoch);
}
#endif
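// Remaining time rounded up to whole milliseconds, saturating to the
// Win32-style "infinite" sentinel (the maximum DWord).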
KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
constexpr DWord kInfinite = std::numeric_limits<DWord>::max();
if (!has_timeout()) {
return kInfinite;
}
constexpr uint64_t kNanosInMillis = uint64_t{1'000'000};
constexpr uint64_t kMaxValueNanos =
std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;
uint64_t ns_from_now = static_cast<uint64_t>(InNanosecondsFromNow());
if (ns_from_now >= kMaxValueNanos) {
return kInfinite;
}
uint64_t ms_from_now = (ns_from_now + kNanosInMillis - 1) / kNanosInMillis;
if (ms_from_now > kInfinite) {
return kInfinite;
}
return static_cast<DWord>(ms_from_now);
}
std::chrono::time_point<std::chrono::system_clock>
KernelTimeout::ToChronoTimePoint() const {
if (!has_timeout()) {
return std::chrono::time_point<std::chrono::system_clock>::max();
}
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::nanoseconds(MakeAbsNanos()));
return std::chrono::system_clock::from_time_t(0) + micros;
}
std::chrono::nanoseconds KernelTimeout::ToChronoDuration() const {
if (!has_timeout()) {
return std::chrono::nanoseconds::max();
}
return std::chrono::nanoseconds(InNanosecondsFromNow());
}
}
ABSL_NAMESPACE_END
} | #include "absl/synchronization/internal/kernel_timeout.h"
#include <ctime>
#include <chrono>
#include <limits>
#include "absl/base/config.h"
#include "absl/random/random.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "gtest/gtest.h"
#if defined(__GOOGLE_GRTE_VERSION__) && \
!defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
!defined(ABSL_HAVE_MEMORY_SANITIZER) && \
!defined(ABSL_HAVE_THREAD_SANITIZER)
extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
if (c == CLOCK_MONOTONIC &&
!absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
thread_local absl::BitGen gen;
ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
return 0;
}
return __clock_gettime(c, ts);
}
#endif
namespace {
#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER) || \
defined(ABSL_HAVE_THREAD_SANITIZER) || defined(__ANDROID__) || \
defined(__APPLE__) || defined(_WIN32) || defined(_WIN64)
constexpr absl::Duration kTimingBound = absl::Milliseconds(5);
#else
constexpr absl::Duration kTimingBound = absl::Microseconds(250);
#endif
using absl::synchronization_internal::KernelTimeout;
TEST(KernelTimeout, DISABLED_FiniteTimes) {
constexpr absl::Duration kDurationsToTest[] = {
absl::ZeroDuration(),
absl::Nanoseconds(1),
absl::Microseconds(1),
absl::Milliseconds(1),
absl::Seconds(1),
absl::Minutes(1),
absl::Hours(1),
absl::Hours(1000),
-absl::Nanoseconds(1),
-absl::Microseconds(1),
-absl::Milliseconds(1),
-absl::Seconds(1),
-absl::Minutes(1),
-absl::Hours(1),
-absl::Hours(1000),
};
for (auto duration : kDurationsToTest) {
const absl::Time now = absl::Now();
const absl::Time when = now + duration;
SCOPED_TRACE(duration);
KernelTimeout t(when);
EXPECT_TRUE(t.has_timeout());
EXPECT_TRUE(t.is_absolute_timeout());
EXPECT_FALSE(t.is_relative_timeout());
EXPECT_EQ(absl::TimeFromTimespec(t.MakeAbsTimespec()), when);
#ifndef _WIN32
EXPECT_LE(
absl::AbsDuration(absl::Now() + duration -
absl::TimeFromTimespec(
t.MakeClockAbsoluteTimespec(CLOCK_REALTIME))),
absl::Milliseconds(10));
#endif
EXPECT_LE(
absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) -
std::max(duration, absl::ZeroDuration())),
kTimingBound);
EXPECT_EQ(absl::FromUnixNanos(t.MakeAbsNanos()), when);
EXPECT_LE(absl::AbsDuration(absl::Milliseconds(t.InMillisecondsFromNow()) -
std::max(duration, absl::ZeroDuration())),
absl::Milliseconds(5));
EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoTimePoint()) - when),
absl::Microseconds(1));
EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) -
std::max(duration, absl::ZeroDuration())),
kTimingBound);
}
}
TEST(KernelTimeout, InfiniteFuture) {
KernelTimeout t(absl::InfiniteFuture());
EXPECT_FALSE(t.has_timeout());
EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
absl::Now() + absl::Hours(100000));
#ifndef _WIN32
EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
absl::Now() + absl::Hours(100000));
#endif
EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::Hours(100000));
EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
EXPECT_EQ(t.ToChronoTimePoint(),
std::chrono::time_point<std::chrono::system_clock>::max());
EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, DefaultConstructor) {
KernelTimeout t;
EXPECT_FALSE(t.has_timeout());
EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
absl::Now() + absl::Hours(100000));
#ifndef _WIN32
EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
absl::Now() + absl::Hours(100000));
#endif
EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::Hours(100000));
EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
EXPECT_EQ(t.ToChronoTimePoint(),
std::chrono::time_point<std::chrono::system_clock>::max());
EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, TimeMaxNanos) {
KernelTimeout t(absl::FromUnixNanos(std::numeric_limits<int64_t>::max()));
EXPECT_FALSE(t.has_timeout());
EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
absl::Now() + absl::Hours(100000));
#ifndef _WIN32
EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
absl::Now() + absl::Hours(100000));
#endif
EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::Hours(100000));
EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
EXPECT_EQ(t.ToChronoTimePoint(),
std::chrono::time_point<std::chrono::system_clock>::max());
EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, Never) {
KernelTimeout t = KernelTimeout::Never();
EXPECT_FALSE(t.has_timeout());
EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
absl::Now() + absl::Hours(100000));
#ifndef _WIN32
EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
absl::Now() + absl::Hours(100000));
#endif
EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::Hours(100000));
EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
EXPECT_EQ(t.ToChronoTimePoint(),
std::chrono::time_point<std::chrono::system_clock>::max());
EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, InfinitePast) {
KernelTimeout t(absl::InfinitePast());
EXPECT_TRUE(t.has_timeout());
EXPECT_TRUE(t.is_absolute_timeout());
EXPECT_FALSE(t.is_relative_timeout());
EXPECT_LE(absl::TimeFromTimespec(t.MakeAbsTimespec()),
absl::FromUnixNanos(1));
#ifndef _WIN32
EXPECT_LE(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
absl::FromUnixSeconds(1));
#endif
EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::ZeroDuration());
EXPECT_LE(absl::FromUnixNanos(t.MakeAbsNanos()), absl::FromUnixNanos(1));
EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
EXPECT_LT(t.ToChronoTimePoint(), std::chrono::system_clock::from_time_t(0) +
std::chrono::seconds(1));
EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
}
TEST(KernelTimeout, DISABLED_FiniteDurations) {
constexpr absl::Duration kDurationsToTest[] = {
absl::ZeroDuration(),
absl::Nanoseconds(1),
absl::Microseconds(1),
absl::Milliseconds(1),
absl::Seconds(1),
absl::Minutes(1),
absl::Hours(1),
absl::Hours(1000),
};
for (auto duration : kDurationsToTest) {
SCOPED_TRACE(duration);
KernelTimeout t(duration);
EXPECT_TRUE(t.has_timeout());
EXPECT_FALSE(t.is_absolute_timeout());
EXPECT_TRUE(t.is_relative_timeout());
EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
absl::TimeFromTimespec(t.MakeAbsTimespec())),
absl::Milliseconds(5));
#ifndef _WIN32
EXPECT_LE(
absl::AbsDuration(absl::Now() + duration -
absl::TimeFromTimespec(
t.MakeClockAbsoluteTimespec(CLOCK_REALTIME))),
absl::Milliseconds(5));
#endif
EXPECT_LE(
absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) -
duration),
kTimingBound);
EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
absl::FromUnixNanos(t.MakeAbsNanos())),
absl::Milliseconds(5));
EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
absl::Milliseconds(5));
EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
absl::FromChrono(t.ToChronoTimePoint())),
kTimingBound);
EXPECT_LE(
absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) - duration),
kTimingBound);
}
}
TEST(KernelTimeout, DISABLED_NegativeDurations) {
constexpr absl::Duration kDurationsToTest[] = {
-absl::ZeroDuration(),
-absl::Nanoseconds(1),
-absl::Microseconds(1),
-absl::Milliseconds(1),
-absl::Seconds(1),
-absl::Minutes(1),
-absl::Hours(1),
-absl::Hours(1000),
-absl::InfiniteDuration(),
};
for (auto duration : kDurationsToTest) {
SCOPED_TRACE(duration);
KernelTimeout t(duration);
EXPECT_TRUE(t.has_timeout());
EXPECT_FALSE(t.is_absolute_timeout());
EXPECT_TRUE(t.is_relative_timeout());
EXPECT_LE(absl::AbsDuration(absl::Now() -
absl::TimeFromTimespec(t.MakeAbsTimespec())),
absl::Milliseconds(5));
#ifndef _WIN32
EXPECT_LE(absl::AbsDuration(absl::Now() - absl::TimeFromTimespec(
t.MakeClockAbsoluteTimespec(
CLOCK_REALTIME))),
absl::Milliseconds(5));
#endif
EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::ZeroDuration());
EXPECT_LE(
absl::AbsDuration(absl::Now() - absl::FromUnixNanos(t.MakeAbsNanos())),
absl::Milliseconds(5));
EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
EXPECT_LE(absl::AbsDuration(absl::Now() -
absl::FromChrono(t.ToChronoTimePoint())),
absl::Milliseconds(5));
EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
}
}
TEST(KernelTimeout, InfiniteDuration) {
KernelTimeout t(absl::InfiniteDuration());
EXPECT_FALSE(t.has_timeout());
EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
absl::Now() + absl::Hours(100000));
#ifndef _WIN32
EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
absl::Now() + absl::Hours(100000));
#endif
EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::Hours(100000));
EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
EXPECT_EQ(t.ToChronoTimePoint(),
std::chrono::time_point<std::chrono::system_clock>::max());
EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, DurationMaxNanos) {
KernelTimeout t(absl::Nanoseconds(std::numeric_limits<int64_t>::max()));
EXPECT_FALSE(t.has_timeout());
EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
absl::Now() + absl::Hours(100000));
#ifndef _WIN32
EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
absl::Now() + absl::Hours(100000));
#endif
EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::Hours(100000));
EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
EXPECT_EQ(t.ToChronoTimePoint(),
std::chrono::time_point<std::chrono::system_clock>::max());
EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, OverflowNanos) {
int64_t now_nanos = absl::ToUnixNanos(absl::Now());
int64_t limit = std::numeric_limits<int64_t>::max() - now_nanos;
absl::Duration duration = absl::Nanoseconds(limit) + absl::Seconds(1);
KernelTimeout t(duration);
EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
absl::Now() + absl::Hours(100000));
#ifndef _WIN32
EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
absl::Now() + absl::Hours(100000));
#endif
EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
absl::Hours(100000));
EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
absl::Now() + absl::Hours(100000));
EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
absl::Milliseconds(5));
EXPECT_GT(t.ToChronoTimePoint(),
std::chrono::system_clock::now() + std::chrono::hours(100000));
EXPECT_GT(t.ToChronoDuration(), std::chrono::hours(100000));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/internal/kernel_timeout.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/internal/kernel_timeout_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
70560aea-19e3-4db0-bb61-409c88fde8ba | cpp | tensorflow/tensorflow | quantized_pooling_ops | tensorflow/core/kernels/quantized_pooling_ops.cc | tensorflow/core/kernels/quantized_pooling_ops_test.cc | #define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/pooling_ops_common.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
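// Average-pools quantized tensors by widening to int32, pooling in the wider
// type, and clamping the result back into T's range; the input min/max
// quantization range passes through unchanged.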
template <typename Device, typename T>
class QuantizedAvgPoolingOp : public OpKernel {
public:
explicit QuantizedAvgPoolingOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_));
OP_REQUIRES(context, ksize_.size() == 4,
errors::InvalidArgument("Sliding window ksize field must "
"specify 4 dimensions"));
OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_));
OP_REQUIRES(context, stride_.size() == 4,
errors::InvalidArgument("Sliding window strides field must "
"specify 4 dimensions"));
OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_));
OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1,
errors::Unimplemented(
"Pooling is not yet supported on the batch dimension."));
}
void Compute(OpKernelContext* context) override {
const Tensor& tensor_in = context->input(0);
PoolParameters params{context,
ksize_,
stride_,
padding_,
{},
FORMAT_NHWC,
tensor_in.shape()};
if (!context->status().ok()) {
return;
}
const Tensor& min_input_tensor = context->input(1);
const Tensor& max_input_tensor = context->input(2);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_input_tensor.shape()),
errors::InvalidArgument(
"min_input shape must be rank 0 but is rank ",
min_input_tensor.dims(),
", received shape: ", min_input_tensor.shape()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_input_tensor.shape()),
errors::InvalidArgument(
"max_input shape must be rank 0 but is rank ",
max_input_tensor.dims(),
", received shape: ", max_input_tensor.shape()));
const float min_input = context->input(1).scalar<float>()();
const float max_input = context->input(2).scalar<float>()();
OP_REQUIRES(context, params.depth_window == 1,
errors::Unimplemented("Non-spatial pooling is not "
"yet supported. Volunteers? :)"));
OP_REQUIRES(context, tensor_in.dims() == 4,
errors::InvalidArgument("tensor_in must be 4-dimensional"));
Tensor* output = nullptr;
TensorShape params_forward_output_shape;
OP_REQUIRES_OK(context,
params.forward_output_shape(¶ms_forward_output_shape));
OP_REQUIRES_OK(context, context->allocate_output(
0, params_forward_output_shape, &output));
const int32_t highest = static_cast<int32>(Eigen::NumTraits<T>::highest());
const int32_t lowest = static_cast<int32>(Eigen::NumTraits<T>::lowest());
OP_REQUIRES_OK(context,
params.forward_output_shape(¶ms_forward_output_shape));
Tensor int32_output(DT_INT32, params_forward_output_shape);
Tensor int32_input(DT_INT32, tensor_in.shape());
int32_input.flat<int32>() = tensor_in.flat<T>().template cast<int32>();
SpatialAvgPool<Device, int32>(context, &int32_output, int32_input, params,
padding_);
output->flat<T>() = int32_output.flat<int32>()
.cwiseMax(lowest)
.cwiseMin(highest)
.template cast<T>();
Tensor* output_min = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
output_min->flat<float>()(0) = min_input;
Tensor* output_max = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
output_max->flat<float>()(0) = max_input;
}
private:
std::vector<int32> ksize_;
std::vector<int32> stride_;
Padding padding_;
};
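// Quantization is monotonic, so max pooling can run directly on the
// quantized values: Compute forwards to MaxPoolingOp and propagates the
// input min/max range to the outputs.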
template <typename Device, typename T>
class QuantizedMaxPoolingOp : public MaxPoolingOp<Device, T> {
public:
explicit QuantizedMaxPoolingOp(OpKernelConstruction* context)
: MaxPoolingOp<Device, T>(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& min_input_tensor = context->input(1);
const Tensor& max_input_tensor = context->input(2);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_input_tensor.shape()),
errors::InvalidArgument(
"min_input shape must be rank 0 but is rank ",
min_input_tensor.dims(),
", received shape: ", min_input_tensor.shape()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_input_tensor.shape()),
errors::InvalidArgument(
"max_input shape must be rank 0 but is rank ",
max_input_tensor.dims(),
", received shape: ", max_input_tensor.shape()));
const float min_input = context->input(1).scalar<float>()();
const float max_input = context->input(2).scalar<float>()();
MaxPoolingOp<Device, T>::Compute(context);
Tensor* output_min = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
output_min->flat<float>()(0) = min_input;
Tensor* output_max = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
output_max->flat<float>()(0) = max_input;
}
};
REGISTER_KERNEL_BUILDER(
Name("QuantizedAvgPool").Device(DEVICE_CPU).TypeConstraint<quint8>("T"),
QuantizedAvgPoolingOp<CPUDevice, quint8>);
REGISTER_KERNEL_BUILDER(
Name("QuantizedMaxPool").Device(DEVICE_CPU).TypeConstraint<quint8>("T"),
QuantizedMaxPoolingOp<CPUDevice, quint8>);
#ifdef INTEL_MKL
REGISTER_KERNEL_BUILDER(
Name("QuantizedAvgPool").Device(DEVICE_CPU).TypeConstraint<qint8>("T"),
QuantizedAvgPoolingOp<CPUDevice, qint8>);
REGISTER_KERNEL_BUILDER(
Name("QuantizedMaxPool").Device(DEVICE_CPU).TypeConstraint<qint8>("T"),
QuantizedMaxPoolingOp<CPUDevice, qint8>);
#endif
} | #define EIGEN_USE_THREADS
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class QuantizedPoolingTest : public OpsTestBase {
protected:
};
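// 2x2, stride-2 average pool over a 4x4x2 input; the expected values are the
// float means of each window, compared within quantization tolerance.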
TEST_F(QuantizedPoolingTest, SmallAveragePooling) {
const int ksize = 2;
const int stride = 2;
TF_ASSERT_OK(NodeDefBuilder("quantized_avg_pool_op", "QuantizedAvgPool")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("ksize", {1, ksize, ksize, 1})
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const float input_min = 0.0f;
const float input_max = 255.0f;
const int input_height = 4;
const int input_width = 4;
const int input_channels = 2;
Tensor input_float(DT_FLOAT, {1, input_height, input_width, input_channels});
test::FillValues<float>(
&input_float,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32});
Tensor input_quantized =
FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
const int expected_width = input_width / stride;
const int expected_height = input_height / stride;
Tensor expected_float(DT_FLOAT,
{1, expected_height, expected_width, input_channels});
test::FillValues<float>(&expected_float, {6, 7, 10, 11, 22, 23, 26, 27});
AddInputFromArray<quint8>(input_quantized.shape(),
input_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({}), {input_min});
AddInputFromArray<float>(TensorShape({}), {input_max});
TF_ASSERT_OK(RunOpKernel());
const Tensor& output_quantized = *GetOutput(0);
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
Tensor output_float =
QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}
TEST_F(QuantizedPoolingTest, SmallMaxPooling) {
const int ksize = 2;
const int stride = 2;
TF_ASSERT_OK(NodeDefBuilder("quantized_max_pool_op", "QuantizedMaxPool")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("ksize", {1, ksize, ksize, 1})
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const float input_min = 0.0f;
const float input_max = 255.0f;
const int input_height = 4;
const int input_width = 4;
const int input_channels = 2;
Tensor input_float(DT_FLOAT, {1, input_height, input_width, input_channels});
test::FillValues<float>(
&input_float,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32});
Tensor input_quantized =
FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
const int expected_width = input_width / stride;
const int expected_height = input_height / stride;
Tensor expected_float(DT_FLOAT,
{1, expected_height, expected_width, input_channels});
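  // Max pooling keeps the largest value in each 2x2 window per channel; e.g.
  // the top-left window of channel 0 yields max(1, 3, 9, 11) = 11.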
test::FillValues<float>(&expected_float, {11, 12, 15, 16, 27, 28, 31, 32});
AddInputFromArray<quint8>(input_quantized.shape(),
input_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({}), {input_min});
AddInputFromArray<float>(TensorShape({}), {input_max});
TF_ASSERT_OK(RunOpKernel());
const Tensor& output_quantized = *GetOutput(0);
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
Tensor output_float =
QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_pooling_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_pooling_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
486e1eb3-0d28-4e21-863a-18166ad5e866 | cpp | tensorflow/tensorflow | verify_clustering_pass | tensorflow/compiler/mlir/tf2xla/internal/passes/verify_clustering_pass.cc | tensorflow/compiler/mlir/tf2xla/internal/passes/verify_clustering_pass_test.cc | #include <memory>
#include <string>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
#define GEN_PASS_DEF_VERIFYCLUSTERINGPASS
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h.inc"
using mlir::Operation;
using mlir::WalkResult;
class VerifyClusteringPass
: public impl::VerifyClusteringPassBase<VerifyClusteringPass> {
public:
void runOnOperation() override;
};
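// Walks every op in the function and fails the pass if an op belongs to a
// dialect the bridge does not accept, or if it still carries the
// _xla_outside_compilation attribute, which must be gone after clustering.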
void VerifyClusteringPass::runOnOperation() {
Operation* func_op = getOperation();
auto walk_result = func_op->walk([&](Operation* op) {
if (!tensorflow::tf2xla::internal::IsInBridgeAcceptableDialects(op)) {
std::string error = "op is in dialect " +
op->getDialect()->getNamespace().str() +
" not in tf functional dialect";
op->emitError() << error;
return WalkResult::interrupt();
}
if (op->hasAttr(mlir::TF::kXlaOutsideCompilationAttr)) {
std::string error =
"op has outside compilation attribute _xla_outside_compilation which "
"is not allowed after clustering";
op->emitError() << error;
return mlir::WalkResult::interrupt();
}
return WalkResult::advance();
});
if (walk_result.wasInterrupted()) {
signalPassFailure();
}
}
}
std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
CreateVerifyClusteringPass() {
return std::make_unique<VerifyClusteringPass>();
}
}
}
} | #include <memory>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::mhlo::test::GetMlirModuleFromString;
class VerifyClusteringPassTest : public testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<mlir::func::FuncOp>(CreateVerifyClusteringPass());
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
mlir::MLIRContext context_;
mlir::OwningOpRef<mlir::ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
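// Modules containing only TF functional-dialect ops pass verification.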
TEST_F(VerifyClusteringPassTest, OnlyTfFunctionalPasses) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
return %0 : tensor<1xi32>
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
}
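// Ops from other dialects (here mhlo.constant) must cause the pass to fail.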
TEST_F(VerifyClusteringPassTest, NotTfFunctionalFails) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<3x32x32x3xf32> {
%0 = mhlo.constant dense<2.550000e+02> : tensor<3x32x32x3xf32>
return %0 : tensor<3x32x32x3xf32>
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/passes/verify_clustering_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/passes/verify_clustering_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a07e1ff9-4f11-46b0-a33c-e1d5b746ce05 | cpp | google/tensorstore | nditerable_elementwise_output_transform | tensorstore/internal/nditerable_elementwise_output_transform.cc | tensorstore/internal/nditerable_elementwise_output_transform_test.cc | #include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include <array>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
struct ElementwiseOutputTransformNDIterator
: public NDIterator::Base<ElementwiseOutputTransformNDIterator> {
explicit ElementwiseOutputTransformNDIterator(
const NDIterable* output, ElementwiseClosure<2, void*> closure,
NDIterable::IterationBufferKindLayoutView layout,
ArenaAllocator<> allocator)
: output_(tensorstore::span(&output, 1), layout, allocator),
context_(closure.context),
elementwise_function_((*closure.function)[layout.buffer_kind]) {}
ArenaAllocator<> get_allocator() const override {
return output_.get_allocator();
}
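  // Fetches the destination block, applies the elementwise function with the
  // caller-supplied block as input and the destination block as output, then
  // commits the destination block.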
bool UpdateBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer pointer,
absl::Status* status) override {
return output_.GetBlock(indices, block_shape, status) &&
elementwise_function_(context_, block_shape, pointer,
output_.block_pointers()[0], status) &&
output_.UpdateBlock(indices, block_shape, status);
}
NDIteratorsWithManagedBuffers<1> output_;
void* context_;
SpecializedElementwiseFunctionPointer<2, void*> elementwise_function_;
};
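// NDIterable adaptor that exposes `output` as a write-only iterable of the
// transform's input type; every block written through it is converted by
// `closure` before being stored in `output`.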
struct ElementwiseOutputTransformNDIterable
: public NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, 1>,
NDIterable::Base<ElementwiseOutputTransformNDIterable>> {
using Base = NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, 1>,
NDIterable::Base<ElementwiseOutputTransformNDIterable>>;
ElementwiseOutputTransformNDIterable(NDIterable::Ptr output,
DataType input_dtype,
ElementwiseClosure<2, void*> closure,
ArenaAllocator<> allocator)
: Base{{{std::move(output)}}},
input_dtype_(input_dtype),
closure_(closure),
allocator_(allocator) {}
ArenaAllocator<> get_allocator() const override { return allocator_; }
DataType dtype() const override { return input_dtype_; }
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseOutputTransformNDIterator>(
allocator_, this->iterables[0].get(), closure_, layout);
}
DataType input_dtype_;
ElementwiseClosure<2, void*> closure_;
ArenaAllocator<> allocator_;
};
}
NDIterable::Ptr GetElementwiseOutputTransformNDIterable(
NDIterable::Ptr output, DataType input_dtype,
ElementwiseClosure<2, void*> closure, Arena* arena) {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseOutputTransformNDIterable>(
ArenaAllocator<>(arena), std::move(output), input_dtype, closure);
}
}
} | #include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include <new>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::NDIterableCopier;
using ::testing::_;
using ::testing::Pair;
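// Copies `source_array` into `dest_array` through an elementwise output
// transform built from `func`, returning the final status of the copy.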
template <typename Func, typename SourceArray, typename DestArray>
absl::Status TestCopy(Func func, tensorstore::IterationConstraints constraints,
SourceArray source_array, DestArray dest_array) {
tensorstore::internal::Arena arena;
tensorstore::internal::ElementwiseClosure<2, void*> closure =
tensorstore::internal::SimpleElementwiseFunction<
Func(typename SourceArray::Element, typename DestArray::Element),
void*>::Closure(&func);
auto iterable =
tensorstore::internal::GetElementwiseOutputTransformNDIterable(
tensorstore::internal::GetTransformedArrayNDIterable(dest_array,
&arena)
.value(),
tensorstore::dtype_v<typename SourceArray::Element>, closure, &arena);
return tensorstore::internal::NDIterableCopier(
*tensorstore::internal::GetTransformedArrayNDIterable(source_array,
&arena)
.value(),
*iterable, dest_array.shape(), constraints, &arena)
.Copy();
}
TEST(NDIterableElementwiseOutputTransformTest, Basic) {
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest = tensorstore::AllocateArray<double>(source.shape());
TENSORSTORE_EXPECT_OK(TestCopy(
[](const int* source, double* dest, void* status) { *dest = -*source; },
{}, source, dest));
EXPECT_EQ(
tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}),
dest);
}
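// The elementwise function may abort the copy partway through by filling in
// the status and returning false; elements already written are preserved.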
TEST(NDIterableElementwiseOutputTransformTest, PartialCopy) {
auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6});
auto dest = tensorstore::AllocateArray<double>(
source.shape(), tensorstore::c_order, tensorstore::value_init);
EXPECT_THAT(TestCopy(
[](const int* source, double* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 0) {
*status = absl::UnknownError("zero");
return false;
}
*dest = -*source;
return true;
},
tensorstore::c_order, source, dest),
absl::UnknownError("zero"));
EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}),
dest);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_elementwise_output_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_elementwise_output_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3c8d261a-8b13-453f-97e9-ad253db6ca01 | cpp | tensorflow/tensorflow | process_function_library_runtime | tensorflow/core/common_runtime/process_function_library_runtime.cc | tensorflow/core/common_runtime/process_function_library_runtime_test.cc | #include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/common_runtime/build_graph_options.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/int32_fulltype.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/optimize_function_graph_utils.h"
#include "tensorflow/core/common_runtime/partitioning_utils.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/common_runtime/rendezvous_util.h"
#include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include "tensorflow/core/common_runtime/single_threaded_executor.h"
#include "tensorflow/core/common_runtime/stats_publisher_interface.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tensorflow/core/util/reffed_status_callback.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
#endif
namespace tensorflow {
namespace {
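// Number of component subgraphs above which instantiation happens in parallel
// on the default thread pool. Overridable via the
// TF_PFLR_PARALLEL_INSTANTIATE_THRESHOLD environment variable; defaults to 8.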
int64_t GetParallelSubgraphThreshold() {
static int64_t parallel_subgraph_threshold = []() {
int64_t result;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar(
"TF_PFLR_PARALLEL_INSTANTIATE_THRESHOLD", 8, &result));
return result;
}();
return parallel_subgraph_threshold;
}
}
const char ProcessFunctionLibraryRuntime::kDefaultFLRDevice[] = "null";
void ProcessFunctionLibraryRuntime::FunctionData::DistributedInit(
DistributedFunctionLibraryRuntime* parent, const string& function_name,
const FunctionLibraryDefinition& lib_def, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::DoneCallback done) {
{
mutex_lock l(mu_);
is_cross_process_ = true;
if (init_started_) {
init_done_.WaitForNotification();
done(init_result_);
return;
}
init_started_ = true;
}
parent->Instantiate(function_name, lib_def, attrs, options, &local_handle_,
[this, done](const Status& s) {
                        // Record the status for callers that raced on
                        // init_started_ and are waiting on init_done_.
                        init_result_ = s;
                        init_done_.Notify();
done(s);
});
}
ProcessFunctionLibraryRuntime::ProcessFunctionLibraryRuntime(
const DeviceMgr* device_mgr, Env* env, const ConfigProto* config,
int graph_def_version, const FunctionLibraryDefinition* lib_def,
const OptimizerOptions& optimizer_options,
thread::ThreadPool* default_thread_pool,
DistributedFunctionLibraryRuntime* parent,
const SessionMetadata* session_metadata,
Rendezvous::Factory rendezvous_factory,
StatsPublisherFactory stats_publisher_factory)
: parent_(parent),
env_(env),
config_(config ? std::make_optional(*config) : std::nullopt),
device_mgr_(device_mgr),
lib_def_(lib_def),
default_thread_pool_(default_thread_pool),
flr_map_(
new std::unordered_map<Device*,
core::RefCountPtr<FunctionLibraryRuntime>>),
next_handle_(0),
session_metadata_(session_metadata),
rendezvous_factory_(std::move(rendezvous_factory)),
optimizer_options_(optimizer_options),
graph_def_version_(graph_def_version),
stats_publisher_factory_(std::move(stats_publisher_factory)) {
if (device_mgr == nullptr) {
(*flr_map_)[nullptr] = NewFunctionLibraryRuntime(
nullptr, env, config_ ? &(*config_) : nullptr, nullptr,
graph_def_version, lib_def_, default_thread_pool, optimizer_options,
session_metadata_, this);
return;
}
InitializeDeviceAndFlr();
}
Status ProcessFunctionLibraryRuntime::SendTensors(
const string& source_device, const string& target_device,
const string& key_prefix, int64_t src_incarnation,
absl::Span<const Tensor> tensors_to_send, DeviceContext* device_context,
const std::vector<AllocatorAttributes>& alloc_attrs,
RendezvousInterface* rendezvous) {
std::vector<string> keys;
for (int i = 0; i < tensors_to_send.size(); ++i) {
string name = strings::StrCat(key_prefix, i);
string key = Rendezvous::CreateKey(source_device, src_incarnation,
target_device, name, FrameAndIter(0, 0));
keys.push_back(key);
}
TF_RETURN_IF_ERROR(SendTensorsToRendezvous(
rendezvous, device_context, alloc_attrs, keys, tensors_to_send));
return absl::OkStatus();
}
void ProcessFunctionLibraryRuntime::ReceiveTensorsAsync(
const string& source_device, const string& target_device,
const string& key_prefix, int64_t src_incarnation, int64_t num_tensors,
DeviceContext* device_context,
const std::vector<AllocatorAttributes>& alloc_attrs,
RendezvousInterface* rendezvous, std::vector<Tensor>* received_tensors,
StatusCallback done) {
std::vector<string> keys;
for (int64_t i = 0; i < num_tensors; ++i) {
string name = strings::StrCat(key_prefix, i);
string key = Rendezvous::CreateKey(source_device, src_incarnation,
target_device, name, FrameAndIter(0, 0));
keys.push_back(key);
}
RecvOutputsFromRendezvousAsync(rendezvous, device_context, alloc_attrs, keys,
received_tensors, std::move(done));
}
Status ProcessFunctionLibraryRuntime::GetRetTypes(
FunctionLibraryRuntime::Handle h, DataTypeVector* ret_types) {
FunctionLibraryRuntime* flr = nullptr;
{
tf_shared_lock l(mu_);
auto miter = mdevice_data_.find(h);
if (miter != mdevice_data_.end()) {
*ret_types = miter->second->ret_types_;
return absl::OkStatus();
}
auto fiter = function_data_.find(h);
if (fiter != function_data_.end()) {
flr = GetFLR(fiter->second->target_device());
}
}
if (flr != nullptr) {
return flr->GetRetTypes(h, ret_types);
}
return errors::InvalidArgument("Handle ", h, " not found.");
}
Status ProcessFunctionLibraryRuntime::GetDeviceIncarnation(
const string& device_name, int64_t* incarnation) const {
FunctionLibraryRuntime* flr = GetFLR(device_name);
if (flr == nullptr) {
return errors::InvalidArgument("Device name: ", device_name, " not found.");
}
*incarnation = flr->device()->attributes().incarnation();
return absl::OkStatus();
}
Status ProcessFunctionLibraryRuntime::GetDeviceContext(
const string& device_name, DeviceContext** device_context) const {
*device_context = nullptr;
FunctionLibraryRuntime* flr = GetFLR(device_name);
if (flr == nullptr) {
return errors::InvalidArgument("Device name: ", device_name, " not found.");
}
Device* device = flr->device();
string device_type = device->parsed_name().type;
if (device_type == "CPU" || device_type == "TPU_SYSTEM") {
return absl::OkStatus();
}
if (device->IsRemoteCallAllowed()) {
auto* dev_info = flr->device()->tensorflow_accelerator_device_info();
if (dev_info) {
*device_context = dev_info->default_context;
return absl::OkStatus();
}
}
return errors::Internal("Device type: ", device_type,
" is currently unsupported for remote ",
"function executions");
}
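// Builds the DeviceSet from the local (and, when available, remote) device
// managers, preferring local devices over remote devices with the same name,
// and creates a FunctionLibraryRuntime for every local device.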
void ProcessFunctionLibraryRuntime::InitializeDeviceAndFlr() {
mutex_lock l(mu_);
device_set_ = std::make_shared<DeviceSet>();
if (parent_ != nullptr && parent_->remote_device_mgr() != nullptr) {
for (auto d : parent_->remote_device_mgr()->ListDevices()) {
Device* device = nullptr;
if (device_mgr_->LookupDevice(d->name(), &device) == absl::OkStatus()) {
device_set_->AddDevice(device);
} else {
device_set_->AddDevice(d);
}
}
} else {
for (auto d : device_mgr_->ListDevices()) {
device_set_->AddDevice(d);
}
}
for (Device* d : device_mgr_->ListDevices()) {
if ((*flr_map_)[d] == nullptr) {
(*flr_map_)[d] = NewFunctionLibraryRuntime(
device_mgr_, env_, config_ ? &(*config_) : nullptr, d,
graph_def_version_, lib_def_, default_thread_pool_,
optimizer_options_, session_metadata_, this);
}
}
}
FunctionLibraryRuntime* ProcessFunctionLibraryRuntime::GetFLR(
const string& device_name) const {
Device* device = nullptr;
if (device_name != kDefaultFLRDevice) {
if (!device_mgr_->LookupDevice(device_name, &device).ok()) {
VLOG(4) << "Could not find device: " << device_name;
return nullptr;
}
}
const auto& iter = flr_map_->find(device);
if (iter == flr_map_->end()) {
VLOG(1) << "Could not find device: " << device_name
<< "in the local process.";
return nullptr;
}
return iter->second.get();
}
FunctionLibraryRuntime::Handle ProcessFunctionLibraryRuntime::AddHandle(
const string& function_key, const string& device_name,
FunctionLibraryRuntime::LocalHandle local_handle) {
mutex_lock l(mu_);
return AddHandleLocked(function_key, device_name, local_handle);
}
FunctionLibraryRuntime::Handle ProcessFunctionLibraryRuntime::AddHandleLocked(
const string& function_key, const string& device_name,
FunctionLibraryRuntime::LocalHandle local_handle) {
auto h = next_handle_;
function_data_[h] =
std::make_unique<FunctionData>(device_name, local_handle, function_key);
table_[function_key] = h;
next_handle_++;
return h;
}
FunctionLibraryRuntime::Handle
ProcessFunctionLibraryRuntime::AddMultiDeviceHandle(
std::unique_ptr<MultiDeviceFunctionData> data, const string& function_key) {
mutex_lock l(mu_);
auto h = next_handle_;
mdevice_data_[h] = std::move(data);
table_[function_key] = h;
next_handle_++;
return h;
}
bool ProcessFunctionLibraryRuntime::HasMultiDeviceHandle(
FunctionLibraryRuntime::Handle handle) const {
bool multi_device;
{
tf_shared_lock l(mu_);
multi_device = mdevice_data_.find(handle) != mdevice_data_.end();
}
return multi_device;
}
FunctionLibraryRuntime::Handle ProcessFunctionLibraryRuntime::GetHandle(
const string& function_key) const {
tf_shared_lock l(mu_);
return gtl::FindWithDefault(table_, function_key, kInvalidHandle);
}
FunctionLibraryRuntime::LocalHandle
ProcessFunctionLibraryRuntime::GetHandleOnDevice(
const string& device_name, FunctionLibraryRuntime::Handle handle,
bool include_multi_device) const {
tf_shared_lock l(mu_);
auto miter = mdevice_data_.find(handle);
if (miter != mdevice_data_.end()) {
if (!include_multi_device) return kInvalidLocalHandle;
const MultiDeviceFunctionData& data = *miter->second;
if (data.glue_.size() != 1) return kInvalidLocalHandle;
const auto& pair = *data.glue_.begin();
const string& func_device_name = pair.first;
const ComponentFunctionData& component_data = pair.second;
if (func_device_name != device_name) return kInvalidLocalHandle;
handle = component_data.handle;
}
auto iter = function_data_.find(handle);
if (iter == function_data_.end()) {
return kInvalidLocalHandle;
}
FunctionData* function_data = iter->second.get();
if (function_data->target_device() != device_name) {
return kInvalidLocalHandle;
}
return function_data->local_handle();
}
string ProcessFunctionLibraryRuntime::GetDeviceName(
FunctionLibraryRuntime::Handle handle) const {
tf_shared_lock l(mu_);
auto iter = function_data_.find(handle);
CHECK(iter != function_data_.end());
FunctionData* function_data = iter->second.get();
return function_data->target_device();
}
ProcessFunctionLibraryRuntime::MultiDeviceFunctionData*
ProcessFunctionLibraryRuntime::IsMultiDevice(
FunctionLibraryRuntime::Handle handle) const {
tf_shared_lock l(mu_);
const auto& it = mdevice_data_.find(handle);
if (it != mdevice_data_.end()) {
return it->second.get();
}
return nullptr;
}
namespace {
std::vector<Tensor> GetLocalArgs(absl::Span<const FunctionArg> args) {
std::vector<Tensor> tensors;
for (const auto& arg : args) {
if (arg.index() == 0) {
tensors.push_back(absl::get<Tensor>(arg));
}
}
return tensors;
}
FunctionLibraryRuntime::DoneCallback TensorsToFunctionRetsDoneCallback(
std::vector<FunctionRet>* rets, std::vector<Tensor>* tensors,
FunctionLibraryRuntime::DoneCallback done) {
return [rets, tensors, done = std::move(done)](const Status& s) {
if (s.ok()) {
for (const auto& t : *tensors) {
rets->push_back(t);
}
}
delete tensors;
done(s);
};
}
Status FunctionRetsToTensors(const std::vector<FunctionRet>* function_rets,
std::vector<Tensor>* tensors) {
for (const auto& ret : *function_rets) {
if (ret.index() != 0) {
      return errors::Internal(
          "Expected a Tensor as a function output but got a TensorShape.");
}
tensors->push_back(absl::get<Tensor>(ret));
}
return absl::OkStatus();
}
}
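// Classifies a component subgraph for synchronous execution: any op that is
// unsafe to run synchronously forces kAsyncRequired; otherwise the presence of
// Send/Recv ops selects kSafeForSync, kSendOnly, kRecvOnly, or (with both)
// kAsyncRequired.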
ProcessFunctionLibraryRuntime::AsyncAttributes::Summary
ProcessFunctionLibraryRuntime::AsyncAttributes::Summarize(const Graph* graph) {
bool has_send_op = false;
bool has_recv_op = false;
bool has_unsafe_op = false;
for (const Node* node : graph->nodes()) {
if (node->IsSend() || node->IsHostSend()) {
has_send_op = true;
}
if (node->IsRecv() || node->IsHostRecv()) {
has_recv_op = true;
}
if (!ValidateOpIsSafeForSyncExecution(*node,
allow_control_flow_sync_execution())
.ok()) {
has_unsafe_op = true;
}
}
if (has_unsafe_op) {
metrics::IncrementTestCounter("subgraph_async_summary", "unsafe_op");
return AsyncAttributes::kAsyncRequired;
}
if (!has_send_op && !has_recv_op) {
metrics::IncrementTestCounter("subgraph_async_summary", "safe_for_sync");
return AsyncAttributes::kSafeForSync;
}
if (has_send_op && !has_recv_op) {
metrics::IncrementTestCounter("subgraph_async_summary", "send_only");
return AsyncAttributes::kSendOnly;
}
if (has_recv_op && !has_send_op) {
metrics::IncrementTestCounter("subgraph_async_summary", "recv_only");
return AsyncAttributes::kRecvOnly;
}
metrics::IncrementTestCounter("subgraph_async_summary", "other");
return AsyncAttributes::kAsyncRequired;
}
void ProcessFunctionLibraryRuntime::PublishSubgraphs(
const std::string& function_name,
std::vector<core::RefCountPtr<FunctionRecord>>&& function_records) {
std::unique_ptr<StatsPublisherInterface> stats_publisher =
stats_publisher_factory_(function_name, BuildGraphOptions(),
SessionOptions());
stats_publisher->PublishGraphProto(std::move(function_records));
mutex_lock l(mu_);
stats_publishers_.push_back(std::move(stats_publisher));
}
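// Instantiates a function across devices. Reuses a cached handle when the
// canonicalized function key matches; otherwise optimizes the graph
// (preferring an AOT-optimized proto when one exists), partitions it into
// per-device subgraphs, instantiates each component function (in parallel when
// the subgraph count exceeds the threshold and a thread pool is available),
// and records the result under a new multi-device handle.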
Status ProcessFunctionLibraryRuntime::InstantiateMultiDevice(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::Handle* handle) {
const string& function_key = Canonicalize(function_name, attrs, options);
{
mutex_lock l(mu_);
const auto& it = table_.find(function_key);
if (it != table_.end()) {
*handle = it->second;
++mdevice_data_[*handle]->instantiation_counter_;
return absl::OkStatus();
}
}
VLOG(1) << "Instantiating MultiDevice function \"" << function_name
<< "\" on default device \"" << options.target << "\"";
if (VLOG_IS_ON(3)) {
int index = 0;
VLOG(3) << "Requested input devices:";
for (const string& device : options.input_devices) {
VLOG(3) << " [input " << index++ << "] " << device;
}
index = 0;
VLOG(3) << "Requested output devices:";
for (const string& device : options.output_devices) {
VLOG(3) << " [output " << index++ << "] " << device;
}
}
const std::shared_ptr<DeviceSet> dev_set = device_set();
Device* default_device = nullptr;
if (options.default_device_to_target && !options.target.empty()) {
FunctionLibraryRuntime* flr = GetFLR(options.target);
if (flr == nullptr) {
return errors::InvalidArgument(
"Cannot instantiate multi-device function with target device ",
options.target);
}
default_device = flr->device();
}
std::vector<CompositeDevice*> composite_devices;
{
tf_shared_lock l(mu_);
for (auto* d : composite_devices_) composite_devices.push_back(d);
}
Device* cpu_device;
TF_RETURN_IF_ERROR(device_mgr_->LookupDevice("CPU:0", &cpu_device));
const uint64 optimization_start_time_usecs = Env::Default()->NowMicros();
std::optional<absl::StatusOr<OptimizedFunctionGraph>> optimized_graph_proto =
options.lib_def != nullptr
? options.lib_def->FindOptimizedFunctionGraph(function_name)
: lib_def_->FindOptimizedFunctionGraph(function_name);
if (optimized_graph_proto.has_value()) {
if (optimized_graph_proto->ok()) {
LOG(INFO) << "Found AOT'd graph for function: " << function_name;
metrics::UpdateFunctionGraphOptimizationSavingTime(
optimized_graph_proto->value().optimization_time_usecs(),
metrics::GraphOptimizationSource::kAot);
metrics::IncrementFunctionGraphOptimizationCacheHitCount(
1, metrics::GraphOptimizationSource::kAot);
} else {
LOG(WARNING) << "Failed to create AOT'd graph for function: "
<< function_name
<< " with status: " << optimized_graph_proto->status();
}
}
absl::StatusOr<OptimizedFunctionGraphInfo> optimized_graph_info =
(!optimized_graph_proto.has_value() ||
!optimized_graph_proto.value().ok())
? OptimizeFunctionGraphOrReadFromFileCache(
function_name, attrs, options, *dev_set, lib_def_,
composite_devices, cpu_device, default_device, env_)
: OptimizedFunctionGraphInfo::FromProto(
std::move(optimized_graph_proto.value().value()));
if (!optimized_graph_info.ok()) return optimized_graph_info.status();
optimized_graph_info->function_graph->mutable_flib_def()
->set_default_registry(&(optimized_graph_info->lib_def));
TF_ASSIGN_OR_RETURN(auto subgraphs, PreprocessAndPartitionGraph(
function_name, *optimized_graph_info,
options, *dev_set, lib_def_,
composite_devices, cpu_device, env_));
const uint64 optimization_end_time_usecs = Env::Default()->NowMicros();
const uint64 graph_optimization_duration =
optimization_end_time_usecs - optimization_start_time_usecs;
metrics::UpdateFunctionGraphOptimizationTime(graph_optimization_duration);
VLOG(1) << "Finished graph optimizations for MultiDevice function \""
<< function_name << "\" with target device \"" << options.target
<< "\". Took " << graph_optimization_duration / 1000000 << " secs.";
const FunctionLibraryDefinition* lib_def =
options.lib_def == nullptr ? lib_def_ : options.lib_def;
if (options.graph_collector != nullptr) {
for (const auto& pair : *subgraphs) {
GraphDef def;
pair.second->ToGraphDef(&def);
*def.mutable_library() = lib_def->ReachableDefinitions(def).ToProto();
options.graph_collector->CollectPartitionedGraph(def);
}
}
const auto& node_name_to_control_ret =
optimized_graph_info->node_name_to_control_ret;
const auto control_ret =
[&node_name_to_control_ret](const Node* n) -> std::optional<string> {
const auto it = node_name_to_control_ret.find(n->name());
return it != node_name_to_control_ret.end()
? absl::make_optional<string>(it->second)
: absl::nullopt;
};
auto data = std::make_unique<MultiDeviceFunctionData>(
function_name, function_key, optimized_graph_info->num_return_nodes,
std::move(optimized_graph_info->ret_types));
int i = 0;
FunctionLibraryDefinition data_lib_def =
std::move(optimized_graph_info->lib_def);
FunctionNameGenerator name_generator(
&data_lib_def,
absl::StrCat(function_name, "_partitioned_", random::New64()));
const int num_subgraphs = subgraphs->size();
absl::InlinedVector<Status, 4UL> instantiate_status(num_subgraphs);
data->enable_sync_execution = false;
if (options.allow_small_function_optimizations) {
data->enable_sync_execution = true;
for (const auto& pair : *subgraphs) {
ComponentFunctionData* comp_data = &data->glue_[pair.first];
const Graph* subgraph = pair.second.get();
comp_data->async_attributes =
AsyncAttributes(subgraph, options.allow_control_flow_sync_execution);
if (comp_data->async_attributes.summary() ==
AsyncAttributes::kAsyncRequired) {
data->enable_sync_execution = false;
}
}
}
auto instantiate_component = [this, dev_set, &data_lib_def, &control_ret,
&options,
&data](const string& target,
std::unique_ptr<Graph> subgraph,
ComponentFunctionData* comp_data,
std::function<void(Status)> done) {
const string& device_type =
dev_set->FindDeviceByName(target)->device_type();
bool ints_on_device =
(device_type == "TPU" || device_type == "XLA_CPU" ||
device_type == "XLA_GPU" || options.int_args_and_retvals_on_device);
Int32FulltypePass int32_fulltype(
"ProcessFunctionLibraryRuntime::InstantiateMultiDevice");
Status s = int32_fulltype.ProcessGraph(subgraph.get(), ints_on_device);
if (!s.ok()) {
done(s);
return;
}
s = UpdateArgAndRetvalMetadata(subgraph.get(), &comp_data->arg_indices,
&comp_data->ret_indices,
&comp_data->arg_alloc_attrs,
&comp_data->ret_alloc_attrs, ints_on_device);
if (!s.ok()) {
done(s);
return;
}
FunctionDef shard;
s = GraphToFunctionDef(std::move(subgraph), comp_data->name, control_ret,
&shard);
if (!s.ok()) {
done(s);
return;
}
subgraph.reset();
AttrValueMap attrs(shard.attr());
s = data_lib_def.AddFunctionDef(std::move(shard));
if (!s.ok()) {
done(s);
return;
}
FunctionLibraryRuntime::InstantiateOptions opts;
opts.executor_type = options.executor_type;
opts.target = target;
opts.lib_def = &data_lib_def;
opts.create_kernels_eagerly = options.create_kernels_eagerly;
opts.state_handle = options.state_handle;
opts.allow_small_function_optimizations = data->enable_sync_execution;
opts.allow_control_flow_sync_execution =
options.allow_control_flow_sync_execution;
AttrValue ints_on_device_attr;
ints_on_device_attr.set_b(options.int_args_and_retvals_on_device);
attrs.insert(
{FunctionLibraryDefinition::kIntsOnDeviceAttr, ints_on_device_attr});
VLOG(1) << "Start instantiating component function " << comp_data->name
<< " on device " << target;
auto* component_handle = new FunctionLibraryRuntime::Handle;
auto wrapped_done = [this, comp_data, component_handle, &data,
done = std::move(done)](const Status& s) {
VLOG(1) << "Finished instantiating component function " << comp_data->name
<< " with handle " << *component_handle << " status: " << s;
if (s.ok()) {
{
mutex_lock l(mu_);
if (function_data_[*component_handle]->is_cross_process()) {
data->is_cross_process_ = true;
}
}
comp_data->handle = *component_handle;
}
delete component_handle;
done(s);
};
FunctionLibraryRuntime* flr = GetFLR(opts.target);
if (flr != nullptr) {
Status s = flr->Instantiate(comp_data->name, AttrSlice(&attrs), opts,
component_handle);
wrapped_done(s);
} else {
opts.ret_indices = comp_data->ret_indices;
InstantiateRemote(comp_data->name, AttrSlice(&attrs), opts,
component_handle, std::move(wrapped_done));
}
};
if (default_thread_pool_ != nullptr &&
num_subgraphs > GetParallelSubgraphThreshold()) {
BlockingCounter counter(static_cast<int>(num_subgraphs));
for (auto& pair : *subgraphs) {
Status* status = &instantiate_status[i];
ComponentFunctionData* comp_data = &data->glue_[pair.first];
comp_data->name = name_generator.GetName();
default_thread_pool_->Schedule(
[&instantiate_component, &pair, comp_data, &counter, status]() {
instantiate_component(pair.first, std::move(pair.second), comp_data,
[&counter, status](Status s) {
status->Update(s);
counter.DecrementCount();
});
});
i += 1;
}
counter.Wait();
} else {
for (auto& pair : *subgraphs) {
Notification n;
Status* status = &instantiate_status[i];
ComponentFunctionData* comp_data = &data->glue_[pair.first];
comp_data->name = name_generator.GetName();
instantiate_component(pair.first, std::move(pair.second), comp_data,
[&n, status](Status s) {
status->Update(s);
n.Notify();
});
n.WaitForNotification();
i += 1;
}
}
StatusGroup group;
for (auto& status : instantiate_status) {
group.Update(status);
}
TF_RETURN_IF_ERROR(group.as_summary_status());
std::vector<core::RefCountPtr<FunctionRecord>> function_records;
const bool should_publish_function_graphs =
flags::Global().publish_function_graphs.value();
if (should_publish_function_graphs) {
for (const auto& pair : *subgraphs) {
ComponentFunctionData* comp_data = &data->glue_[pair.first];
function_records.push_back(data_lib_def.FindRecord(comp_data->name));
}
}
*handle = AddMultiDeviceHandle(std::move(data), function_key);
VLOG(1) << "Instantiated MultiDevice function \"" << function_name
<< "\" with handle " << *handle;
if (should_publish_function_graphs) {
PublishSubgraphs(function_name, std::move(function_records));
}
return absl::OkStatus();
}
Status ProcessFunctionLibraryRuntime::GetOutputDevices(
FunctionLibraryRuntime::Handle handle,
std::vector<Device*>* output_devices) const {
MultiDeviceFunctionData* data = IsMultiDevice(handle);
if (data == nullptr) {
    return errors::InvalidArgument(
        "Failed to find multi-device function handle ", handle);
}
for (const auto& pair : data->glue_) {
const ComponentFunctionData& comp_data = pair.second;
DCHECK(comp_data.ret_alloc_attrs.size() == comp_data.ret_indices.size());
if (comp_data.ret_indices.empty()) {
continue;
}
const string& target = pair.first;
FunctionLibraryRuntime* target_flr = GetFLR(target);
Device* target_device = nullptr;
Device* host = nullptr;
if (target_flr == nullptr) {
if (!data->has_remote_outputs) {
data->has_remote_outputs = true;
}
target_device = device_set()->FindDeviceByName(target);
string remote_host;
TF_RETURN_IF_ERROR(
DeviceNameUtils::DeviceNameToCpuDeviceName(target, &remote_host));
host = device_set()->FindDeviceByName(remote_host);
} else {
target_device = target_flr->device();
}
output_devices->resize(data->num_outputs_);
for (int j = 0; j < comp_data.ret_indices.size(); ++j) {
int ret_index = comp_data.ret_indices[j];
if (data->ret_types_[ret_index] == DT_RESOURCE) {
(*output_devices)[ret_index] = target_device;
} else {
(*output_devices)[ret_index] =
comp_data.ret_alloc_attrs[j].on_host() ? host : target_device;
}
}
}
return absl::OkStatus();
}
Status ProcessFunctionLibraryRuntime::PrepareRunMultiDevice(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle,
const MultiDeviceFunctionData** data) const {
if (opts.create_rendezvous) {
return errors::Internal(
"Cannot call ProcessFunctionLibraryRuntime::Run with "
"create_rendezvous=true. Please run the function "
"using FunctionLibraryRuntime::Run");
}
*data = IsMultiDevice(handle);
if (*data == nullptr) {
return errors::NotFound("Multi-device function handle ", handle,
"not found. Was the function instantiated?");
}
if (opts.rendezvous && (*data)->is_cross_process_ &&
!opts.rendezvous->is_cross_process()) {
return errors::InvalidArgument(
"Running a cross process function ", (*data)->function_name_,
" without an appropriate cross process Rendezvous.");
}
return absl::OkStatus();
}
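// Orders component subgraphs so that send-only subgraphs come first; when
// components run synchronously one after another, a receiver must not execute
// before the matching sender has produced its tensors.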
std::vector<string> ProcessFunctionLibraryRuntime::GetOrderedSubgraphs(
const MultiDeviceFunctionData* data) const {
std::vector<string> subgraph_keys;
subgraph_keys.reserve(data->glue_.size());
for (const auto& pair : data->glue_) {
subgraph_keys.push_back(pair.first);
}
auto send_first_ordering = [&](const string& a, const string& b) {
auto a_summary = data->glue_.at(a).async_attributes.summary();
auto b_summary = data->glue_.at(b).async_attributes.summary();
if (a_summary == b_summary) {
return false;
}
if (a_summary == AsyncAttributes::kSendOnly) {
return true;
}
return false;
};
std::sort(subgraph_keys.begin(), subgraph_keys.end(), send_first_ordering);
return subgraph_keys;
}
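// Runs every component function inline on the calling thread. Local components
// execute via FunctionLibraryRuntime::RunSync; a remote component falls back
// to the asynchronous path and blocks on a Notification.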
Status ProcessFunctionLibraryRuntime::RunMultiDeviceSync(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle outer_handle, std::vector<FunctionRet>* rets,
std::function<Status(const ComponentFunctionData& comp_data,
InternalArgs* args)>
get_component_args) const {
const MultiDeviceFunctionData* data;
Status prepare_status = PrepareRunMultiDevice(opts, outer_handle, &data);
if (!prepare_status.ok()) {
return prepare_status;
}
FunctionLibraryRuntime::Options opts_copy = opts;
std::vector<string> subgraph_keys = GetOrderedSubgraphs(data);
for (const string& target : subgraph_keys) {
const ComponentFunctionData& comp_data = data->glue_.at(target);
FunctionLibraryRuntime::Handle comp_handle = comp_data.handle;
opts_copy.args_alloc_attrs = comp_data.arg_alloc_attrs;
opts_copy.rets_alloc_attrs = comp_data.ret_alloc_attrs;
InternalArgs comp_args;
Status args_status = get_component_args(comp_data, &comp_args);
if (!args_status.ok()) {
VLOG(2) << "Failed to get component function arguments: " << args_status;
return args_status;
}
rets->resize(data->num_outputs_);
VLOG(1) << "Running component function on device " << target << " from "
<< data->function_name_ << " with handle " << comp_handle;
FunctionLibraryRuntime* flr = GetFLR(target);
if (flr != nullptr) {
opts_copy.remote_execution = false;
thread::ThreadPool* pool = flr->device()->tensorflow_device_thread_pool();
opts_copy.runner = (pool == nullptr) ? opts.runner : flr->runner();
VLOG(4) << " with " << opts_copy.DebugString();
std::vector<Tensor> comp_tensor_rets;
Status run_status =
flr->RunSync(opts_copy, comp_handle, GetLocalArgs(comp_args.args),
&comp_tensor_rets);
if (!run_status.ok()) {
VLOG(2) << "Component function execution failed: " << run_status;
const string function_and_msg = strings::StrCat(
errors::FormatFunctionForError(data->function_name_), " ",
run_status.message());
if (opts.rendezvous != nullptr) opts.rendezvous->StartAbort(run_status);
return errors::CreateWithUpdatedMessage(run_status, function_and_msg);
} else {
VLOG(2) << "Component function execution succeeded.";
for (int i = 0; i < comp_tensor_rets.size(); ++i) {
(*rets)[comp_data.ret_indices[i]] = comp_tensor_rets[i];
}
}
} else {
opts_copy.remote_execution = true;
VLOG(4) << " with " << opts_copy.DebugString();
std::vector<std::unique_ptr<CleanUpItem>> cleanup_items;
Notification n;
Status s;
std::vector<FunctionRet> comp_rets;
RunInternal(opts_copy, comp_handle, comp_args.args, &comp_rets,
&cleanup_items, [&n, &s](const Status& status) {
s.Update(status);
n.Notify();
});
n.WaitForNotification();
return s;
}
}
return absl::OkStatus();
}
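// Launches all component functions concurrently. A shared refcounted callback
// fires `done` once every component has finished, and the first failure
// cancels the remaining components through the cancellation manager.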
void ProcessFunctionLibraryRuntime::RunMultiDeviceAsync(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle outer_handle, std::vector<FunctionRet>* rets,
std::vector<std::unique_ptr<CleanUpItem>>* cleanup_items,
FunctionLibraryRuntime::DoneCallback done,
std::function<Status(const ComponentFunctionData& comp_data,
InternalArgs* args)>
get_component_args) const {
const MultiDeviceFunctionData* data;
Status prepare_status = PrepareRunMultiDevice(opts, outer_handle, &data);
if (!prepare_status.ok()) {
done(prepare_status);
return;
}
std::shared_ptr<CancellationManager> local_cm;
CancellationManager* cm = opts.cancellation_manager;
if (cm == nullptr) {
local_cm = std::make_shared<CancellationManager>();
cm = local_cm.get();
}
auto* refcounted_done = new ReffedStatusCallback(std::move(done));
for (int i = 0; i < data->glue_.size(); ++i) {
refcounted_done->Ref();
}
FunctionLibraryRuntime::Options opts_copy = opts;
for (const auto& pair : data->glue_) {
const string& target = pair.first;
const ComponentFunctionData& comp_data = pair.second;
FunctionLibraryRuntime::Handle comp_handle = pair.second.handle;
opts_copy.args_alloc_attrs = comp_data.arg_alloc_attrs;
opts_copy.rets_alloc_attrs = comp_data.ret_alloc_attrs;
opts_copy.cancellation_manager = cm;
InternalArgs comp_args;
Status s = get_component_args(comp_data, &comp_args);
if (!s.ok()) {
VLOG(2) << "Failed to get component function arguments: " << s;
refcounted_done->UpdateStatus(s);
refcounted_done->Unref();
cm->StartCancel();
continue;
}
std::vector<FunctionRet>* comp_rets = new std::vector<FunctionRet>;
rets->resize(data->num_outputs_);
auto component_fn_callback = [comp_rets, rets, comp_data, refcounted_done,
cm, local_cm, data, comp_handle,
target](const Status& status) {
if (!status.ok()) {
VLOG(2) << "Component function execution on target " << target
<< " from " << data->function_name_ << " with handle "
<< comp_handle << " failed: " << status;
const string function_and_msg = strings::StrCat(
errors::FormatFunctionForError(data->function_name_), " ",
status.message());
refcounted_done->UpdateStatus(
errors::CreateWithUpdatedMessage(status, function_and_msg));
cm->StartCancel();
} else {
VLOG(2) << "Component function execution on target " << target
<< " from " << data->function_name_ << " with handle "
<< comp_handle << " succeeded.";
for (int i = 0; i < comp_rets->size(); ++i) {
(*rets)[comp_data.ret_indices[i]] = (*comp_rets)[i];
}
}
delete comp_rets;
refcounted_done->Unref();
};
FunctionLibraryRuntime* flr = GetFLR(target);
if (flr != nullptr) {
opts_copy.remote_execution = false;
thread::ThreadPool* pool = flr->device()->tensorflow_device_thread_pool();
opts_copy.runner = (pool == nullptr) ? opts.runner : flr->runner();
VLOG(1) << "Running component function on device " << target << " from "
<< data->function_name_ << " with handle " << comp_handle;
VLOG(4) << " with " << opts_copy.DebugString();
std::vector<Tensor>* comp_tensor_rets = new std::vector<Tensor>;
flr->Run(
opts_copy, comp_handle, GetLocalArgs(comp_args.args),
comp_tensor_rets,
TensorsToFunctionRetsDoneCallback(comp_rets, comp_tensor_rets,
std::move(component_fn_callback)));
} else {
opts_copy.remote_execution = true;
VLOG(1) << "Running component function on device " << target << " from "
<< data->function_name_ << " with handle " << comp_handle;
VLOG(4) << " with " << opts_copy.DebugString();
RunInternal(opts_copy, comp_handle, comp_args.args, comp_rets,
cleanup_items, std::move(component_fn_callback));
}
}
refcounted_done->Unref();
}
Status ProcessFunctionLibraryRuntime::Instantiate(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::Handle* handle) {
if (options.is_multi_device_function) {
return InstantiateMultiDevice(function_name, attrs, options, handle);
}
*handle = kInvalidHandle;
FunctionLibraryRuntime* flr = GetFLR(options.target);
if (flr != nullptr) {
return flr->Instantiate(function_name, attrs, options, handle);
}
Status status;
Notification notification;
InstantiateRemote(function_name, attrs, options, handle,
[&status, ¬ification](const Status& s) {
status = s;
notification.Notify();
});
notification.WaitForNotification();
return status;
}
Status ProcessFunctionLibraryRuntime::IsCrossProcess(
FunctionLibraryRuntime::Handle handle, bool* is_cross_process) const {
tf_shared_lock l(mu_);
const auto& mdevice_it = mdevice_data_.find(handle);
if (mdevice_it != mdevice_data_.end()) {
*is_cross_process = mdevice_it->second->is_cross_process_;
return absl::OkStatus();
}
const auto& it = function_data_.find(handle);
if (it != function_data_.end()) {
*is_cross_process = it->second->is_cross_process();
return absl::OkStatus();
}
return errors::InvalidArgument("Handle ", handle, " not found.");
}
void ProcessFunctionLibraryRuntime::InstantiateRemote(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::Handle* handle,
FunctionLibraryRuntime::DoneCallback done) {
if (parent_ == nullptr) {
done(errors::Internal(
"Currently don't support instantiating functions on device: ",
options.target));
return;
}
auto target = options.target;
VLOG(1) << "ProcessFLR Instantiate: " << function_name << " on: " << target;
string function_key = Canonicalize(function_name, attrs, options);
FunctionData* f;
{
mutex_lock l(mu_);
FunctionLibraryRuntime::Handle h =
gtl::FindWithDefault(table_, function_key, kInvalidHandle);
if (h == kInvalidHandle || function_data_.count(h) == 0) {
h = AddHandleLocked(function_key, target, kInvalidHandle);
}
f = function_data_[h].get();
*handle = h;
}
f->DistributedInit(
parent_, function_name,
options.lib_def == nullptr ? *lib_def_ : *options.lib_def, attrs, options,
[this, function_name, target, handle, done](const Status& s) {
VLOG(1) << "ProcessFLR Instantiate [success]: " << function_name
<< " on: " << target << " with handle: " << *handle
<< " (this: " << this << ")";
done(s);
});
}
Status ProcessFunctionLibraryRuntime::RemoveHandle(
FunctionLibraryRuntime::Handle handle) {
mutex_lock l(mu_);
table_.erase(function_data_[handle]->function_key());
function_data_.erase(handle);
return absl::OkStatus();
}
Status ProcessFunctionLibraryRuntime::ReleaseMultiDeviceHandle(
FunctionLibraryRuntime::Handle handle) {
std::unique_ptr<MultiDeviceFunctionData> mdata;
{
mutex_lock l(mu_);
auto it = mdevice_data_.find(handle);
--it->second->instantiation_counter_;
if (it->second->instantiation_counter_ != 0) {
return absl::OkStatus();
}
mdata = std::move(it->second);
table_.erase(mdata->function_key_);
mdevice_data_.erase(it);
}
Status overall_status;
for (const auto& it : mdata->glue_) {
const string& device = it.first;
FunctionLibraryRuntime::Handle flr_handle = it.second.handle;
FunctionLibraryRuntime* flr = GetFLR(device);
if (flr == nullptr) {
if (parent_ != nullptr) {
return errors::Unimplemented(
"Releasing a multi-device component handle on a remote device is "
"not yet implemented.");
}
return errors::InvalidArgument(
"Failed to find FunctionLibraryRuntime for device ", device,
" when releasing multi-device function handle ", handle);
}
Status status = flr->ReleaseHandle(flr_handle);
if (!status.ok()) {
overall_status = status;
}
}
return overall_status;
}
Status ProcessFunctionLibraryRuntime::ReleaseHandle(
FunctionLibraryRuntime::Handle handle) {
if (flr_map_ == nullptr) return absl::OkStatus();
if (IsMultiDevice(handle)) {
return ReleaseMultiDeviceHandle(handle);
}
FunctionLibraryRuntime* flr = nullptr;
string target_device;
{
mutex_lock l(mu_);
CHECK_EQ(1, function_data_.count(handle)) << " handle: " << handle;
target_device = function_data_[handle]->target_device();
}
flr = GetFLR(target_device);
if (flr != nullptr) {
return flr->ReleaseHandle(handle);
}
return errors::InvalidArgument("Handle not found: ", handle);
}
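// Wraps `done` so that, once the function finishes, the rendezvous created on
// its behalf (if any) is unreffed and all per-step cleanup items are released,
// folding any cleanup failure into the final status.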
FunctionLibraryRuntime::DoneCallback
ProcessFunctionLibraryRuntime::ApplyCleanUpToDoneCallback(
std::vector<std::unique_ptr<CleanUpItem>>* items,
FunctionLibraryRuntime::DoneCallback done,
const FunctionLibraryRuntime::Options& opts,
tsl::core::RefCountPtr<Rendezvous> created_rendezvous) const {
return [this, items, done = std::move(done), step_id = opts.step_id,
created_rendezvous =
created_rendezvous.release()](const Status& status) {
if (created_rendezvous != nullptr) {
created_rendezvous->Unref();
}
auto* local_status = new Status(status);
CleanUp(items, [local_status, done](const Status& cleanup_status) {
local_status->Update(cleanup_status);
done(*local_status);
delete local_status;
});
delete items;
};
}
Status ProcessFunctionLibraryRuntime::CreateRendezvous(
FunctionLibraryRuntime::Options& opts,
tsl::core::RefCountPtr<Rendezvous>* created_rendezvous) const {
DCHECK(opts.rendezvous == nullptr);
if (!rendezvous_factory_) {
return errors::FailedPrecondition(
"The caller does not provide a rendezvous and "
"ProcessFunctionLibraryRuntime was created without a rendezvous "
"factory.");
}
Status s = rendezvous_factory_(opts.step_id, device_mgr_, created_rendezvous);
if (s.ok()) {
opts.rendezvous = created_rendezvous->get();
opts.create_rendezvous = false;
}
return s;
}
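// Selects the arguments a component function needs from the caller's flat
// argument list. A non-negative sub_index picks a single ResourceHandle out of
// a packed DT_RESOURCE tensor argument.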
Status ProcessFunctionLibraryRuntime::GetComponentArgs(
const absl::Span<const Tensor> args,
const ProcessFunctionLibraryRuntime::ComponentFunctionData& comp_data,
ProcessFunctionLibraryRuntime::InternalArgs* comp_args) {
for (const auto& it : comp_data.arg_indices) {
if (it.index >= args.size()) {
return errors::InvalidArgument("index ", it.index,
" is out of range [0, ", args.size(), ")");
}
if (it.sub_index >= 0) {
const Tensor& t = args[it.index];
if (t.dtype() != DT_RESOURCE) {
return errors::InvalidArgument("Got unexpected sub_index ",
it.sub_index, " for argument ",
it.index);
}
const auto& handles = t.flat<ResourceHandle>();
if (it.sub_index >= handles.size()) {
return errors::InvalidArgument("Sub_index ", it.sub_index,
"is out of range [0,", handles.size(),
") for argument ", it.index);
}
comp_args->args.push_back(Tensor(handles(it.sub_index)));
} else {
comp_args->args.push_back(args[it.index]);
}
}
return absl::OkStatus();
}
#if !defined(IS_MOBILE_PLATFORM)
Status ProcessFunctionLibraryRuntime::GetComponentArgs(
const FunctionArgsInterface& args,
const ProcessFunctionLibraryRuntime::ComponentFunctionData& comp_data,
ProcessFunctionLibraryRuntime::InternalArgs* comp_args) {
for (int i = 0; i < comp_data.arg_indices.size(); ++i) {
const FunctionArgIndex index = comp_data.arg_indices.at(i);
Tensor tensor;
if (args.GetLocalArg(index, &tensor).ok()) {
comp_args->args.push_back(std::move(tensor));
} else {
eager::RemoteTensorHandle remote_handle;
TF_RETURN_IF_ERROR(args.GetRemoteArg(index, &remote_handle));
comp_args->remote_args.emplace_back(
std::make_unique<eager::RemoteTensorHandle>(
std::move(remote_handle)));
comp_args->args.push_back(comp_args->remote_args.back().get());
}
}
return absl::OkStatus();
}
#endif
void ProcessFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, absl::Span<const Tensor> args,
std::vector<Tensor>* rets,
FunctionLibraryRuntime::DoneCallback done) const {
FunctionLibraryRuntime::Options new_opts = opts;
tsl::core::RefCountPtr<Rendezvous> created_rendezvous = nullptr;
if (!opts.rendezvous) {
Status s = CreateRendezvous(new_opts, &created_rendezvous);
if (!s.ok()) {
done(s);
return;
}
}
auto* cleanup_items = new std::vector<std::unique_ptr<CleanUpItem>>;
done = ApplyCleanUpToDoneCallback(cleanup_items, std::move(done), new_opts,
std::move(created_rendezvous));
std::vector<FunctionRet>* function_rets = new std::vector<FunctionRet>;
done = [rets, function_rets, done = std::move(done)](const Status& s) {
Status status = s;
if (status.ok()) {
status.Update(FunctionRetsToTensors(function_rets, rets));
}
delete function_rets;
done(status);
};
bool multi_device = HasMultiDeviceHandle(handle);
if (multi_device) {
auto get_component_args = [&args](const ComponentFunctionData& comp_data,
InternalArgs* comp_args) -> Status {
return GetComponentArgs(args, comp_data, comp_args);
};
return RunMultiDeviceAsync(new_opts, handle, function_rets, cleanup_items,
std::move(done), std::move(get_component_args));
}
std::vector<FunctionArg> local_args;
for (const auto& tensor : args) {
local_args.push_back(tensor);
}
RunInternal(new_opts, handle, local_args, function_rets, cleanup_items,
std::move(done));
}
void ProcessFunctionLibraryRuntime::RunInternal(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, absl::Span<const FunctionArg> args,
std::vector<FunctionRet>* rets,
std::vector<std::unique_ptr<CleanUpItem>>* cleanup_items,
FunctionLibraryRuntime::DoneCallback done) const {
FunctionLibraryRuntime* flr = nullptr;
string target_device;
FunctionLibraryRuntime::LocalHandle local_handle;
{
tf_shared_lock l(mu_);
auto iter = function_data_.find(handle);
if (iter == function_data_.end()) {
done(errors::NotFound("Handle: ", handle, " not found."));
return;
}
FunctionData* function_data = iter->second.get();
target_device = function_data->target_device();
local_handle = function_data->local_handle();
}
if (!opts.remote_execution) {
done(
errors::InvalidArgument("ProcessFunctionLibraryRuntime::Run should "
"only be called for multi-device functions or "
"for remote execution."));
return;
}
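  // The handle targets a device in this process: ship the arguments to the
  // target device through the rendezvous, run the function there, and receive
  // the results back on the source device.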
flr = GetFLR(target_device);
if (flr != nullptr) {
auto rendezvous = opts.rendezvous;
string source_device = opts.source_device;
DeviceContext* device_context;
Status s = GetDeviceContext(source_device, &device_context);
if (!s.ok()) {
done(s);
return;
}
int64_t src_incarnation, target_incarnation;
s = GetDeviceIncarnation(source_device, &src_incarnation);
s.Update(GetDeviceIncarnation(target_device, &target_incarnation));
if (!s.ok()) {
done(s);
return;
}
std::vector<Tensor> local_args = GetLocalArgs(args);
s = SendTensors(source_device, target_device, "arg_", src_incarnation,
local_args, device_context, opts.args_alloc_attrs,
rendezvous);
if (!s.ok()) {
done(s);
return;
}
const std::vector<AllocatorAttributes>& rets_alloc_attrs =
opts.rets_alloc_attrs;
std::vector<Tensor>* remote_rets = new std::vector<Tensor>;
flr->Run(opts, handle, local_args, remote_rets,
[source_device, target_device, target_incarnation, rendezvous,
device_context, rets_alloc_attrs, remote_rets, rets,
done = std::move(done)](const Status& status) mutable {
if (!status.ok()) {
delete remote_rets;
done(status);
return;
}
int64_t num_returns = remote_rets->size();
delete remote_rets;
std::vector<Tensor>* recv_tensors = new std::vector<Tensor>;
ReceiveTensorsAsync(target_device, source_device, "ret_",
target_incarnation, num_returns,
device_context, rets_alloc_attrs, rendezvous,
recv_tensors,
TensorsToFunctionRetsDoneCallback(
rets, recv_tensors, std::move(done)));
});
return;
}
if (parent_ != nullptr) {
auto cleanup_item = std::make_unique<CleanUpItem>();
cleanup_item->device = target_device;
cleanup_item->step_id = opts.step_id;
cleanup_item->local_handle = local_handle;
cleanup_items->emplace_back(std::move(cleanup_item));
parent_->Run(opts, local_handle, args, rets, std::move(done));
return;
}
done(errors::Internal("Could not find device"));
}
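// Adapter for functions whose inputs and outputs live in a
// CallFrameInterface: arguments are copied out of the frame and return values
// are written back with SetRetval once execution completes.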
void ProcessFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, CallFrameInterface* frame,
FunctionLibraryRuntime::DoneCallback done) const {
std::vector<Tensor> args;
args.reserve(frame->num_args());
  for (size_t i = 0; i < frame->num_args(); ++i) {
    const Tensor* arg;
    Status s = frame->GetArg(i, &arg);
    // Check the status before dereferencing: `arg` is not set when GetArg
    // fails, and the loop must stop after reporting the error so that `done`
    // is invoked exactly once.
    if (!s.ok()) {
      done(s);
      return;
    }
    args.emplace_back(*arg);
  }
std::vector<Tensor>* rets = new std::vector<Tensor>;
rets->reserve(frame->num_retvals());
Run(opts, handle, args, rets,
[frame, rets, done = std::move(done)](const Status& status) {
std::unique_ptr<std::vector<Tensor>> rets_releaser(rets);
if (!status.ok()) {
done(status);
return;
}
if (rets->size() != frame->num_retvals()) {
done(errors::Internal(
"Number of return values from function (", rets->size(),
") did not match expected number of return values (",
frame->num_retvals(), ")."));
return;
}
for (size_t i = 0; i < frame->num_retvals(); ++i) {
Status s = frame->SetRetval(i, (*rets)[i]);
if (!s.ok()) {
done(s);
return;
}
}
done(absl::OkStatus());
});
}
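// Synchronous entry point. Multi-device handles with sync execution enabled
// run inline; everything else falls back to the async Run plus a
// Notification.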
Status ProcessFunctionLibraryRuntime::RunSync(
const FunctionLibraryRuntime::Options& orig_opts,
FunctionLibraryRuntime::Handle handle, absl::Span<const Tensor> args,
std::vector<Tensor>* rets) const {
MultiDeviceFunctionData* multi_device_data = IsMultiDevice(handle);
if (multi_device_data && multi_device_data->enable_sync_execution) {
metrics::IncrementTestCounter("pflr_runsync", "sync");
FunctionLibraryRuntime::Options new_opts = orig_opts;
tsl::core::RefCountPtr<Rendezvous> created_rendezvous = nullptr;
if (!new_opts.rendezvous) {
TF_RETURN_IF_ERROR(CreateRendezvous(new_opts, &created_rendezvous));
}
std::vector<FunctionRet> function_rets;
auto get_component_args = [&args](const ComponentFunctionData& comp_data,
InternalArgs* comp_args) {
return GetComponentArgs(args, comp_data, comp_args);
};
Status status = RunMultiDeviceSync(new_opts, handle, &function_rets,
std::move(get_component_args));
status.Update(FunctionRetsToTensors(&function_rets, rets));
return status;
} else {
metrics::IncrementTestCounter("pflr_runsync", "async");
Notification n;
Status s;
Run(orig_opts, handle, args, rets, [&n, &s](const Status& status) {
s.Update(status);
n.Notify();
});
n.WaitForNotification();
return s;
}
}
Status ProcessFunctionLibraryRuntime::RunSync(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, CallFrameInterface* frame) const {
Notification n;
Status s;
Run(opts, handle, frame, [&n, &s](const Status& status) {
s.Update(status);
n.Notify();
});
n.WaitForNotification();
return s;
}
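// Runs a function whose inputs may be remote or packed. Purely local calls
// are forwarded to the Tensor-based overload; otherwise component arguments
// are resolved per device and dispatched through RunMultiDeviceAsync (not
// supported on mobile platforms).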
void ProcessFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, const FunctionArgsInterface& args,
std::vector<FunctionRet>* rets,
FunctionLibraryRuntime::DoneCallback done) const {
bool has_remote_outputs = false;
const MultiDeviceFunctionData* data = IsMultiDevice(handle);
if (data != nullptr) {
has_remote_outputs = data->has_remote_outputs;
}
if (!args.HasRemoteOrPackedInputs() && !has_remote_outputs) {
const std::vector<Tensor> local_inputs = args.GetLocalTensors();
std::vector<Tensor>* tensor_rets = new std::vector<Tensor>;
return Run(
opts, handle, local_inputs, tensor_rets,
TensorsToFunctionRetsDoneCallback(rets, tensor_rets, std::move(done)));
}
FunctionLibraryRuntime::Options new_opts = opts;
tsl::core::RefCountPtr<Rendezvous> created_rendezvous = nullptr;
if (!opts.rendezvous) {
Status s = CreateRendezvous(new_opts, &created_rendezvous);
if (!s.ok()) {
done(s);
return;
}
}
#if defined(IS_MOBILE_PLATFORM)
done(errors::Unimplemented(
"Remote inputs are not available on mobile devices."));
return;
#else
auto* cleanup_items = new std::vector<std::unique_ptr<CleanUpItem>>;
done = ApplyCleanUpToDoneCallback(cleanup_items, done, opts,
std::move(created_rendezvous));
auto get_component_args = [&args](const ComponentFunctionData& comp_data,
InternalArgs* comp_args) -> Status {
return GetComponentArgs(args, comp_data, comp_args);
};
return RunMultiDeviceAsync(new_opts, handle, rets, cleanup_items,
std::move(done), std::move(get_component_args));
#endif
}
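// Releases per-step resources recorded during execution. Items for local
// devices are unexpected here; remote items are cleaned up through the parent
// distributed runtime, with statuses merged by a refcounted callback.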
void ProcessFunctionLibraryRuntime::CleanUp(
std::vector<std::unique_ptr<CleanUpItem>>* items,
FunctionLibraryRuntime::DoneCallback done) const {
auto* refcounted_done = new ReffedStatusCallback(std::move(done));
for (auto& item : *items) {
refcounted_done->Ref();
auto* flr = GetFLR(item->device);
if (flr != nullptr) {
refcounted_done->UpdateStatus(
errors::Internal("Cleanup items shouldn't contain local item."));
refcounted_done->Unref();
} else if (parent_ != nullptr) {
parent_->CleanUp(item->step_id, item->local_handle,
[refcounted_done](const Status& status) {
if (!status.ok()) {
refcounted_done->UpdateStatus(status);
}
refcounted_done->Unref();
});
} else {
refcounted_done->UpdateStatus(
errors::Internal("Could not find device in cleanup."));
refcounted_done->Unref();
}
}
refcounted_done->Unref();
}
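// Creates a new runtime sharing this one's devices, parent, and rendezvous
// factory. skip_flib_def controls whether the function library definition is
// copied or left empty; composite devices are re-registered on the clone.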
Status ProcessFunctionLibraryRuntime::Clone(
Env* env, int graph_def_version, const OptimizerOptions& optimizer_options,
std::unique_ptr<FunctionLibraryDefinition>* out_lib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime>* out_pflr,
bool skip_flib_def) const {
if (skip_flib_def) {
*out_lib_def = std::make_unique<FunctionLibraryDefinition>(
lib_def_->default_registry(), FunctionDefLibrary());
} else {
*out_lib_def = std::make_unique<FunctionLibraryDefinition>(*lib_def_);
}
*out_pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_, env, config_ ? &(*config_) : nullptr, graph_def_version,
out_lib_def->get(), optimizer_options, default_thread_pool_, parent_,
session_metadata_, rendezvous_factory_);
{
tf_shared_lock l(mu_);
for (auto* d : composite_devices_) (*out_pflr)->AddCompositeDevice(d);
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include <memory>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/rendezvous_cache.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/optimized_function_graph.pb.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/type_index.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tsl/platform/protobuf.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#elif TENSORFLOW_USE_ROCM
#include "rocm/include/hip/hip_runtime.h"
#endif
namespace tensorflow {
namespace {
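// Fake DistributedFunctionLibraryRuntime used as the cluster FLR: it hands
// out monotonically increasing handles and ignores Run/CleanUp, which is
// enough to exercise cross-process instantiation paths.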
class TestClusterFLR : public DistributedFunctionLibraryRuntime {
public:
explicit TestClusterFLR(DeviceMgr* device_mgr) : device_mgr_(device_mgr) {}
void Instantiate(const string& function_name,
const FunctionLibraryDefinition& lib_def, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::LocalHandle* handle,
FunctionLibraryRuntime::DoneCallback done) override {
{
mutex_lock l(mu_);
*handle = next_handle_;
next_handle_++;
}
done(absl::OkStatus());
}
void Run(const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle,
absl::Span<const Tensor> args, std::vector<Tensor>* rets,
FunctionLibraryRuntime::DoneCallback done) override {}
void Run(const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle,
absl::Span<const FunctionArg> args, std::vector<FunctionRet>* rets,
FunctionLibraryRuntime::DoneCallback done) override {}
void CleanUp(uint64 step_id, FunctionLibraryRuntime::LocalHandle handle,
FunctionLibraryRuntime::DoneCallback done) override {}
DeviceMgr* remote_device_mgr() const override { return device_mgr_; }
private:
mutex mu_;
int next_handle_ TF_GUARDED_BY(mu_) = 0;
DeviceMgr* device_mgr_;
};
SessionMetadata GenerateSessionMetadata() {
SessionMetadata session_metadata;
session_metadata.set_name("name");
session_metadata.set_version(42);
return session_metadata;
}
class ProcessFunctionLibraryRuntimeTest : public ::testing::Test {
public:
ProcessFunctionLibraryRuntimeTest()
: rendezvous_cache_(new RendezvousCache<IntraProcessRendezvous>()) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 3});
std::vector<std::unique_ptr<Device>> created_devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0",
&created_devices));
device2_ = std::move(created_devices[2]);
created_devices.erase(created_devices.begin() + 2);
device_mgr_ = std::make_unique<DynamicDeviceMgr>();
TF_CHECK_OK(device_mgr_->AddDevices(std::move(created_devices)));
TF_CHECK_OK(device_mgr_->LookupDevice(
"/job:a/replica:0/task:0/device:CPU:0", &device0_));
TF_CHECK_OK(device_mgr_->LookupDevice(
"/job:a/replica:0/task:0/device:CPU:1", &device1_));
Device* device2_ptr = nullptr;
EXPECT_NE(
error::OK,
device_mgr_
->LookupDevice("/job:a/replica:0/task:0/device:CPU:2", &device2_ptr)
.code());
Status status = device_mgr_->LookupDevice(
"/job:a/replica:0/task:0/device:GPU:0", &gpu_device_);
if (!status.ok()) {
CHECK_EQ(nullptr, gpu_device_);
}
}
void Init(const std::vector<FunctionDef>& flib,
const SessionMetadata* session_metadata = nullptr,
const std::vector<OptimizedFunctionGraph>&
optimized_function_graphs = {}) {
FunctionDefLibrary proto;
for (const auto& fdef : flib) *(proto.add_function()) = fdef;
lib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), proto));
for (const auto& fg : optimized_function_graphs) {
lib_def_->AddOptimizedFunctionGraph(fg.name(), fg);
}
OptimizerOptions opts;
cluster_flr_.reset(new TestClusterFLR(device_mgr_.get()));
proc_flr_.reset(new ProcessFunctionLibraryRuntime(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, lib_def_.get(), opts,
nullptr, cluster_flr_.get(), session_metadata,
Rendezvous::Factory{[this](const int64_t step_id,
const DeviceMgr* device_mgr,
tsl::core::RefCountPtr<Rendezvous>* r) {
*r = this->rendezvous_cache_->FindOrCreate(step_id, [device_mgr]() {
return tsl::core::RefCountPtr<IntraProcessRendezvous>(
new IntraProcessRendezvous(device_mgr));
});
return absl::OkStatus();
}}));
}
void AddCompositeDevice(CompositeDevice* d) {
proc_flr_->AddCompositeDevice(d);
}
Status Instantiate(
const string& name, test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& instantiate_opts,
FunctionLibraryRuntime::Handle* handle) {
return proc_flr_->Instantiate(name, attrs, instantiate_opts, handle);
}
Tensor GPUToCPU(const Tensor& device_tensor) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
CHECK(gpu_device_);
CHECK(gpu_device_->tensorflow_accelerator_device_info() != nullptr);
DeviceContext* device_context =
gpu_device_->tensorflow_accelerator_device_info()->default_context;
Tensor cpu_tensor(device_tensor.dtype(), device_tensor.shape());
CHECK(device_context
->CopyDeviceTensorToCPUSync(&device_tensor, "", gpu_device_,
&cpu_tensor)
.ok());
return cpu_tensor;
#else
CHECK(false);
#endif
}
Tensor CPUToGPU(const Tensor& cpu_tensor) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
CHECK(gpu_device_);
CHECK(gpu_device_->tensorflow_accelerator_device_info() != nullptr);
DeviceContext* device_context =
gpu_device_->tensorflow_accelerator_device_info()->default_context;
Tensor device_tensor(gpu_device_->GetAllocator({}), cpu_tensor.dtype(),
cpu_tensor.shape(), {});
CHECK(device_context
->CopyCPUTensorToDeviceSync(&cpu_tensor, gpu_device_,
&device_tensor)
.ok());
return device_tensor;
#else
CHECK(false);
#endif
}
template <typename T, typename K>
Status RunWithRuntime(
const string& name, FunctionLibraryRuntime::Options opts,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& instantiate_opts,
const T& args, std::vector<K*> rets,
ProcessFunctionLibraryRuntime* pflr) {
FunctionLibraryRuntime::Handle handle;
Status status = pflr->Instantiate(name, attrs, instantiate_opts, &handle);
if (!status.ok()) {
return status;
}
bool is_cross_process = false;
TF_CHECK_OK(pflr->IsCrossProcess(handle, &is_cross_process));
EXPECT_FALSE(is_cross_process);
std::function<void(std::function<void()>)> runner =
[](std::function<void()> fn) {
test::function::FunctionTestSchedClosure(fn);
};
Notification done;
opts.runner = &runner;
std::vector<K> out;
pflr->Run(opts, handle, args, &out, [&status, &done](const Status& s) {
status = s;
done.Notify();
});
done.WaitForNotification();
if (!status.ok()) {
return status;
}
CHECK_EQ(rets.size(), out.size());
for (size_t i = 0; i < rets.size(); ++i) {
*rets[i] = out[i];
}
status = pflr->ReleaseHandle(handle);
if (!status.ok()) {
return status;
}
Notification done2;
pflr->Run(opts, handle, args, &out, [&status, &done2](const Status& s) {
status = s;
done2.Notify();
});
done2.WaitForNotification();
EXPECT_TRUE(errors::IsNotFound(status)) << "Actual status: " << status;
EXPECT_TRUE(absl::StrContains(status.message(), "not found."));
return absl::OkStatus();
}
Status Run(const string& name, FunctionLibraryRuntime::Options opts,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& instantiate_opts,
const std::vector<Tensor>& args, std::vector<Tensor*> rets,
ProcessFunctionLibraryRuntime* pflr = nullptr) {
return RunWithRuntime<std::vector<Tensor>, Tensor>(
name, opts, attrs, instantiate_opts, args, rets, proc_flr_.get());
}
Status RunWithPackedArgs(
const string& name, FunctionLibraryRuntime::Options opts,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& instantiate_opts,
const FunctionArgsInterface& args, std::vector<FunctionRet*> rets,
ProcessFunctionLibraryRuntime* pflr = nullptr) {
return RunWithRuntime<FunctionArgsInterface, FunctionRet>(
name, opts, attrs, instantiate_opts, args, rets, proc_flr_.get());
}
Status RunInstantiated(FunctionLibraryRuntime::Handle handle,
FunctionLibraryRuntime::Options opts,
const std::vector<Tensor>& args,
std::vector<Tensor*> rets) {
std::function<void(std::function<void()>)> runner =
[](std::function<void()> fn) {
test::function::FunctionTestSchedClosure(fn);
};
opts.runner = &runner;
Status status;
Notification done;
std::vector<Tensor> out;
proc_flr_->Run(opts, handle, args, &out, [&status, &done](const Status& s) {
status = s;
done.Notify();
});
done.WaitForNotification();
if (!status.ok()) {
return status;
}
CHECK_EQ(rets.size(), out.size());
for (size_t i = 0; i < rets.size(); ++i) {
*rets[i] = out[i];
}
return absl::OkStatus();
}
std::unique_ptr<DynamicDeviceMgr> device_mgr_;
Device* device0_ = nullptr;
Device* device1_ = nullptr;
std::unique_ptr<Device> device2_;
Device* gpu_device_ = nullptr;
std::unique_ptr<FunctionLibraryDefinition> lib_def_;
std::unique_ptr<TestClusterFLR> cluster_flr_;
std::unique_ptr<ProcessFunctionLibraryRuntime> proc_flr_;
tsl::core::RefCountPtr<RendezvousCache<IntraProcessRendezvous>>
rendezvous_cache_;
};
TEST_F(ProcessFunctionLibraryRuntimeTest, GetFLRNull) {
FunctionDefLibrary proto;
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), proto));
OptimizerOptions opts;
std::unique_ptr<ProcessFunctionLibraryRuntime> proc_flr(
new ProcessFunctionLibraryRuntime(
          nullptr, Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, lib_def.get(), opts));
FunctionLibraryRuntime* flr =
proc_flr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
EXPECT_NE(flr, nullptr);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, DeviceSet) {
FunctionDefLibrary proto;
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), proto));
OptimizerOptions opts;
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(device2_));
auto mgr = std::make_unique<DynamicDeviceMgr>();
  TF_CHECK_OK(mgr->AddDevices(std::move(devices)));
std::unique_ptr<ProcessFunctionLibraryRuntime> proc_flr(
new ProcessFunctionLibraryRuntime(
device_mgr_.get(), Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, lib_def.get(), opts,
nullptr));
EXPECT_NE(nullptr, proc_flr->device_set()->FindDeviceByName(
"/job:a/replica:0/task:0/device:CPU:0"));
EXPECT_NE(nullptr, proc_flr->device_set()->FindDeviceByName(
"/job:a/replica:0/task:0/device:CPU:1"));
cluster_flr_.reset(new TestClusterFLR(mgr.get()));
proc_flr.reset(new ProcessFunctionLibraryRuntime(
device_mgr_.get(), Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, lib_def.get(), opts,
nullptr, cluster_flr_.get()));
EXPECT_NE(nullptr, proc_flr->device_set()->FindDeviceByName(
"/job:a/replica:0/task:0/device:CPU:2"));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, Basic) {
Init({});
FunctionLibraryRuntime* flr =
proc_flr_->GetFLR("/job:a/replica:0/task:0/cpu:0");
EXPECT_NE(flr, nullptr);
EXPECT_EQ(flr->device(), device0_);
flr = proc_flr_->GetFLR("/job:a/replica:0/task:0/device:CPU:0");
EXPECT_NE(flr, nullptr);
EXPECT_EQ(flr->device(), device0_);
flr = proc_flr_->GetFLR("/device:CPU:0");
EXPECT_NE(flr, nullptr);
EXPECT_EQ(flr->device(), device0_);
flr = proc_flr_->GetFLR("/job:a/replica:0/task:0/cpu:1");
EXPECT_NE(flr, nullptr);
EXPECT_EQ(flr->device(), device1_);
flr = proc_flr_->GetFLR("abc");
EXPECT_EQ(flr, nullptr);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, GetDeviceIncarnation) {
Init({});
int64_t incarnation;
TF_EXPECT_OK(proc_flr_->GetDeviceIncarnation("/job:a/replica:0/task:0/cpu:1",
&incarnation));
EXPECT_NE(incarnation, 0);
Status s = proc_flr_->GetDeviceIncarnation("/job:a/replica:0/task:0/cpu:2",
&incarnation);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SingleCall) {
Init({test::function::XTimesTwo()});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
auto x = test::AsTensor<float>({1, 2, 3, 4});
Tensor y;
TF_CHECK_OK(
Run("XTimesTwo", opts, {{"T", DT_FLOAT}}, instantiate_opts, {x}, {&y}));
test::ExpectTensorEqual<float>(y, test::AsTensor<float>({2, 4, 6, 8}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SingleCallFindDevice) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
Tensor y;
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:0"},
TensorShape({})));
EXPECT_EQ(0, rendezvous_cache_->Size());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultipleCallsSameDeviceXTimes) {
Init({test::function::XTimesTwo(), test::function::XTimesFour()});
auto x = test::AsTensor<float>({1, 2, 3, 4});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
Tensor y;
TF_CHECK_OK(
Run("XTimesTwo", opts, {{"T", DT_FLOAT}}, instantiate_opts, {x}, {&y}));
test::ExpectTensorEqual<float>(y, test::AsTensor<float>({2, 4, 6, 8}));
TF_CHECK_OK(
Run("XTimesFour", opts, {{"T", DT_FLOAT}}, instantiate_opts, {x}, {&y}));
test::ExpectTensorEqual<float>(y, test::AsTensor<float>({4, 8, 12, 16}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
SameDeviceXTimesFourInt32MultiDevice) {
Init({test::function::XTimesTwoInt32(), test::function::XTimesFourInt32()});
auto x = test::AsTensor<int32>({1, 2, 3, 4});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
instantiate_opts.input_devices = {"/job:a/replica:0/task:0/cpu:0"};
instantiate_opts.output_devices = {"/job:a/replica:0/task:0/cpu:0"};
instantiate_opts.is_multi_device_function = true;
Tensor y;
TF_CHECK_OK(Run("XTimesFourInt32", opts, {{"T", DT_INT32}}, instantiate_opts,
{x}, {&y}));
test::ExpectTensorEqual<int32>(y, test::AsTensor<int32>({4, 8, 12, 16}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultipleCallsSameDeviceXTimesMultiDevice) {
Init({test::function::XTimesTwoInt32(), test::function::XTimesFourInt32()});
auto x = test::AsTensor<int32>({1, 2, 3, 4});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
instantiate_opts.input_devices = {"/job:a/replica:0/task:0/cpu:0"};
instantiate_opts.output_devices = {"/job:a/replica:0/task:0/cpu:0"};
instantiate_opts.is_multi_device_function = true;
Tensor y;
TF_CHECK_OK(Run("XTimesTwoInt32", opts, {{"T", DT_INT32}}, instantiate_opts,
{x}, {&y}));
test::ExpectTensorEqual<int32>(y, test::AsTensor<int32>({2, 4, 6, 8}));
TF_CHECK_OK(Run("XTimesFourInt32", opts, {{"T", DT_INT32}}, instantiate_opts,
{x}, {&y}));
test::ExpectTensorEqual<int32>(y, test::AsTensor<int32>({4, 8, 12, 16}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultipleCallsSameDeviceFindDevice) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:1";
Tensor y;
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:1"},
TensorShape({})));
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:1"},
TensorShape({})));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultipleCallsDiffDeviceFindDevice) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
Tensor y;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts_0;
instantiate_opts_0.target = "/job:a/replica:0/task:0/device:CPU:0";
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts_0, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:0"},
TensorShape({})));
FunctionLibraryRuntime::InstantiateOptions instantiate_opts_1;
instantiate_opts_1.target = "/job:a/replica:0/task:0/device:CPU:1";
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts_1, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:1"},
TensorShape({})));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, InstantiateFunctionOnRemovedDevice) {
std::vector<std::unique_ptr<Device>> devices;
Device* device2_ptr = device2_.get();
devices.emplace_back(std::move(device2_));
TF_CHECK_OK(device_mgr_->AddDevices(std::move(devices)));
Init({test::function::FindDevice()});
std::vector<Device*> remove_devices{device2_ptr};
TF_CHECK_OK(device_mgr_->RemoveDevices(std::move(remove_devices)));
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
FunctionLibraryRuntime::Handle h;
instantiate_opts.target = "/job:a/replica:0/task:0/device:CPU:1";
instantiate_opts.is_multi_device_function = true;
TF_CHECK_OK(Instantiate("FindDevice",
{{"_target", "/job:b/replica:0/task:0/device:CPU:2"}},
instantiate_opts, &h));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, ClusterFLRSerialTest) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:b/replica:0/task:0/device:CPU:0";
FunctionLibraryRuntime::Handle h;
TF_CHECK_OK(Instantiate("FindDevice",
{{"_target", "/job:b/replica:0/task:0/device:CPU:0"}},
instantiate_opts, &h));
bool is_cross_process = false;
TF_CHECK_OK(proc_flr_->IsCrossProcess(h, &is_cross_process));
EXPECT_TRUE(is_cross_process);
EXPECT_EQ(0, proc_flr_->GetHandleOnDevice(
"/job:b/replica:0/task:0/device:CPU:0", h));
TF_CHECK_OK(Instantiate("FindDevice",
{{"_target", "/job:b/replica:0/task:0/device:CPU:0"}},
instantiate_opts, &h));
EXPECT_EQ(0, proc_flr_->GetHandleOnDevice(
"/job:b/replica:0/task:0/device:CPU:0", h));
instantiate_opts.target = "/job:c/replica:0/task:0/device:CPU:0";
TF_CHECK_OK(Instantiate("FindDevice",
{{"_target", "/job:c/replica:0/task:0/device:CPU:0"}},
instantiate_opts, &h));
EXPECT_EQ(1, proc_flr_->GetHandleOnDevice(
"/job:c/replica:0/task:0/device:CPU:0", h));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, ClusterFLRParallelTest) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:b/replica:0/task:0/device:CPU:0";
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
auto fn = [this, &instantiate_opts]() {
FunctionLibraryRuntime::Handle h;
TF_CHECK_OK(Instantiate(
"FindDevice", {{"_target", "/job:b/replica:0/task:0/device:CPU:0"}},
instantiate_opts, &h));
EXPECT_EQ(0, proc_flr_->GetHandleOnDevice(
"/job:b/replica:0/task:0/device:CPU:0", h));
};
for (int i = 0; i < 100; ++i) {
tp->Schedule(fn);
}
delete tp;
}
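// Returns true when the tensor's backing buffer is device-resident, by
// querying the CUDA/ROCm pointer attributes of its data pointer.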
bool IsCUDATensor(const Tensor& t) {
#if GOOGLE_CUDA
cudaPointerAttributes attributes;
cudaError_t err =
cudaPointerGetAttributes(&attributes, t.tensor_data().data());
if (err == cudaErrorInvalidValue) return false;
CHECK_EQ(cudaSuccess, err) << cudaGetErrorString(err);
return (attributes.type == cudaMemoryTypeDevice);
#elif TENSORFLOW_USE_ROCM
hipPointerAttribute_t attributes;
hipError_t err = hipPointerGetAttributes(&attributes, t.tensor_data().data());
if (err == hipErrorInvalidValue) return false;
CHECK_EQ(hipSuccess, err) << hipGetErrorString(err);
return (attributes.memoryType == hipMemoryTypeDevice);
#else
CHECK(false)
<< "IsCUDATensor should not be called when CUDA is not available";
#endif
}
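// Runs the TwoDeviceMult test function (x*2 on CPU, x*3 on GPU) under the
// given instantiation options, checking either the outputs or the expected
// instantiation error.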
void TestTwoDeviceMult(
ProcessFunctionLibraryRuntimeTest* fixture,
const FunctionLibraryRuntime::InstantiateOptions& inst_opts,
const string& error = "") {
fixture->Init({test::function::TwoDeviceMult()});
FunctionLibraryRuntime::Options opts;
auto x = test::AsTensor<float>({1, 2, 3});
Tensor y_cpu;
Tensor y_gpu;
Status status = fixture->Run("TwoDeviceMult", opts, {{"T", DT_FLOAT}},
inst_opts, {x}, {&y_cpu, &y_gpu});
if (!error.empty()) {
EXPECT_TRUE(errors::IsInvalidArgument(status))
<< "Actual status: " << status;
EXPECT_TRUE(absl::StrContains(status.message(), error))
<< "Actual error message: " << status.message();
return;
}
EXPECT_TRUE(status.ok()) << "Actual status: " << status;
EXPECT_FALSE(IsCUDATensor(y_cpu));
test::ExpectTensorEqual<float>(y_cpu, test::AsTensor<float>({2, 4, 6}));
EXPECT_TRUE(IsCUDATensor(y_gpu));
Tensor y_gpu_on_cpu = fixture->GPUToCPU(y_gpu);
test::ExpectTensorEqual<float>(y_gpu_on_cpu,
test::AsTensor<float>({3, 6, 9}));
}
void TestInstantiateSimpleFunction(
ProcessFunctionLibraryRuntimeTest* fixture,
const FunctionLibraryRuntime::InstantiateOptions& orig_opts) {
fixture->Init({test::function::FindDevice()});
FunctionLibraryRuntime::InstantiateOptions opts_copy = orig_opts;
opts_copy.input_devices.clear();
FunctionLibraryRuntime::Handle h;
TF_CHECK_OK(fixture->Instantiate(
"FindDevice", {{"_target", "/job:b/replica:0/task:0/device:CPU:0"}},
opts_copy, &h));
}
void TestControlFlow(
ProcessFunctionLibraryRuntimeTest* fixture,
const FunctionLibraryRuntime::InstantiateOptions& inst_opts) {
fixture->Init({test::function::ControlFlow()});
FunctionLibraryRuntime::Options opts;
Tensor x1 = test::AsTensor<float>({3, 5, 17, 257});
if (absl::StrContains(inst_opts.input_devices[0], "GPU")) {
x1 = fixture->CPUToGPU(x1);
}
Tensor y1;
TF_CHECK_OK(fixture->Run("ControlFlow", opts, {}, inst_opts, {x1}, {&y1}));
if (absl::StrContains(inst_opts.output_devices[0], "GPU")) {
EXPECT_TRUE(IsCUDATensor(y1));
y1 = fixture->GPUToCPU(y1);
}
test::ExpectTensorEqual<float>(y1, test::AsTensor<float>({3, 5, 17, 257}));
}
void TestTwoDeviceInputOutput(
ProcessFunctionLibraryRuntimeTest* fixture,
const FunctionLibraryRuntime::InstantiateOptions& inst_opts) {
if (fixture->gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
fixture->Init({test::function::TwoDeviceInputOutput()});
FunctionLibraryRuntime::Options opts;
Tensor x1 = test::AsTensor<float>({1, 2});
if (absl::StrContains(inst_opts.input_devices[0], "GPU")) {
x1 = fixture->CPUToGPU(x1);
}
Tensor x2 = test::AsTensor<float>({10, 20});
if (absl::StrContains(inst_opts.input_devices[1], "GPU")) {
x2 = fixture->CPUToGPU(x2);
}
Tensor y1;
Tensor y2;
TF_CHECK_OK(fixture->Run("TwoDeviceInputOutput", opts, {{"T", DT_FLOAT}},
inst_opts, {x1, x2}, {&y1, &y2}));
if (absl::StrContains(inst_opts.output_devices[0], "GPU")) {
EXPECT_TRUE(IsCUDATensor(y1));
y1 = fixture->GPUToCPU(y1);
} else {
EXPECT_FALSE(IsCUDATensor(y1));
}
test::ExpectTensorEqual<float>(y1, test::AsTensor<float>({2, 4}));
if (absl::StrContains(inst_opts.output_devices[1], "GPU")) {
EXPECT_TRUE(IsCUDATensor(y2));
y2 = fixture->GPUToCPU(y2);
} else {
EXPECT_FALSE(IsCUDATensor(y2));
}
test::ExpectTensorEqual<float>(y2, test::AsTensor<float>({30, 60}));
}
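// Helpers that expand short device names such as "CPU:0" into fully qualified
// /job:a/replica:0/task:0 names and bundle them into multi-device
// InstantiateOptions.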
std::vector<string> CompleteDevices(const std::vector<string>& v) {
std::vector<string> result;
result.reserve(v.size());
for (const string& s : v) {
result.push_back(strings::StrCat("/job:a/replica:0/task:0/device:", s));
}
return result;
}
FunctionLibraryRuntime::InstantiateOptions MakeOptions(
const string& target, const std::vector<string>& input_devices,
const std::vector<string>& output_devices) {
FunctionLibraryRuntime::InstantiateOptions inst_opts;
inst_opts.target = target;
inst_opts.input_devices = CompleteDevices(input_devices);
inst_opts.output_devices = CompleteDevices(output_devices);
inst_opts.is_multi_device_function = true;
return inst_opts;
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ExplicitOutputDevice) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
TestTwoDeviceMult(this, MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0", "GPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_InferredOutputDevice) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
TestTwoDeviceMult(this, MakeOptions("CPU:0", {"CPU:0"}, {}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ErrorWhenNoInputDevices) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
TestTwoDeviceMult(this, MakeOptions("CPU:0", {}, {}),
"input_devices must have the same length");
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultiDevice_ErrorWhenTooManyInputDevices) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
TestTwoDeviceMult(this, MakeOptions("CPU:0", {"CPU:0", "CPU:1"}, {}),
"input_devices must have the same length");
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultiDevice_ErrorWhenTooManyOutputDevices) {
TestTwoDeviceMult(
this, MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0", "GPU:0", "CPU:1"}),
"output_devices must either be empty or have the same length");
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultiDevice_ErrorWhenBadTargetDevice) {
TestTwoDeviceMult(
this, MakeOptions("GPU:11", {"CPU:0"}, {"CPU:0", "GPU:0"}),
"Cannot instantiate multi-device function with target device GPU:11");
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ErrorWhenListInput) {
const FunctionDef& def = test::function::FuncWithListInput();
Init({def});
FunctionLibraryRuntime::Handle handle;
Status status = proc_flr_->Instantiate(
"FuncWithListInput", test::function::Attrs({{"T", DT_FLOAT}, {"N", 1}}),
MakeOptions("CPU:0", {"CPU:0"}, {}), &handle);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
ASSERT_TRUE(absl::StrContains(
status.message(),
"FuncWithListInput has an input named \"x1\" that is a list of tensors"))
<< "Actual error message: " << status.message();
}
TEST_F(ProcessFunctionLibraryRuntimeTest, FullTypeForInt32) {
FunctionDef def = test::function::XTimesTwoInt32();
def.mutable_node_def(2)->mutable_experimental_type()->set_type_id(
TFT_PRODUCT);
def.mutable_node_def(2)->mutable_experimental_type()->add_args()->set_type_id(
TFT_TENSOR);
Init({def});
FunctionLibraryRuntime::Handle handle;
Status status =
proc_flr_->Instantiate("XTimesTwoInt32", test::function::Attrs({}),
MakeOptions("CPU:0", {"CPU:0"}, {}), &handle);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
EXPECT_TRUE(absl::StrContains(
status.message(),
"in 'ProcessFunctionLibraryRuntime::InstantiateMultiDevice' has "
"TFT_TENSOR output 0 which has 0 args instead of 1"));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ErrorWhenListOutput) {
const FunctionDef& def = test::function::FuncWithListOutput();
Init({def});
FunctionLibraryRuntime::Handle handle;
Status status = proc_flr_->Instantiate(
"FuncWithListOutput", test::function::Attrs({{"T", DT_FLOAT}, {"N", 1}}),
MakeOptions("CPU:0", {}, {"CPU:0"}), &handle);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
ASSERT_TRUE(absl::StrContains(
status.message(),
"FuncWithListOutput has an output named \"y\" that is a list of tensors"))
<< "Actual error message: " << status.message();
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultiDevice_ExplicitMultiInputOutput) {
TestTwoDeviceInputOutput(
this, MakeOptions("CPU:0", {"CPU:0", "GPU:0"}, {"CPU:0", "GPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_FlipInputs) {
TestTwoDeviceInputOutput(
this, MakeOptions("CPU:0", {"GPU:0", "CPU:0"}, {"CPU:0", "GPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_FlipOutputs) {
TestTwoDeviceInputOutput(
this, MakeOptions("CPU:0", {"CPU:0", "GPU:0"}, {"GPU:0", "CPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_FlipBoth) {
TestTwoDeviceInputOutput(
this, MakeOptions("CPU:0", {"GPU:0", "CPU:0"}, {"GPU:0", "CPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_EmptyBodySwap) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"GPU:0", "CPU:0"}, {"CPU:0", "GPU:0"});
Init({test::function::EmptyBodySwap()});
Tensor x1 = CPUToGPU(test::AsTensor<float>({1, 2}));
Tensor x2 = test::AsTensor<float>({10, 20});
Tensor y1;
Tensor y2;
TF_CHECK_OK(Run("EmptyBodySwap", {}, {{"T", DT_FLOAT}}, inst_opts, {x1, x2},
{&y1, &y2}));
EXPECT_FALSE(IsCUDATensor(y1));
test::ExpectTensorEqual<float>(y1, test::AsTensor<float>({10, 20}));
EXPECT_TRUE(IsCUDATensor(y2));
y2 = GPUToCPU(y2);
test::ExpectTensorEqual<float>(y2, test::AsTensor<float>({1, 2}));
}
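// Builds a scalar DT_RESOURCE tensor referring to a Var resource with the
// given name, container, and device.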
Tensor GetResourceHandle(const string& var_name, const string& container,
const string& device_name) {
ResourceHandle handle;
handle.set_device(device_name);
handle.set_container(container);
handle.set_name(var_name);
handle.set_hash_code(TypeIndex::Make<Var>().hash_code());
handle.set_maybe_type_name(TypeIndex::Make<Var>().name());
Tensor tensor(DT_RESOURCE, TensorShape({}));
tensor.scalar<ResourceHandle>()() = handle;
return tensor;
}
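// A function that reads the same variable handle on CPU:0 and CPU:1 and adds
// the two values; used to exercise packed and composite-device resource
// inputs.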
FunctionDef AddVarAcrossDevices() {
return FunctionDefHelper::Create(
"AddVarAcrossDevices",
{"x: resource"},
{"y: float"},
{},
{
{{"read0"},
"ReadVariableOp",
{"x"},
{{"dtype", DT_FLOAT}},
{},
"/device:CPU:0"},
{{"read1"},
"ReadVariableOp",
{"x"},
{{"dtype", DT_FLOAT}},
{},
"/device:CPU:1"},
{{"add"},
"Add",
{"read0:value:0", "read1:value:0"},
{{"T", DT_FLOAT}},
{},
"/device:CPU:0"},
},
{{"y", "add:z:0"}});
}
class TestFunctionPackedArgs : public FunctionArgsInterface {
public:
TestFunctionPackedArgs(const int index,
absl::InlinedVector<TensorValue, 4UL>&& tensor_args) {
packed_args_.emplace(index, std::move(tensor_args));
}
  ~TestFunctionPackedArgs() override {}
  bool HasRemoteOrPackedInputs() const override { return true; }
  Status GetLocalArg(const FunctionArgIndex& index,
                     Tensor* val) const override {
    *val = *packed_args_.at(index.index).at(index.sub_index).tensor;
    return absl::OkStatus();
  }
std::vector<Tensor> GetLocalTensors() const override { return {}; }
private:
absl::flat_hash_map<int, absl::InlinedVector<TensorValue, 4UL>> packed_args_;
};
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_CompositeDevice) {
Init({AddVarAcrossDevices()});
const Tensor initial_resource_value0 = test::AsTensor<float>({10, 20});
Var* resource0 = new Var(DT_FLOAT);
*resource0->tensor() = initial_resource_value0;
resource0->is_initialized = true;
const Tensor initial_resource_value1 = test::AsTensor<float>({30, 40});
Var* resource1 = new Var(DT_FLOAT);
*resource1->tensor() = initial_resource_value1;
resource1->is_initialized = true;
ResourceMgr* mgr0 = device0_->resource_manager();
ResourceMgr* mgr1 = device1_->resource_manager();
TF_ASSERT_OK(mgr0->Create(mgr0->default_container(), "var", resource0));
TF_ASSERT_OK(mgr1->Create(mgr1->default_container(), "var", resource1));
Tensor resource_handle0 =
GetResourceHandle("var", mgr0->default_container(), device0_->name());
Tensor resource_handle1 =
GetResourceHandle("var", mgr1->default_container(), device1_->name());
Status s;
std::unique_ptr<CompositeDevice> composite_device =
      CompositeDevice::MakeDevice({device0_->name(), device1_->name()}, 0,
                                  device_mgr_->HostCPU()->parsed_name(), &s);
TF_ASSERT_OK(s);
AddCompositeDevice(composite_device.get());
FunctionLibraryRuntime::Options opts;
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"COMPOSITE:0"}, {"CPU:0"});
inst_opts.composite_devices[composite_device->name()] =
composite_device->underlying_devices();
inst_opts.input_resource_dtypes_and_shapes[0] = {
initial_resource_value0.dtype(), initial_resource_value0.shape()};
{
absl::InlinedVector<TensorValue, 4UL> handles;
handles.push_back(TensorValue(&resource_handle0));
handles.push_back(TensorValue(&resource_handle1));
TestFunctionPackedArgs args(0, std::move(handles));
FunctionRet ret;
TF_CHECK_OK(RunWithPackedArgs("AddVarAcrossDevices", opts,
{{"T", DT_FLOAT}}, inst_opts, args, {&ret}));
EXPECT_EQ(ret.index(), 0);
test::ExpectTensorEqual<float>(absl::get<Tensor>(ret),
test::AsTensor<float>({40, 60}));
}
{
Tensor arg(DT_RESOURCE, TensorShape({2}));
arg.flat<ResourceHandle>()(0) = resource_handle0.scalar<ResourceHandle>()();
arg.flat<ResourceHandle>()(1) = resource_handle1.scalar<ResourceHandle>()();
Tensor ret;
TF_CHECK_OK(Run("AddVarAcrossDevices", opts, {{"T", DT_FLOAT}}, inst_opts,
{arg}, {&ret}));
test::ExpectTensorEqual<float>(ret, test::AsTensor<float>({40, 60}));
}
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ResourceOutput_GPU) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"GPU:0", "GPU:0"}, {"GPU:0", "GPU:0"});
Init({test::function::ResourceOutput(),
test::function::ReadResourceVariable()});
Tensor resource_value = CPUToGPU(test::AsTensor<float>({10, 20}));
Var* resource = new Var(DT_FLOAT);
*resource->tensor() = resource_value;
resource->is_initialized = true;
ResourceMgr* mgr = gpu_device_->resource_manager();
Status status = mgr->Create(mgr->default_container(), "my_gpu_var", resource);
ASSERT_TRUE(status.ok()) << status.message();
FunctionLibraryRuntime::Options opts;
Tensor x1 = CPUToGPU(test::AsTensor<float>({1, 2}));
Tensor x2 = GetResourceHandle("my_gpu_var", mgr->default_container(),
"/job:a/replica:0/task:0/device:GPU:0");
Tensor returned_handle;
Tensor y2;
TF_CHECK_OK(Run("ResourceOutput", opts, {{"T", DT_FLOAT}}, inst_opts,
{x1, x2}, {&returned_handle, &y2}));
EXPECT_FALSE(IsCUDATensor(returned_handle));
EXPECT_TRUE(IsCUDATensor(y2));
y2 = GPUToCPU(y2);
test::ExpectTensorEqual<float>(y2, test::AsTensor<float>({2, 4}));
inst_opts = MakeOptions("GPU:0", {"GPU:0"}, {"GPU:0"});
Tensor read_resource;
TF_CHECK_OK(Run("ReadResourceVariable", opts, {{"T", DT_FLOAT}}, inst_opts,
{returned_handle}, {&read_resource}));
EXPECT_TRUE(IsCUDATensor(read_resource));
read_resource = GPUToCPU(read_resource);
test::ExpectTensorEqual<float>(read_resource,
test::AsTensor<float>({10, 20}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_PlacerError) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"GPU:0", "GPU:0"}, {"CPU:0", "GPU:0"});
Init({test::function::ResourceOutput(),
test::function::ReadResourceVariable()});
FunctionLibraryRuntime::Handle handle;
Status status = proc_flr_->Instantiate(
"ResourceOutput", test::function::Attrs({{"T", DT_FLOAT}}), inst_opts,
&handle);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
ASSERT_TRUE(absl::StrContains(status.message(), "Cannot place"));
}
REGISTER_OP("BrokenOp")
.Input("in: T")
.Output("out: T")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape);
class BrokenOp : public OpKernel {
public:
explicit BrokenOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
ctx->SetStatus(errors::Internal("I am broken"));
}
void Compute(OpKernelContext* ctx) override {
ctx->SetStatus(errors::Internal("I am broken"));
}
};
REGISTER_KERNEL_BUILDER(Name("BrokenOp").Device(DEVICE_CPU), BrokenOp);
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_CreateKernelsEagerly) {
auto T = DT_INT32;
FunctionDef broken_func = FunctionDefHelper::Define(
"Broken",
{"x: int32"},
{"y: int32"},
{},
{{{"y"}, "BrokenOp", {"x"}, {{"T", T}}}});
Init({broken_func});
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0"});
FunctionLibraryRuntime::Handle handle;
TF_CHECK_OK(Instantiate("Broken", {{"T", DT_INT32}}, inst_opts, &handle));
TF_CHECK_OK(proc_flr_->ReleaseHandle(handle));
inst_opts.create_kernels_eagerly = true;
Status status = Instantiate("Broken", {{"T", DT_INT32}}, inst_opts, &handle);
EXPECT_TRUE(errors::IsInternal(status));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_StateHandle) {
auto T = DT_INT32;
FunctionDef stateful_func = FunctionDefHelper::Define(
"RandomUniformWrapper",
{"x: resource"},
{"y: int32"},
{},
{FunctionDefHelper::Const<int32>("shape", absl::Span<const int32>({1})),
FunctionDefHelper::Const<int32>("minval", 0),
{{"maxval"}, "ReadVariableOp", {"x"}, {{"dtype", T}}, {}},
{{"y"},
"RandomUniformInt",
{"shape", "minval", "maxval"},
{{"seed", 37}, {"seed2", 48}, {"Tout", T}, {"T", T}}}});
Init({stateful_func});
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
ResourceMgr* mgr = gpu_device_->resource_manager();
Tensor resource_value = CPUToGPU(test::AsScalar<int>(10));
Var* resource = new Var(T);
*resource->tensor() = resource_value;
resource->is_initialized = true;
Status status = mgr->Create(mgr->default_container(), "my_gpu_var", resource);
ASSERT_TRUE(status.ok()) << status.message();
Tensor x = GetResourceHandle("my_gpu_var", mgr->default_container(),
"/job:a/replica:0/task:0/device:GPU:0");
Tensor y;
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"GPU:0"}, {"CPU:0"});
FunctionLibraryRuntime::Handle handle;
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}}, inst_opts,
&handle));
for (auto expected : {6, 4}) {
TF_CHECK_OK(RunInstantiated(handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
FunctionLibraryRuntime::Handle other_handle;
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}}, inst_opts,
&other_handle));
EXPECT_EQ(handle, other_handle);
for (auto expected : {0, 1}) {
TF_CHECK_OK(RunInstantiated(other_handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
inst_opts.state_handle = "handle_1";
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}}, inst_opts,
&other_handle));
EXPECT_NE(handle, other_handle);
for (auto expected : {6, 4, 0, 1}) {
TF_CHECK_OK(RunInstantiated(other_handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
inst_opts.state_handle = "handle_2";
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}}, inst_opts,
&other_handle));
EXPECT_NE(handle, other_handle);
for (auto expected : {6, 4, 0, 1}) {
TF_CHECK_OK(RunInstantiated(other_handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
inst_opts.state_handle = "handle_3";
for (int i = 0; i < 2; ++i) {
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}},
inst_opts, &other_handle));
EXPECT_NE(handle, other_handle);
for (auto expected : {6, 4, 0, 1}) {
TF_CHECK_OK(RunInstantiated(other_handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
TF_CHECK_OK(proc_flr_->ReleaseHandle(other_handle));
}
}
REGISTER_OP("SessionMetadataReader")
.Input("x: int64")
.Output("y: string")
.SetIsStateful()
.Doc(R"doc(SessionMetadataReader returns the session metadata.
x: int64
y: string
)doc");
class SessionMetadataReaderOp : public OpKernel {
public:
explicit SessionMetadataReaderOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
Tensor* out_tensor = nullptr;
OP_REQUIRES_OK(ctx,
ctx->allocate_output("y", TensorShape({}), &out_tensor));
if (ctx->session_metadata() != nullptr) {
out_tensor->scalar<tstring>()() =
tsl::LegacyUnredactedDebugString(*ctx->session_metadata());
} else {
out_tensor->scalar<tstring>()() = "";
}
}
};
REGISTER_KERNEL_BUILDER(Name("SessionMetadataReader").Device(DEVICE_CPU),
SessionMetadataReaderOp);
FunctionDef SessionMetadataReaderOpFn() {
return FunctionDefHelper::Define(
"SessionMetadataReaderFn",
{"x: int64"},
{"y: string"},
{},
{{{"y"}, "SessionMetadataReader", {"x"}, {}}});
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SessionMetadataAbsent) {
Init({SessionMetadataReaderOpFn()}, nullptr);
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
const auto x = test::AsTensor<int64_t>({17});
Tensor y;
TF_CHECK_OK(
Run("SessionMetadataReaderFn", opts, {}, instantiate_opts, {x}, {&y}));
EXPECT_EQ("", y.scalar<tstring>()());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SessionMetadataPresent) {
const SessionMetadata session_metadata = GenerateSessionMetadata();
Init({SessionMetadataReaderOpFn()}, &session_metadata);
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
const auto x = test::AsTensor<int64_t>({17});
Tensor y;
TF_CHECK_OK(
Run("SessionMetadataReaderFn", opts, {}, instantiate_opts, {x}, {&y}));
SessionMetadata read_metadata;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(y.scalar<tstring>()(),
&read_metadata));
EXPECT_EQ(session_metadata.name(), read_metadata.name());
EXPECT_EQ(session_metadata.version(), read_metadata.version());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, CompositeDevicesAfterCloning) {
Init({AddVarAcrossDevices()});
Status s;
std::unique_ptr<CompositeDevice> composite_device =
      CompositeDevice::MakeDevice({device0_->name(), device1_->name()}, 0,
                                  device_mgr_->HostCPU()->parsed_name(), &s);
TF_ASSERT_OK(s);
AddCompositeDevice(composite_device.get());
auto* flr = proc_flr_->GetFLR("/job:a/replica:0/task:0/cpu:0");
ASSERT_NE(nullptr, flr);
std::unique_ptr<FunctionLibraryDefinition> cloned_lib_def;
std::unique_ptr<ProcessFunctionLibraryRuntime> cloned_proc_flr;
FunctionLibraryRuntime* cloned_flr;
TF_ASSERT_OK(flr->Clone(&cloned_lib_def, &cloned_proc_flr, &cloned_flr));
EXPECT_EQ(
cloned_proc_flr->device_set()->FindDeviceByName(composite_device->name()),
composite_device.get());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SessionMetadataPresentAfterCloning) {
const SessionMetadata session_metadata = GenerateSessionMetadata();
Init({SessionMetadataReaderOpFn()}, &session_metadata);
auto* flr = proc_flr_->GetFLR("/job:a/replica:0/task:0/cpu:0");
ASSERT_NE(nullptr, flr);
std::unique_ptr<FunctionLibraryDefinition> cloned_lib_def;
std::unique_ptr<ProcessFunctionLibraryRuntime> cloned_proc_flr;
FunctionLibraryRuntime* cloned_flr;
TF_ASSERT_OK(flr->Clone(&cloned_lib_def, &cloned_proc_flr, &cloned_flr));
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
const auto x = test::AsTensor<int64_t>({17});
Tensor y;
Status s = RunWithRuntime<std::vector<Tensor>, Tensor>(
"SessionMetadataReaderFn", opts, {}, instantiate_opts, {x}, {&y},
cloned_proc_flr.get());
TF_CHECK_OK(s);
SessionMetadata read_metadata;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(y.scalar<tstring>()(),
&read_metadata));
EXPECT_EQ(session_metadata.name(), read_metadata.name());
EXPECT_EQ(session_metadata.version(), read_metadata.version());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SimpleGraphAllowsSync) {
auto async_safe =
metrics::TestDelta("subgraph_async_summary", "safe_for_sync");
FunctionLibraryRuntime::InstantiateOptions opts =
MakeOptions("CPU:0", {}, {});
opts.allow_small_function_optimizations = true;
TestInstantiateSimpleFunction(this, opts);
EXPECT_GT(async_safe.Get(), 0);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, UnsafeOpRequiresAsync) {
auto async_safe =
metrics::TestDelta("subgraph_async_summary", "safe_for_sync");
auto async_unsafe_op =
metrics::TestDelta("subgraph_async_summary", "unsafe_op");
FunctionLibraryRuntime::InstantiateOptions opts =
MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0"});
opts.allow_small_function_optimizations = true;
TestControlFlow(this, opts);
EXPECT_EQ(async_safe.Get(), 0);
EXPECT_GT(async_unsafe_op.Get(), 0);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, PartitionedGraphRequiresAsync) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
auto async_send_only =
metrics::TestDelta("subgraph_async_summary", "send_only");
auto async_recv_only =
metrics::TestDelta("subgraph_async_summary", "recv_only");
FunctionLibraryRuntime::InstantiateOptions opts =
MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0", "GPU:0"});
opts.allow_small_function_optimizations = true;
TestTwoDeviceMult(this, opts);
EXPECT_GT(async_send_only.Get(), 0);
EXPECT_GT(async_recv_only.Get(), 0);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, RecordAotSavingTimeAndHitCount) {
FunctionLibraryRuntime::InstantiateOptions opts =
MakeOptions("CPU:0", {}, {});
opts.allow_small_function_optimizations = true;
FunctionLibraryRuntime::Handle h;
OptimizedFunctionGraph optimized_graph_proto;
optimized_graph_proto.set_name("FindDevice");
optimized_graph_proto.set_optimization_time_usecs(10);
Init({test::function::FindDevice()}, nullptr,
{optimized_graph_proto});
Instantiate("FindDevice",
{{"_target", "/job:b/replica:0/task:0/device:CPU:0"}}, opts, &h)
.IgnoreError();
EXPECT_EQ(metrics::GetFunctionGraphOptimizationSavingTimeUsecs(
metrics::GraphOptimizationSource::kAot),
10);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheHitCount(
metrics::GraphOptimizationSource::kAot),
1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/process_function_library_runtime.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/process_function_library_runtime_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a3f8588-637c-45e9-a35c-80b3a7189e17 | cpp | tensorflow/tensorflow | cost_recorder | tensorflow/core/tfrt/fallback/cost_recorder.cc | tensorflow/core/tfrt/fallback/cost_recorder_test.cc | #include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include <limits>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/fallback/op_cost_map.pb.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace tfrt_stub {
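// Accumulates total execution time and an invocation count per op_key;
// GetCost reports the running average.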
void CostRecorder::RecordCost(int64_t op_key, uint64_t execution_time) {
mutex_lock l(op_cost_map_mutex_);
op_cost_map_[op_key].first += execution_time;
op_cost_map_[op_key].second += 1;
}
uint64_t CostRecorder::GetCost(int64_t op_key) const {
tf_shared_lock l(op_cost_map_mutex_);
const auto iter = op_cost_map_.find(op_key);
if (iter == op_cost_map_.end()) return std::numeric_limits<uint32_t>::max();
const auto total_cost = iter->second.first;
const auto num_ops = iter->second.second;
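  // Clamp the average to at least 1 so a recorded cost of zero is never
  // reported as free.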
  auto r = std::max(static_cast<uint64_t>(1),
                    static_cast<uint64_t>(total_cost / num_ops));
VLOG(2) << "Get cost for op_key=" << op_key << ", cost=" << r;
return r;
}
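// Writes the per-op average costs as a text-format OpCostMapProto to the file
// named by the measured-cost-path environment variable.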
Status CostRecorder::WriteToFile() const {
OpCostMapProto op_cost_map_proto;
{
tf_shared_lock l(op_cost_map_mutex_);
for (const auto& [op_key, op_cost] : op_cost_map_) {
const uint64_t avg_op_cost = op_cost.first / op_cost.second;
(*op_cost_map_proto.mutable_op_cost_map())[op_key] = avg_op_cost;
}
}
std::string measured_cost_path;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(MesuredCostPathEnvVarName(), "",
&measured_cost_path));
return tensorflow::WriteTextProto(tensorflow::Env::Default(),
measured_cost_path, op_cost_map_proto);
}
size_t CostRecorder::size() const {
tf_shared_lock l(op_cost_map_mutex_);
return op_cost_map_.size();
}
}
} | #include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include <limits>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/tfrt/fallback/op_cost_map.pb.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr int64_t kTestOpKey = 1;
constexpr uint64_t kTestCost = 1234;
constexpr uint64_t kTestAvgCost = 1851;
TEST(CostRecorderTest, RecordCostTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, kTestCost);
EXPECT_EQ(recorder.size(), 1);
}
TEST(CostRecorderTest, GetCostTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, 2 * kTestCost);
EXPECT_EQ(recorder.size(), 1);
EXPECT_EQ(recorder.GetCost(kTestOpKey), kTestAvgCost);
}
TEST(CostRecorderTest, GetCostDefaultValueTest) {
CostRecorder recorder;
ASSERT_EQ(recorder.size(), 0);
EXPECT_EQ(recorder.GetCost(kTestOpKey),
std::numeric_limits<uint32_t>::max());
}
TEST(CostRecorderTest, WriteToFileTest) {
CostRecorder recorder;
ASSERT_EQ(recorder.size(), 0);
std::string measured_cost_path;
tensorflow::Env::Default()->LocalTempFilename(&measured_cost_path);
ASSERT_EQ(setenv("TF_TFRT_MEASURED_COST_PATH", measured_cost_path.c_str(), 1),
0);
TF_CHECK_OK(recorder.WriteToFile());
OpCostMapProto op_cost_map_proto;
TF_CHECK_OK(tensorflow::ReadTextProto(
tensorflow::Env::Default(), measured_cost_path, &op_cost_map_proto));
EXPECT_EQ(op_cost_map_proto.op_cost_map_size(), 0);
}
TEST(CostRecorderTest, ProtoRecordsTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, 2 * kTestCost);
ASSERT_EQ(recorder.size(), 1);
std::string measured_cost_path;
tensorflow::Env::Default()->LocalTempFilename(&measured_cost_path);
ASSERT_EQ(setenv(CostRecorder::MesuredCostPathEnvVarName(),
measured_cost_path.c_str(), 1),
0);
TF_CHECK_OK(recorder.WriteToFile());
OpCostMapProto op_cost_map_proto;
TF_CHECK_OK(tensorflow::ReadTextProto(
tensorflow::Env::Default(), measured_cost_path, &op_cost_map_proto));
EXPECT_EQ(op_cost_map_proto.op_cost_map().find(kTestOpKey)->second,
kTestAvgCost);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/cost_recorder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/cost_recorder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
504bdc2c-48c0-44c6-9489-d2383b5d9d67 | cpp | tensorflow/tensorflow | cuda_platform | third_party/xla/xla/stream_executor/cuda/cuda_platform.cc | third_party/xla/xla/stream_executor/cuda/cuda_platform_test.cc | #include "xla/stream_executor/cuda/cuda_platform.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/cuda/cuda_executor.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform/initialize.h"
#include "xla/stream_executor/platform_manager.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace stream_executor {
namespace gpu {
CudaPlatform::CudaPlatform() : name_("CUDA") {}
Platform::Id CudaPlatform::id() const { return cuda::kCudaPlatformId; }
int CudaPlatform::VisibleDeviceCount() const {
static const int num_devices = [] {
if (!GpuDriver::Init().ok()) return -1;
return GpuDriver::GetDeviceCount();
}();
return num_devices;
}
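// VisibleDeviceCount() above memoizes through a function-local static: the
// immediately invoked lambda runs once per process, so driver
// initialization is attempted at most once and a failure is cached as -1
// rather than retried on every call.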
const std::string& CudaPlatform::Name() const { return name_; }
absl::StatusOr<std::unique_ptr<DeviceDescription>>
CudaPlatform::DescriptionForDevice(int ordinal) const {
return CudaExecutor::CreateDeviceDescription(ordinal);
}
absl::StatusOr<StreamExecutor*> CudaPlatform::ExecutorForDevice(int ordinal) {
return executor_cache_.GetOrCreate(
ordinal, [this, ordinal]() { return GetUncachedExecutor(ordinal); });
}
absl::StatusOr<StreamExecutor*> CudaPlatform::FindExisting(int ordinal) {
return executor_cache_.Get(ordinal);
}
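// The two lookup paths above differ on purpose: ExecutorForDevice()
// creates and caches an executor on first use, while FindExisting() only
// consults the cache and fails for ordinals never materialized.
// Illustrative sketch, mirroring cuda_platform_test.cc:
//
//   CudaPlatform platform;
//   CHECK(!platform.FindExisting(0).ok());  // Nothing cached yet.
//   TF_ASSIGN_OR_RETURN(StreamExecutor* exec, platform.ExecutorForDevice(0));
//   TF_ASSIGN_OR_RETURN(StreamExecutor* same, platform.FindExisting(0));
//   CHECK_EQ(exec, same);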
absl::StatusOr<std::unique_ptr<StreamExecutor>>
CudaPlatform::GetUncachedExecutor(int ordinal) {
auto executor = std::make_unique<CudaExecutor>(this, ordinal);
TF_RETURN_IF_ERROR(executor->Init());
return std::move(executor);
}
}
static void InitializeCudaPlatform() {
TF_CHECK_OK(
PlatformManager::RegisterPlatform(std::make_unique<gpu::CudaPlatform>()));
}
}
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(
cuda_platform, stream_executor::InitializeCudaPlatform()); | #include "xla/stream_executor/cuda/cuda_platform.h"
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
TEST(CudaPlatformTest, FindExistingWorks) {
TF_ASSERT_OK_AND_ASSIGN(Platform * platform,
PlatformManager::PlatformWithName("CUDA"));
CHECK_GT(platform->VisibleDeviceCount(), 0);
for (int i = 0; i < platform->VisibleDeviceCount(); ++i) {
EXPECT_FALSE(platform->FindExisting(i).ok());
}
absl::flat_hash_map<int, StreamExecutor*> executors;
for (int i = 0; i < platform->VisibleDeviceCount(); ++i) {
TF_ASSERT_OK_AND_ASSIGN(auto executor, platform->ExecutorForDevice(i));
executors[i] = executor;
}
EXPECT_EQ(executors.size(), platform->VisibleDeviceCount());
for (int i = 0; i < platform->VisibleDeviceCount(); ++i) {
TF_ASSERT_OK_AND_ASSIGN(auto executor, platform->FindExisting(i));
EXPECT_EQ(executor, executors[i]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_platform.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_platform_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b87bef5-aef6-4c86-af67-6588098ca436 | cpp | tensorflow/tensorflow | overflow | tensorflow/core/util/overflow.h | tensorflow/core/util/overflow_test.cc | #ifndef TENSORFLOW_CORE_UTIL_OVERFLOW_H_
#define TENSORFLOW_CORE_UTIL_OVERFLOW_H_
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
inline int64_t MultiplyWithoutOverflow(int64_t x, int64_t y) {
if (TF_PREDICT_FALSE(x < 0)) return -1;
if (TF_PREDICT_FALSE(y < 0)) return -1;
if (TF_PREDICT_FALSE(x == 0)) return 0;
const uint64 ux = x;
const uint64 uy = y;
const uint64 uxy = ux * uy;
if (TF_PREDICT_FALSE((ux | uy) >> 32 != 0)) {
if (uxy / ux != uy) return -1;
}
return static_cast<int64_t>(uxy);
}
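// The overflow check above exploits two facts: a product of two operands
// that both fit in 32 bits cannot overflow 64 bits, so the division-based
// test only runs when (ux | uy) has high bits set; and since the inputs
// are nonnegative, a product above int64 max that still fits in uint64
// casts to a negative int64, which callers treat as the overflow sentinel.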
inline int64_t AddWithoutOverflow(int64_t x, int64_t y) {
  if (TF_PREDICT_FALSE((x < 0) || (y < 0))) return -1;
const uint64 ux = x;
const uint64 uy = y;
const uint64 uxy = ux + uy;
return static_cast<int64_t>(uxy);
}
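// For AddWithoutOverflow the unsigned sum of two nonnegative int64 values
// exceeds int64 max by at most one bit, so any overflow surfaces as a
// negative result after the cast; callers test for < 0 instead of a
// status. Usage sketch for sizing a dense buffer (illustrative):
//
//   int64_t elems = MultiplyWithoutOverflow(num_rows, num_cols);
//   int64_t bytes =
//       elems < 0 ? -1 : MultiplyWithoutOverflow(elems, sizeof(float));
//   if (bytes < 0) { /* reject: allocation size overflowed */ }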
}
#endif | #include "tensorflow/core/util/overflow.h"
#include <cmath>
#include <limits>
#include <vector>
#ifdef PLATFORM_WINDOWS
#include <Windows.h>
#endif
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
bool HasMultiplyOverflow(int64_t x, int64_t y) {
#ifdef PLATFORM_WINDOWS
return ::MultiplyHigh(x, y) != 0;
#else
long double dxy = static_cast<long double>(x) * static_cast<long double>(y);
return dxy > std::numeric_limits<int64_t>::max();
#endif
}
bool HasAddOverflow(int64_t x, int64_t y) {
int64_t carry_from_lower_bits = ((x & 0xffffffff) + (y & 0xffffffff)) >> 32;
if ((x >> 32) + (y >> 32) + carry_from_lower_bits >=
(static_cast<int64_t>(1) << 31)) {
return true;
}
return false;
}
TEST(OverflowTest, Nonnegative) {
std::vector<int64_t> interesting = {
0,
std::numeric_limits<int64_t>::max(),
};
for (int i = 0; i < 63; i++) {
int64_t bit = static_cast<int64_t>(1) << i;
interesting.push_back(bit);
interesting.push_back(bit + 1);
interesting.push_back(bit - 1);
}
for (const int64_t mid : {static_cast<int64_t>(1) << 32,
static_cast<int64_t>(std::pow(2, 63.0 / 2))}) {
for (int i = -5; i < 5; i++) {
interesting.push_back(mid + i);
}
}
for (int64_t x : interesting) {
for (int64_t y : interesting) {
int64_t xmy = MultiplyWithoutOverflow(x, y);
if (HasMultiplyOverflow(x, y)) {
EXPECT_LT(xmy, 0) << x << " " << y;
} else {
EXPECT_EQ(x * y, xmy) << x << " " << y;
}
int64_t xpy = AddWithoutOverflow(x, y);
if (HasAddOverflow(x, y)) {
EXPECT_LT(xpy, 0) << x << " " << y;
} else {
EXPECT_EQ(x + y, xpy) << x << " " << y;
}
}
}
}
TEST(OverflowTest, Negative) {
const int64_t negatives[] = {-1, std::numeric_limits<int64_t>::min()};
for (const int64_t n : negatives) {
EXPECT_LT(MultiplyWithoutOverflow(n, 0), 0) << n;
EXPECT_LT(MultiplyWithoutOverflow(0, n), 0) << n;
EXPECT_LT(MultiplyWithoutOverflow(n, n), 0) << n;
EXPECT_LT(AddWithoutOverflow(n, 0), 0) << n;
EXPECT_LT(AddWithoutOverflow(0, n), 0) << n;
EXPECT_LT(AddWithoutOverflow(n, n), 0) << n;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/overflow.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/overflow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
11861505-cf46-458e-b235-534742eff9ac | cpp | tensorflow/tensorflow | schedule_aware_collective_ops_cse | third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse.cc | third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse_test.cc | #include "xla/service/spmd/schedule_aware_collective_ops_cse.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsAddingOnlyDegenerateDimensions(const HloInstruction* inst) {
if (inst->opcode() != HloOpcode::kBitcast &&
inst->opcode() != HloOpcode::kReshape) {
return false;
}
const Shape& in_shape = inst->operand(0)->shape();
const Shape& out_shape = inst->shape();
return ShapeUtil::ElementsIn(in_shape) == ShapeUtil::ElementsIn(out_shape) &&
ShapeUtil::DimensionsUnmodifiedByReshape(in_shape, out_shape).size() ==
in_shape.rank();
}
const HloInstruction* PassthroughDegenerateAddingReshapes(
const HloInstruction* inst) {
while (IsAddingOnlyDegenerateDimensions(inst)) {
inst = inst->operand(0);
}
return inst;
}
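// PassthroughDegenerateAddingReshapes() above canonicalizes the CSE key:
// reshapes and bitcasts that preserve every input dimension and only
// insert size-1 dimensions are skipped, so two all-gathers fed by separate
// s32[8] -> s32[1,8] reshapes of the same operand still compare equal (see
// the ReshapeLookthrough tests in the companion file).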
bool ShouldConsiderSchedule(HloInstruction* hlo) {
return hlo->opcode() != HloOpcode::kCollectivePermute;
}
HloInstruction* MayConsiderCollective(HloInstruction* hlo, bool for_replicas) {
auto chan_instr = DynCast<HloChannelInstruction>(hlo);
if (!chan_instr) {
return nullptr;
}
if (for_replicas == chan_instr->channel_id().has_value()) {
return nullptr;
}
if (hlo->opcode() == HloOpcode::kCollectivePermute) {
return hlo;
}
auto coll = DynCast<HloCollectiveInstruction>(hlo);
if (!coll) {
return nullptr;
}
if (coll->constrain_layout()) {
return nullptr;
}
if (coll->opcode() == HloOpcode::kAllGather) {
return coll;
}
if (coll->opcode() == HloOpcode::kAllReduce && coll->shape().IsArray()) {
auto operand = coll->operand(0);
return operand->opcode() == HloOpcode::kDynamicUpdateSlice &&
operand->operand(0)->opcode() == HloOpcode::kBroadcast
? coll
: nullptr;
}
return nullptr;
}
absl::StatusOr<bool> RunOnComputation(HloComputation* comp, bool for_replicas,
int64_t distance_threshold) {
bool changed = false;
absl::flat_hash_map<const HloInstruction*, int64_t> height;
auto ordered_hlos = comp->MakeInstructionPostOrder();
int64_t max_height = 0;
for (auto it = ordered_hlos.rbegin(); it != ordered_hlos.rend(); ++it) {
auto hlo = *it;
int64_t h = 0;
for (auto user : hlo->users()) {
      h = std::max(h, height[user] + 1);
}
max_height = std::max(max_height, h);
height[hlo] = h;
}
auto lowest_user_height = [&](const HloInstruction* hlo) {
int64_t lowest = height[hlo];
for (auto user : hlo->users()) {
lowest = std::min(lowest, height[user]);
}
return lowest;
};
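  // The heights computed above measure each instruction's def-use distance
  // from the computation's outputs. Two equivalent collectives are merged
  // only if keeping the earlier result alive down to the duplicate's
  // position stays within distance_threshold levels; collective-permutes
  // bypass this schedule check via ShouldConsiderSchedule().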
absl::flat_hash_map<const HloInstruction*, std::vector<HloInstruction*>>
operand_to_collective;
for (HloInstruction* hlo : ordered_hlos) {
HloInstruction* coll = MayConsiderCollective(hlo, for_replicas);
if (!coll) {
continue;
}
auto& earlier_colls =
operand_to_collective[PassthroughDegenerateAddingReshapes(
coll->operand(0))];
bool found = false;
int64_t coll_height = height[coll];
    for (HloInstruction*& earlier_coll : earlier_colls) {
if (!ShapeUtil::Equal(earlier_coll->shape(), coll->shape())) {
continue;
}
HloInstruction* coll_operand = coll->mutable_operand(0);
TF_RETURN_IF_ERROR(
coll->ReplaceOperandWith(0, earlier_coll->mutable_operand(0)));
if (!earlier_coll->IdenticalIgnoringChannelIdValues(*coll)) {
TF_RETURN_IF_ERROR(coll->ReplaceOperandWith(0, coll_operand));
continue;
}
found = true;
if (ShouldConsiderSchedule(coll) &&
lowest_user_height(earlier_coll) > coll_height + distance_threshold) {
TF_RETURN_IF_ERROR(coll->ReplaceOperandWith(0, coll_operand));
earlier_coll = coll;
continue;
}
changed = true;
VLOG(1) << "Replacing " << coll->ToString() << " with "
<< earlier_coll->ToString();
TF_RETURN_IF_ERROR(coll->ReplaceAllUsesWith(earlier_coll));
break;
}
if (!found) {
earlier_colls.push_back(coll);
}
}
return changed;
}
}
absl::StatusOr<bool> ScheduleAwareCollectiveOpsCSE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto comp : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(
auto comp_changed,
RunOnComputation(comp, for_replicas_, distance_threshold_));
changed |= comp_changed;
}
return changed;
}
} | #include "xla/service/spmd/schedule_aware_collective_ops_cse.h"
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
class CollectiveOpsCseTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t distance_threshold = 100) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
hlo_module, GetModuleConfigForTest()));
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<ScheduleAwareCollectiveOpsCSE>(distance_threshold,
false);
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
};
TEST_F(CollectiveOpsCseTest, SimpleCseAllGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[1,8]{1,0} parameter(0)
ag1 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
channel_id=0, use_global_device_ids=true
ag2 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[2,8]{1,0} parameter(0)
cp1 = s32[2,8]{1,0} collective-permute(param0), source_target_pairs={{0,1},{1,0}},
channel_id=0
cp2 = s32[2,8]{1,0} collective-permute(param0), source_target_pairs={{0,1},{1,0}},
channel_id=1
ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(cp1, cp2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseReshapeLookthroughAllGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
rshp = s32[1,8]{1,0} reshape(param0)
rshp2 = s32[1,8]{1,0} reshape(param0)
ag1 = s32[2,8]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={0},
channel_id=0, use_global_device_ids=true
ag2 = s32[2,8]{1,0} all-gather(rshp2), replica_groups={{0,1}}, dimensions={0},
channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseReshapeLookthroughCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
rshp = s32[1,8]{1,0} reshape(param0)
rshp2 = s32[1,8]{1,0} reshape(param0)
cp1 = s32[1,8]{1,0} collective-permute(rshp), source_target_pairs={{0,1},{1,0}},
channel_id=0
cp2 = s32[1,8]{1,0} collective-permute(rshp2), source_target_pairs={{0,1},{1,0}},
channel_id=1
ROOT tuple = (s32[1,8]{1,0}, s32[1,8]{1,0}) tuple(cp1, cp2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleNoCseInvalidReshapes) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
rshp = s32[2,4]{1,0} reshape(param0)
rshp2 = s32[2,4]{1,0} reshape(param0)
ag1 = s32[4,4]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={0},
channel_id=0, use_global_device_ids=true
ag2 = s32[4,4]{1,0} all-gather(rshp2), replica_groups={{0,1}}, dimensions={0},
channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[4,4]{1,0}, s32[4,4]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseDifferentDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[1,8]{1,0} parameter(0)
ag1 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={1},
channel_id=0, use_global_device_ids=true
ag2 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}},
dimensions={1}, channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseDifferentDimReshapeLookthrough) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
rshp = s32[1,8]{1,0} reshape(param0)
rshp2 = s32[1,8]{1,0} reshape(param0)
ag1 = s32[1,16]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={1},
channel_id=0, use_global_device_ids=true
ag2 = s32[1,16]{1,0} all-gather(rshp2), replica_groups={{0,1}},
dimensions={1}, channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, NoCseGlobalDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[1,8]{1,0} parameter(0)
ag1 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
channel_id=0, use_global_device_ids=true
ag2 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0},{1}}, dimensions={0},
channel_id=1, use_global_device_ids=false
ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, NoCseChannelIdMismatch) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[1,8]{1,0} parameter(0)
ag1 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={1},
channel_id=0
ag2 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}},
dimensions={1}
ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
98be112c-907d-41a9-8977-35bd21348749 | cpp | tensorflow/tensorflow | local_response_norm | tensorflow/lite/kernels/local_response_norm.cc | tensorflow/lite/kernels/local_response_norm_test.cc | #include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace local_response_norm {
enum KernelType {
kReference,
kGenericOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = input->dims->data[1];
output_size->data[2] = input->dims->data[2];
output_size->data[3] = input->dims->data[3];
return context->ResizeTensor(context, output, output_size);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLocalResponseNormParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32) {
#define TF_LITE_LOCAL_RESPONSE_NORM(type) \
tflite::LocalResponseNormalizationParams op_params; \
op_params.range = params->radius; \
op_params.bias = params->bias; \
op_params.alpha = params->alpha; \
op_params.beta = params->beta; \
type::LocalResponseNormalization( \
op_params, GetTensorShape(input), GetTensorData<float>(input), \
GetTensorShape(output), GetTensorData<float>(output))
if (kernel_type == kReference) {
TF_LITE_LOCAL_RESPONSE_NORM(reference_ops);
}
if (kernel_type == kGenericOptimized) {
TF_LITE_LOCAL_RESPONSE_NORM(optimized_ops);
}
#undef TF_LITE_LOCAL_RESPONSE_NORM
} else {
TF_LITE_KERNEL_LOG(context, "Output type is %d, requires float.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LOCAL_RESPONSE_NORM_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, local_response_norm::Prepare,
local_response_norm::Eval<local_response_norm::kReference>};
return &r;
}
TfLiteRegistration* Register_LOCAL_RESPONSE_NORM_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, local_response_norm::Prepare,
local_response_norm::Eval<local_response_norm::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_LOCAL_RESPONSE_NORMALIZATION() {
return Register_LOCAL_RESPONSE_NORM_GENERIC_OPT();
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class LocalResponseNormOpModel : public SingleOpModel {
public:
LocalResponseNormOpModel(std::initializer_list<int> input_shape, int radius,
float bias, float alpha, float beta) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
BuiltinOptions_LocalResponseNormalizationOptions,
CreateLocalResponseNormalizationOptions(builder_, radius, bias,
alpha, beta)
.Union());
BuildInterpreter({input_shape});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
TEST(LocalResponseNormOpTest, SameAsL2Norm) {
LocalResponseNormOpModel m({1, 1, 1, 6}, 20, 0.0,
1.0, 0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05})));
}
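// With the radius covering the whole vector, bias 0, alpha 1, and beta 0.5,
// the LRN formula output = input / (bias + alpha * sum(squares))^beta
// reduces to x / ||x||_2, which is why the test above halves every input:
// the squares of the inputs sum to exactly 4.0, so the L2 norm is 2.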
TEST(LocalResponseNormOpTest, WithAlpha) {
LocalResponseNormOpModel m({1, 1, 1, 6}, 20, 0.0,
4.0, 0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{-0.275, 0.15, 0.175, 0.3, -0.175, 0.025})));
}
TEST(LocalResponseNormOpTest, WithBias) {
LocalResponseNormOpModel m({1, 1, 1, 6}, 20, 9.0,
4.0, 0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.22, 0.12, 0.14, 0.24, -0.14, 0.02})));
}
TEST(LocalResponseNormOpTest, SmallRadius) {
LocalResponseNormOpModel m({1, 1, 1, 6}, 2, 9.0,
4.0, 0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-0.264926, 0.125109, 0.140112, 0.267261, -0.161788, 0.0244266})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/local_response_norm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/local_response_norm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8163b5c8-cb29-47ef-96a7-84b47fec4f55 | cpp | google/cel-cpp | strings | extensions/strings.cc | extensions/strings_test.cc | #include "extensions/strings.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "common/casting.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_options.h"
#include "internal/status_macros.h"
#include "internal/utf8.h"
#include "runtime/function_adapter.h"
#include "runtime/function_registry.h"
#include "runtime/internal/errors.h"
#include "runtime/runtime_options.h"
namespace cel::extensions {
namespace {
struct AppendToStringVisitor {
std::string& append_to;
void operator()(absl::string_view string) const { append_to.append(string); }
void operator()(const absl::Cord& cord) const {
append_to.append(static_cast<std::string>(cord));
}
};
absl::StatusOr<Value> Join2(ValueManager& value_manager, const ListValue& value,
const StringValue& separator) {
std::string result;
CEL_ASSIGN_OR_RETURN(auto iterator, value.NewIterator(value_manager));
Value element;
if (iterator->HasNext()) {
CEL_RETURN_IF_ERROR(iterator->Next(value_manager, element));
if (auto string_element = As<StringValue>(element); string_element) {
string_element->NativeValue(AppendToStringVisitor{result});
} else {
return ErrorValue{
runtime_internal::CreateNoMatchingOverloadError("join")};
}
}
std::string separator_scratch;
absl::string_view separator_view = separator.NativeString(separator_scratch);
while (iterator->HasNext()) {
result.append(separator_view);
CEL_RETURN_IF_ERROR(iterator->Next(value_manager, element));
if (auto string_element = As<StringValue>(element); string_element) {
string_element->NativeValue(AppendToStringVisitor{result});
} else {
return ErrorValue{
runtime_internal::CreateNoMatchingOverloadError("join")};
}
}
result.shrink_to_fit();
return value_manager.CreateUncheckedStringValue(std::move(result));
}
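// At the CEL level these overloads back the strings extension's join():
// illustratively, ['a', 'b', 'c'].join('-') evaluates to "a-b-c", and
// Join1 below supplies the separator-less form ['a', 'b'].join() == "ab".
// Any non-string element produces the no-matching-overload error above.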
absl::StatusOr<Value> Join1(ValueManager& value_manager,
const ListValue& value) {
return Join2(value_manager, value, StringValue{});
}
struct SplitWithEmptyDelimiter {
ValueManager& value_manager;
int64_t& limit;
ListValueBuilder& builder;
absl::StatusOr<Value> operator()(absl::string_view string) const {
char32_t rune;
size_t count;
std::string buffer;
buffer.reserve(4);
while (!string.empty() && limit > 1) {
std::tie(rune, count) = internal::Utf8Decode(string);
buffer.clear();
internal::Utf8Encode(buffer, rune);
CEL_RETURN_IF_ERROR(builder.Add(
value_manager.CreateUncheckedStringValue(absl::string_view(buffer))));
--limit;
string.remove_prefix(count);
}
if (!string.empty()) {
CEL_RETURN_IF_ERROR(
builder.Add(value_manager.CreateUncheckedStringValue(string)));
}
return std::move(builder).Build();
}
absl::StatusOr<Value> operator()(const absl::Cord& string) const {
auto begin = string.char_begin();
auto end = string.char_end();
char32_t rune;
size_t count;
std::string buffer;
while (begin != end && limit > 1) {
std::tie(rune, count) = internal::Utf8Decode(begin);
buffer.clear();
internal::Utf8Encode(buffer, rune);
CEL_RETURN_IF_ERROR(builder.Add(
value_manager.CreateUncheckedStringValue(absl::string_view(buffer))));
--limit;
absl::Cord::Advance(&begin, count);
}
if (begin != end) {
buffer.clear();
while (begin != end) {
auto chunk = absl::Cord::ChunkRemaining(begin);
buffer.append(chunk);
absl::Cord::Advance(&begin, chunk.size());
}
buffer.shrink_to_fit();
CEL_RETURN_IF_ERROR(builder.Add(
value_manager.CreateUncheckedStringValue(std::move(buffer))));
}
return std::move(builder).Build();
}
};
absl::StatusOr<Value> Split3(ValueManager& value_manager,
const StringValue& string,
const StringValue& delimiter, int64_t limit) {
if (limit == 0) {
return ListValue{};
}
if (limit < 0) {
limit = std::numeric_limits<int64_t>::max();
}
CEL_ASSIGN_OR_RETURN(auto builder,
value_manager.NewListValueBuilder(ListType{}));
if (string.IsEmpty()) {
builder->Reserve(1);
CEL_RETURN_IF_ERROR(builder->Add(StringValue{}));
return std::move(*builder).Build();
}
if (delimiter.IsEmpty()) {
return string.NativeValue(
SplitWithEmptyDelimiter{value_manager, limit, *builder});
}
std::string delimiter_scratch;
absl::string_view delimiter_view = delimiter.NativeString(delimiter_scratch);
std::string content_scratch;
absl::string_view content_view = string.NativeString(content_scratch);
while (limit > 1 && !content_view.empty()) {
auto pos = content_view.find(delimiter_view);
if (pos == absl::string_view::npos) {
break;
}
CEL_RETURN_IF_ERROR(builder->Add(
value_manager.CreateUncheckedStringValue(content_view.substr(0, pos))));
--limit;
content_view.remove_prefix(pos + delimiter_view.size());
if (content_view.empty()) {
CEL_RETURN_IF_ERROR(builder->Add(StringValue{}));
return std::move(*builder).Build();
}
}
CEL_RETURN_IF_ERROR(
builder->Add(value_manager.CreateUncheckedStringValue(content_view)));
return std::move(*builder).Build();
}
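// Split semantics as implemented above, in illustrative CEL terms:
//   'a-b-c'.split('-')     -> ['a', 'b', 'c']   (negative limit: no cap)
//   'a-b-c'.split('-', 2)  -> ['a', 'b-c']      (limit bounds piece count)
//   'abc'.split('', 2)     -> ['a', 'bc']       (empty delimiter: by rune)
//   ''.split('-')          -> ['']              (empty input: one empty piece)
//   'x'.split('-', 0)      -> []                (limit 0 short-circuits)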
absl::StatusOr<Value> Split2(ValueManager& value_manager,
const StringValue& string,
const StringValue& delimiter) {
return Split3(value_manager, string, delimiter, -1);
}
absl::StatusOr<Value> LowerAscii(ValueManager& value_manager,
const StringValue& string) {
std::string content = string.NativeString();
absl::AsciiStrToLower(&content);
return value_manager.CreateUncheckedStringValue(std::move(content));
}
}
absl::Status RegisterStringsFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<absl::StatusOr<Value>, ListValue>::CreateDescriptor(
"join", true),
UnaryFunctionAdapter<absl::StatusOr<Value>, ListValue>::WrapFunction(
Join1)));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<absl::StatusOr<Value>, ListValue, StringValue>::
CreateDescriptor("join", true),
BinaryFunctionAdapter<absl::StatusOr<Value>, ListValue,
StringValue>::WrapFunction(Join2)));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<absl::StatusOr<Value>, StringValue, StringValue>::
CreateDescriptor("split", true),
BinaryFunctionAdapter<absl::StatusOr<Value>, StringValue,
StringValue>::WrapFunction(Split2)));
CEL_RETURN_IF_ERROR(registry.Register(
VariadicFunctionAdapter<
absl::StatusOr<Value>, StringValue, StringValue,
int64_t>::CreateDescriptor("split", true),
VariadicFunctionAdapter<absl::StatusOr<Value>, StringValue, StringValue,
int64_t>::WrapFunction(Split3)));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<absl::StatusOr<Value>, StringValue>::
CreateDescriptor("lowerAscii", true),
UnaryFunctionAdapter<absl::StatusOr<Value>, StringValue>::WrapFunction(
LowerAscii)));
return absl::OkStatus();
}
absl::Status RegisterStringsFunctions(
google::api::expr::runtime::CelFunctionRegistry* registry,
const google::api::expr::runtime::InterpreterOptions& options) {
return RegisterStringsFunctions(
registry->InternalGetRegistry(),
google::api::expr::runtime::ConvertToRuntimeOptions(options));
}
} | #include "extensions/strings.h"
#include <memory>
#include <utility>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/strings/cord.h"
#include "common/memory.h"
#include "common/value.h"
#include "common/values/legacy_value_manager.h"
#include "extensions/protobuf/runtime_adapter.h"
#include "internal/testing.h"
#include "parser/options.h"
#include "parser/parser.h"
#include "runtime/activation.h"
#include "runtime/runtime.h"
#include "runtime/runtime_builder.h"
#include "runtime/runtime_options.h"
#include "runtime/standard_runtime_builder_factory.h"
namespace cel::extensions {
namespace {
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::parser::Parse;
using ::google::api::expr::parser::ParserOptions;
TEST(Strings, SplitWithEmptyDelimiterCord) {
MemoryManagerRef memory_manager = MemoryManagerRef::ReferenceCounting();
const auto options = RuntimeOptions{};
ASSERT_OK_AND_ASSIGN(auto builder, CreateStandardRuntimeBuilder(options));
EXPECT_OK(RegisterStringsFunctions(builder.function_registry(), options));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(ParsedExpr expr,
Parse("foo.split('') == ['h', 'e', 'l', 'l', 'o', ' ', "
"'w', 'o', 'r', 'l', 'd', '!']",
"<input>", ParserOptions{}));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Program> program,
ProtobufRuntimeAdapter::CreateProgram(*runtime, expr));
common_internal::LegacyValueManager value_factory(memory_manager,
runtime->GetTypeProvider());
Activation activation;
activation.InsertOrAssignValue("foo",
StringValue{absl::Cord("hello world!")});
ASSERT_OK_AND_ASSIGN(Value result,
program->Evaluate(activation, value_factory));
ASSERT_TRUE(result.Is<BoolValue>());
EXPECT_TRUE(result.GetBool().NativeValue());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/strings.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/strings_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
6c343a4e-4b19-467e-921b-27a1d42e21d4 | cpp | google/arolla | sequence_qtype | arolla/sequence/sequence_qtype.cc | arolla/sequence/sequence_qtype_test.cc | #include "arolla/sequence/sequence_qtype.h"
#include <memory>
#include <string>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/synchronization/mutex.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/sequence/sequence.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/meta.h"
namespace arolla {
namespace {
class SequenceQType final : public SimpleQType {
public:
explicit SequenceQType(QTypePtr value_qtype)
: SimpleQType(meta::type<Sequence>(),
"SEQUENCE[" + std::string(value_qtype->name()) + "]",
value_qtype,
"::arolla::SequenceQType") {}
};
class SequenceQTypeRegistry {
public:
QTypePtr GetSequenceQType(QTypePtr value_qtype) {
absl::WriterMutexLock l(&lock_);
auto& result = registry_[value_qtype];
if (!result) {
result = std::make_unique<SequenceQType>(value_qtype);
}
return result.get();
}
private:
absl::Mutex lock_;
absl::flat_hash_map<QTypePtr, std::unique_ptr<SequenceQType>> registry_
ABSL_GUARDED_BY(lock_);
};
}
bool IsSequenceQType(const QType* qtype) {
return fast_dynamic_downcast_final<const SequenceQType*>(qtype) != nullptr;
}
QTypePtr GetSequenceQType(QTypePtr value_qtype) {
static absl::NoDestructor<SequenceQTypeRegistry> registry;
return registry->GetSequenceQType(value_qtype);
}
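// Sequence qtypes are interned: the registry above hands out exactly one
// SequenceQType per value qtype for the life of the process, so pointer
// equality doubles as type identity. Illustrative:
//
//   QTypePtr a = GetSequenceQType(GetQType<int32_t>());
//   QTypePtr b = GetSequenceQType(GetQType<int32_t>());
//   // a == b, and a->name() == "SEQUENCE[INT32]".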
} | #include "arolla/sequence/sequence_qtype.h"
#include <cstdint>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/sequence/mutable_sequence.h"
#include "arolla/sequence/sequence.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
TEST(SequenceQTypeTest, Basics) {
const auto* qtype = GetSequenceQType<QTypePtr>();
EXPECT_EQ(qtype->name(), "SEQUENCE[QTYPE]");
EXPECT_EQ(qtype->type_info(), typeid(Sequence));
EXPECT_EQ(qtype->type_layout().AllocSize(), sizeof(Sequence));
EXPECT_EQ(qtype->type_layout().AllocAlignment().value, alignof(Sequence));
EXPECT_TRUE(qtype->type_fields().empty());
EXPECT_EQ(qtype->value_qtype(), GetQTypeQType());
EXPECT_EQ(qtype->qtype_specialization_key(), "::arolla::SequenceQType");
}
TEST(SequenceQTypeTest, IsSequenceQType) {
EXPECT_TRUE(IsSequenceQType(GetSequenceQType<QTypePtr>()));
EXPECT_TRUE(IsSequenceQType(GetSequenceQType<int32_t>()));
EXPECT_TRUE(IsSequenceQType(GetSequenceQType<float>()));
EXPECT_FALSE(IsSequenceQType(GetQTypeQType()));
EXPECT_FALSE(IsSequenceQType(GetQType<int32_t>()));
EXPECT_FALSE(IsSequenceQType(GetQType<float>()));
}
TEST(SequenceQTypeTest, TypedValue) {
ASSERT_OK_AND_ASSIGN(auto mutable_seq,
MutableSequence::Make(GetQType<int32_t>(), 3));
auto mutable_span = mutable_seq.UnsafeSpan<int32_t>();
mutable_span[0] = 1;
mutable_span[1] = 2;
mutable_span[2] = 3;
ASSERT_OK_AND_ASSIGN(auto typed_value, TypedValue::FromValueWithQType(
std::move(mutable_seq).Finish(),
GetSequenceQType<int32_t>()));
EXPECT_EQ(typed_value.GetType()->name(), "SEQUENCE[INT32]");
EXPECT_THAT(typed_value.GenReprToken(),
ReprTokenEq("sequence(1, 2, 3, value_qtype=INT32)"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/sequence/sequence_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/sequence/sequence_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
1d493d61-fa22-4cee-8c9e-7ecf1b5e0542 | cpp | tensorflow/tensorflow | gif_io | tensorflow/core/lib/gif/gif_io.cc | tensorflow/core/lib/gif/gif_io_test.cc | #include "tensorflow/core/lib/gif/gif_io.h"
#include <algorithm>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/gif.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace gif {
struct InputBufferInfo {
const uint8_t* buf;
int bytes_left;
};
int input_callback(GifFileType* gif_file, GifByteType* buf, int size) {
InputBufferInfo* const info =
reinterpret_cast<InputBufferInfo*>(gif_file->UserData);
if (info != nullptr) {
if (size > info->bytes_left) size = info->bytes_left;
memcpy(buf, info->buf, size);
info->buf += size;
info->bytes_left -= size;
return size;
}
return 0;
}
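// input_callback() above adapts giflib's pull-style reader to an in-memory
// buffer: each call copies at most bytes_left bytes, advances the cursor,
// and returns the count actually copied (0 once the buffer is exhausted or
// UserData is null), which lets DGifOpen/DGifSlurp below decode straight
// from the caller's srcdata.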
static const char* GifErrorStringNonNull(int error_code) {
const char* error_string = GifErrorString(error_code);
if (error_string == nullptr) {
return "Unknown error";
}
return error_string;
}
uint8* Decode(const void* srcdata, int datasize,
const std::function<uint8*(int, int, int, int)>& allocate_output,
string* error_string, bool expand_animations) {
int error_code = D_GIF_SUCCEEDED;
InputBufferInfo info = {reinterpret_cast<const uint8*>(srcdata), datasize};
GifFileType* gif_file =
DGifOpen(static_cast<void*>(&info), &input_callback, &error_code);
const auto cleanup = gtl::MakeCleanup([gif_file]() {
int error_code = D_GIF_SUCCEEDED;
if (gif_file && DGifCloseFile(gif_file, &error_code) != GIF_OK) {
LOG(WARNING) << "Fail to close gif file, reason: "
<< GifErrorStringNonNull(error_code);
}
});
if (error_code != D_GIF_SUCCEEDED) {
*error_string = absl::StrCat("failed to open gif file: ",
GifErrorStringNonNull(error_code));
return nullptr;
}
if (DGifSlurp(gif_file) != GIF_OK) {
*error_string = absl::StrCat("failed to slurp gif file: ",
GifErrorStringNonNull(gif_file->Error));
if (gif_file->ImageCount <= 0 ||
        gif_file->SavedImages[gif_file->ImageCount - 1].RasterBits == nullptr) {
return nullptr;
}
LOG(ERROR) << *error_string;
}
if (gif_file->ImageCount <= 0) {
*error_string = "gif file does not contain any image";
return nullptr;
}
int target_num_frames = gif_file->ImageCount;
int max_frame_width = 0;
int max_frame_height = 0;
for (int k = 0; k < target_num_frames; k++) {
SavedImage* si = &gif_file->SavedImages[k];
if (max_frame_height < si->ImageDesc.Height)
max_frame_height = si->ImageDesc.Height;
if (max_frame_width < si->ImageDesc.Width)
max_frame_width = si->ImageDesc.Width;
}
const int width = max_frame_width;
const int height = max_frame_height;
const int channel = 3;
if (!expand_animations) target_num_frames = 1;
uint8* const dstdata =
allocate_output(target_num_frames, width, height, channel);
if (!dstdata) return nullptr;
for (int64_t k = 0; k < target_num_frames; k++) {
uint8* this_dst = dstdata + k * width * channel * height;
SavedImage* this_image = &gif_file->SavedImages[k];
GifImageDesc* img_desc = &this_image->ImageDesc;
GraphicsControlBlock gcb;
DGifSavedExtensionToGCB(gif_file, k, &gcb);
int imgLeft = img_desc->Left;
int imgTop = img_desc->Top;
int imgRight = img_desc->Left + img_desc->Width;
int imgBottom = img_desc->Top + img_desc->Height;
if (k > 0) {
uint8* last_dst = dstdata + (k - 1) * width * channel * height;
for (int64_t i = 0; i < height; ++i) {
uint8* p_dst = this_dst + i * width * channel;
uint8* l_dst = last_dst + i * width * channel;
for (int64_t j = 0; j < width; ++j) {
p_dst[j * channel + 0] = l_dst[j * channel + 0];
p_dst[j * channel + 1] = l_dst[j * channel + 1];
p_dst[j * channel + 2] = l_dst[j * channel + 2];
}
}
}
if (img_desc->Left != 0 || img_desc->Top != 0 || img_desc->Width != width ||
img_desc->Height != height) {
if (k == 0) {
for (int64_t i = 0; i < height; ++i) {
uint8* p_dst = this_dst + i * width * channel;
for (int64_t j = 0; j < width; ++j) {
p_dst[j * channel + 0] = 0;
p_dst[j * channel + 1] = 0;
p_dst[j * channel + 2] = 0;
}
}
}
imgLeft = std::max(imgLeft, 0);
imgTop = std::max(imgTop, 0);
imgRight = std::min(imgRight, width);
imgBottom = std::min(imgBottom, height);
}
ColorMapObject* color_map = this_image->ImageDesc.ColorMap
? this_image->ImageDesc.ColorMap
: gif_file->SColorMap;
if (color_map == nullptr) {
*error_string = absl::StrCat("missing color map for frame ", k);
return nullptr;
}
for (int64_t i = imgTop; i < imgBottom; ++i) {
uint8* p_dst = this_dst + i * width * channel;
for (int64_t j = imgLeft; j < imgRight; ++j) {
GifByteType color_index =
this_image->RasterBits[(i - img_desc->Top) * (img_desc->Width) +
(j - img_desc->Left)];
if (color_index == gcb.TransparentColor) {
if (k == 0) {
p_dst[j * channel + 0] = 0;
p_dst[j * channel + 1] = 0;
p_dst[j * channel + 2] = 0;
}
continue;
}
if (color_index >= color_map->ColorCount) {
*error_string = absl::StrCat("found color index ", color_index,
" outside of color map range ",
color_map->ColorCount);
return nullptr;
}
const GifColorType& gif_color = color_map->Colors[color_index];
p_dst[j * channel + 0] = gif_color.Red;
p_dst[j * channel + 1] = gif_color.Green;
p_dst[j * channel + 2] = gif_color.Blue;
}
}
}
return dstdata;
}
}
} | #include "tensorflow/core/lib/gif/gif_io.h"
#include <memory>
#include "tensorflow/core/lib/png/png_io.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gif {
namespace {
const char kTestData[] = "tensorflow/core/lib/gif/testdata/";
struct DecodeGifTestCase {
const string filepath;
const int num_frames;
const int width;
const int height;
const int channels;
};
void ReadFileToStringOrDie(Env* env, const string& filename, string* output) {
TF_CHECK_OK(ReadFileToString(env, filename, output));
}
void TestDecodeGif(Env* env, DecodeGifTestCase testcase) {
string gif;
ReadFileToStringOrDie(env, testcase.filepath, &gif);
std::unique_ptr<uint8[]> imgdata;
int nframes, w, h, c;
string error_string;
imgdata.reset(gif::Decode(
gif.data(), gif.size(),
[&](int frame_cnt, int width, int height, int channels) -> uint8* {
nframes = frame_cnt;
w = width;
h = height;
c = channels;
return new uint8[static_cast<int64_t>(frame_cnt) * height * width *
channels];
},
&error_string));
ASSERT_NE(imgdata, nullptr);
ASSERT_EQ(nframes, testcase.num_frames);
ASSERT_EQ(w, testcase.width);
ASSERT_EQ(h, testcase.height);
ASSERT_EQ(c, testcase.channels);
}
TEST(GifTest, Gif) {
Env* env = Env::Default();
const string testdata_path = kTestData;
std::vector<DecodeGifTestCase> testcases(
{
{testdata_path + "lena.gif", 1, 51, 26, 3},
{testdata_path + "optimized.gif", 12, 20, 40, 3},
{testdata_path + "red_black.gif", 1, 16, 16, 3},
{testdata_path + "scan.gif", 12, 20, 40, 3},
{testdata_path + "squares.gif", 2, 16, 16, 3},
{testdata_path + "3g_multiframe.gif", 519, 1920, 1080, 3}});
for (const auto& tc : testcases) {
TestDecodeGif(env, tc);
}
}
void TestDecodeAnimatedGif(Env* env, const uint8* gif_data,
const string& png_filepath, int frame_idx) {
string png;
ReadFileToStringOrDie(env, png_filepath, &png);
png::DecodeContext decode;
png::CommonInitDecode(png, 3, 8, &decode);
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
std::unique_ptr<uint8[]> png_imgdata(
new uint8[height * width * decode.channels]);
png::CommonFinishDecode(reinterpret_cast<png_bytep>(png_imgdata.get()),
decode.channels * width * sizeof(uint8), &decode);
int frame_len = width * height * decode.channels;
int gif_idx = frame_len * frame_idx;
for (int i = 0; i < frame_len; i++) {
ASSERT_EQ(gif_data[gif_idx + i], png_imgdata[i]);
}
}
TEST(GifTest, AnimatedGif) {
Env* env = Env::Default();
const string testdata_path = kTestData;
string gif;
ReadFileToStringOrDie(env, testdata_path + "pendulum_sm.gif", &gif);
std::unique_ptr<uint8[]> gif_imgdata;
int nframes, w, h, c;
string error_string;
gif_imgdata.reset(gif::Decode(
gif.data(), gif.size(),
[&](int num_frames, int width, int height, int channels) -> uint8* {
nframes = num_frames;
w = width;
h = height;
c = channels;
return new uint8[num_frames * height * width * channels];
},
&error_string));
TestDecodeAnimatedGif(env, gif_imgdata.get(),
testdata_path + "pendulum_sm_frame0.png", 0);
TestDecodeAnimatedGif(env, gif_imgdata.get(),
testdata_path + "pendulum_sm_frame1.png", 1);
TestDecodeAnimatedGif(env, gif_imgdata.get(),
testdata_path + "pendulum_sm_frame2.png", 2);
}
void TestExpandAnimations(Env* env, const string& filepath) {
string gif;
ReadFileToStringOrDie(env, filepath, &gif);
std::unique_ptr<uint8[]> imgdata;
string error_string;
int nframes;
bool expand_animations = false;
imgdata.reset(gif::Decode(
gif.data(), gif.size(),
[&](int frame_cnt, int width, int height, int channels) -> uint8* {
nframes = frame_cnt;
return new uint8[frame_cnt * height * width * channels];
},
&error_string, expand_animations));
ASSERT_EQ(nframes, 1);
}
TEST(GifTest, ExpandAnimations) {
Env* env = Env::Default();
const string testdata_path = kTestData;
TestExpandAnimations(env, testdata_path + "scan.gif");
TestExpandAnimations(env, testdata_path + "pendulum_sm.gif");
TestExpandAnimations(env, testdata_path + "squares.gif");
}
void TestInvalidGifFormat(const string& header_bytes) {
std::unique_ptr<uint8[]> imgdata;
string error_string;
int nframes;
imgdata.reset(gif::Decode(
header_bytes.data(), header_bytes.size(),
[&](int frame_cnt, int width, int height, int channels) -> uint8* {
nframes = frame_cnt;
return new uint8[frame_cnt * height * width * channels];
},
&error_string));
string err_msg = "failed to open gif file";
ASSERT_EQ(error_string.substr(0, 23), err_msg);
}
TEST(GifTest, BadGif) {
TestInvalidGifFormat("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A");
TestInvalidGifFormat("\x42\x4d");
TestInvalidGifFormat("\xff\xd8\xff");
TestInvalidGifFormat("\x49\x49\x2A\x00");
}
TEST(GifTest, TransparentIndexOutsideColorTable) {
unsigned char encoded[43] = {
      'G', 'I', 'F', '8', '9', 'a',  // GIF89a signature.
      3, 0, 1, 0,                    // Logical screen: width=3, height=1.
      0b1'111'0'000,                 // Packed: global color table, 2 entries.
      0,                             // Background color index.
      0,                             // Pixel aspect ratio.
      0x80, 0x00, 0x00,              // Palette entry 0: dark red.
      0xFF, 0xFF, 0xFF,              // Palette entry 1: white.
      '!', 0xF9, 0x04,               // Graphic control extension, 4 bytes.
      1,                             // Packed: transparent color flag set.
      0, 0,                          // Frame delay.
      2,                             // Transparent index 2: outside the table.
      0,                             // Block terminator.
      ',', 0, 0, 0, 0,               // Image descriptor at left=0, top=0.
      3, 0, 1, 0,                    // Frame width=3, height=1.
      0,                             // Packed: no local color table.
      2,                             // LZW minimum code size.
      2,                             // Data sub-block of 2 bytes.
      0b01'000'100,                  // LZW data: pixel color indices 0, 1, 2.
      0b0'101'010'0,
      0, ';'                         // Block terminator and GIF trailer.
};
std::unique_ptr<uint8[]> imgdata;
string error_string;
int nframes;
auto allocate_image_data = [&](int frame_cnt, int width, int height,
int channels) -> uint8* {
nframes = frame_cnt;
imgdata = std::make_unique<uint8[]>(frame_cnt * height * width * channels);
return imgdata.get();
};
gif::Decode(encoded, sizeof(encoded), allocate_image_data, &error_string);
ASSERT_EQ(nframes, 1);
ASSERT_EQ(error_string, "");
uint8 expected[9] = {
0x80, 0x00, 0x00,
0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00,
};
for (int i = 0; i < 9; i++) {
ASSERT_EQ(imgdata[i], expected[i]) << "i=" << i;
}
  encoded[40] = 0b0'101'011'0;  // Rewrite the LZW data so a pixel references color index 3.
error_string.clear();
gif::Decode(encoded, sizeof(encoded), allocate_image_data, &error_string);
ASSERT_EQ(error_string, "found color index 3 outside of color map range 2");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gif/gif_io.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gif/gif_io_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ba7b5bf3-c665-4206-9012-cb249e157cf2 | cpp | google/arolla | cast_operator | arolla/qexpr/operators/core/cast_operator.h | arolla/qexpr/operators/core/cast_operator_test.cc | #ifndef AROLLA_OPERATORS_CORE_CAST_OPERATOR_H_
#define AROLLA_OPERATORS_CORE_CAST_OPERATOR_H_
#include <cstdint>
#include <limits>
#include <tuple>
#include <type_traits>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/memory/optional_value.h"
#include "arolla/util/meta.h"
#include "arolla/util/repr.h"
namespace arolla {
template <typename DST>
struct CastOp {
using run_on_missing = std::true_type;
using DstTypes =
meta::type_list<bool, int32_t, int64_t, uint64_t, float, double>;
using SrcTypes =
meta::type_list<bool, int32_t, int64_t, uint64_t, float, double>;
static_assert(meta::contains_v<DstTypes, DST>);
template <typename SRC>
static constexpr SRC max_float_to_int_safe_value() {
using dst_limits = std::numeric_limits<DST>;
using src_limits = std::numeric_limits<SRC>;
static_assert(dst_limits::is_integer);
static_assert(std::is_floating_point_v<SRC>);
SRC result = 0;
int i = 0;
for (; i < src_limits::digits; ++i) {
result *= 2;
result += 1;
}
for (; i < dst_limits::digits; ++i) {
result *= 2;
}
for (; i > dst_limits::digits; --i) {
result /= 2;
}
return result;
}
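  // max_float_to_int_safe_value() above constructs, in exact constexpr
  // floating-point arithmetic, the largest SRC value whose truncation fits
  // DST: a mantissa of src_limits::digits one-bits scaled to DST's width.
  // Worked example (assuming DST = int64_t, SRC = float with 24 mantissa
  // digits): (2^24 - 1) * 2^39 == 2^63 - 2^39, the largest float strictly
  // below 2^63.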
template <typename SRC>
static constexpr SRC min_float_to_int_safe_value() {
using dst_limits = std::numeric_limits<DST>;
using src_limits = std::numeric_limits<SRC>;
static_assert(dst_limits::is_integer);
static_assert(std::is_floating_point_v<SRC>);
if constexpr (!dst_limits::is_signed) {
return 0.0;
} else {
SRC result = 1;
int i = 0;
for (; i < src_limits::digits; ++i) {
result *= 2;
}
for (; i < dst_limits::digits; ++i) {
result *= 2;
}
for (; i > dst_limits::digits; --i) {
result += 1;
result /= 2;
}
return -result;
}
}
template <typename SRC>
static constexpr auto safe_range() {
static_assert(meta::contains_v<SrcTypes, SRC>);
using dst_limits = std::numeric_limits<DST>;
using src_limits = std::numeric_limits<SRC>;
if constexpr (std::is_same_v<SRC, DST>) {
return std::make_tuple();
} else if constexpr (std::is_integral_v<DST> && std::is_integral_v<SRC>) {
constexpr SRC safe_min =
std::max<int64_t>(dst_limits::min(), src_limits::min());
constexpr SRC safe_max =
std::min<uint64_t>(dst_limits::max(), src_limits::max());
if constexpr (safe_min <= src_limits::min() &&
safe_max >= src_limits::max()) {
return std::make_tuple();
} else {
return std::tuple<SRC, SRC>(safe_min, safe_max);
}
} else if constexpr (std::is_integral_v<DST> &&
std::is_floating_point_v<SRC>) {
return std::tuple<SRC, SRC>(min_float_to_int_safe_value<SRC>(),
max_float_to_int_safe_value<SRC>());
} else if constexpr (std::is_floating_point_v<DST> &&
std::is_floating_point_v<SRC>) {
constexpr bool ub_check =
(src_limits::max() <= dst_limits::max() ||
static_cast<DST>(src_limits::max()) == dst_limits::max() ||
static_cast<DST>(src_limits::max()) == dst_limits::infinity());
static_assert(ub_check);
return std::make_tuple();
} else {
return std::make_tuple();
}
}
template <typename SRC>
auto operator()(SRC src) const {
constexpr auto src_range = safe_range<SRC>();
if constexpr (std::tuple_size_v<decltype(src_range)> == 0) {
return static_cast<DST>(src);
} else {
using ReturnType = absl::StatusOr<DST>;
const auto& [range_min, range_max] = src_range;
if (range_min <= src && src <= range_max) {
return ReturnType(static_cast<DST>(src));
} else {
return ReturnType(absl::InvalidArgumentError(absl::StrCat(
"cannot cast ", ::arolla::Repr(src), " to ",
std::is_unsigned_v<DST> ? "u" : "", "int", 8 * sizeof(DST))));
}
}
}
};
struct ToBoolOp {
using run_on_missing = std::true_type;
template <typename T>
bool operator()(const T& x) const {
return x != 0;
}
};
struct ToOptionalOp {
using run_on_missing = std::true_type;
template <typename T>
OptionalValue<T> operator()(const T& x) const {
return OptionalValue<T>(x);
}
};
struct GetOptionalValueOp {
template <typename T>
absl::StatusOr<T> operator()(const OptionalValue<T>& x) const {
if (!x.present) {
return absl::FailedPreconditionError(
"core.get_optional_value expects present value, got missing");
}
return x.value;
}
};
}
#endif | #include "arolla/qexpr/operators/core/cast_operator.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <tuple>
#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/util/meta.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::Eq;
TEST(CastOperatorTest, CastToInt32UB) {
constexpr auto kInt32Min = std::numeric_limits<int32_t>::min();
constexpr auto kInt32Max = std::numeric_limits<int32_t>::max();
constexpr auto kDoubleInt32Min = static_cast<double>(kInt32Min);
constexpr auto kDoubleInt32Max = static_cast<double>(kInt32Max);
const auto to_int32 = CastOp<int32_t>();
EXPECT_THAT(to_int32(kDoubleInt32Min), IsOkAndHolds(kInt32Min));
EXPECT_THAT(to_int32(kDoubleInt32Max), IsOkAndHolds(kInt32Max));
EXPECT_THAT(to_int32(std::nextafter(kDoubleInt32Min - 1., 0.)),
IsOkAndHolds(kInt32Min));
EXPECT_THAT(to_int32(std::nextafter(kDoubleInt32Max + 1., 0.)),
IsOkAndHolds(kInt32Max));
EXPECT_THAT(to_int32(kDoubleInt32Min - 1.),
StatusIs(absl::StatusCode::kInvalidArgument,
"cannot cast float64{-2147483649} to int32"));
EXPECT_THAT(to_int32(kDoubleInt32Max + 1.),
StatusIs(absl::StatusCode::kInvalidArgument,
"cannot cast float64{2147483648} to int32"));
}
TEST(CastOperatorTest, CastFromUInt64) {
EXPECT_THAT((CastOp<int32_t>()(uint64_t{1})), IsOkAndHolds(int32_t{1}));
EXPECT_THAT((CastOp<float>()(uint64_t{1})), Eq(1.0f));
EXPECT_THAT((CastOp<double>()(uint64_t{1})), Eq(1.0));
EXPECT_THAT((CastOp<int64_t>()(uint64_t{1ull << 63})),
StatusIs(absl::StatusCode::kInvalidArgument,
"cannot cast uint64{9223372036854775808} to int64"));
}
TEST(CastOperatorTest, CastToUInt64) {
CastOp<uint64_t> to_uint64;
EXPECT_THAT(to_uint64(std::numeric_limits<int64_t>::max()),
IsOkAndHolds(uint64_t{std::numeric_limits<int64_t>::max()}));
EXPECT_THAT(to_uint64(double{1.0}), IsOkAndHolds(uint64_t{1}));
EXPECT_THAT(to_uint64(float{1.0f}), IsOkAndHolds(uint64_t{1}));
EXPECT_THAT(to_uint64(uint64_t{1}), Eq(uint64_t{1}));
EXPECT_THAT(to_uint64(float{-1.0f}),
StatusIs(absl::StatusCode::kInvalidArgument,
"cannot cast -1. to uint64"));
EXPECT_THAT(to_uint64(double{-1.0f}),
StatusIs(absl::StatusCode::kInvalidArgument,
"cannot cast float64{-1} to uint64"));
EXPECT_THAT(
to_uint64(int32_t{-1}),
StatusIs(absl::StatusCode::kInvalidArgument, "cannot cast -1 to uint64"));
EXPECT_THAT(to_uint64(int64_t{-1}),
StatusIs(absl::StatusCode::kInvalidArgument,
"cannot cast int64{-1} to uint64"));
}
TEST(CastOperatorTest, CastTo_SafeRange_FloatToInt) {
using Srcs = meta::type_list<float, double>;
using Dsts = meta::type_list<int32_t, int64_t, uint64_t>;
meta::foreach_type<Srcs>([](auto src_type) {
using SRC = typename decltype(src_type)::type;
meta::foreach_type<Dsts>([](auto dst_type) {
using DST = typename decltype(dst_type)::type;
using dst_limits = std::numeric_limits<DST>;
using src_limits = std::numeric_limits<SRC>;
const auto [range_min, range_max] =
CastOp<DST>::template safe_range<SRC>();
ASSERT_EQ(static_cast<DST>(range_min), dst_limits::min());
if (!std::is_unsigned_v<DST>) {
ASSERT_NE(
std::trunc(range_min),
std::trunc(std::nextafter(range_min, -src_limits::infinity())));
}
ASSERT_LE(static_cast<DST>(range_max), dst_limits::max());
ASSERT_GE(std::nextafter(range_max, src_limits::infinity()),
std::exp2(static_cast<SRC>(dst_limits::digits)));
});
});
}
TEST(CastOperatorTest, CastTo_SafeRange_IntToInt) {
ASSERT_EQ(CastOp<int32_t>::safe_range<uint64_t>(),
(std::tuple<uint64_t, uint64_t>(0, (1ull << 31) - 1)));
ASSERT_EQ(CastOp<int64_t>::safe_range<uint64_t>(),
(std::tuple<uint64_t, uint64_t>(0, (1ull << 63) - 1)));
ASSERT_EQ(CastOp<uint64_t>::safe_range<int32_t>(),
(std::tuple<uint32_t, uint32_t>(0, (1u << 31) - 1)));
ASSERT_EQ(CastOp<uint64_t>::safe_range<int64_t>(),
(std::tuple<int64_t, int64_t>(0, (1ull << 63) - 1)));
}
TEST(CastOperatorTest, CastTo_SafeRange_Unneeded) {
ASSERT_EQ(CastOp<int64_t>::safe_range<int32_t>(), std::tuple<>());
ASSERT_EQ(CastOp<int32_t>::safe_range<bool>(), std::tuple<>());
ASSERT_EQ(CastOp<float>::safe_range<double>(), std::tuple<>());
ASSERT_EQ(CastOp<double>::safe_range<float>(), std::tuple<>());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/core/cast_operator.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/core/cast_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
df23d9ae-e864-4c4b-8284-7fedafb1249f | cpp | google/tensorstore | integer_overflow | tensorstore/internal/integer_overflow.h | tensorstore/internal/integer_overflow_test.cc | #ifndef TENSORSTORE_INTERNAL_INTEGER_OVERFLOW_H_
#define TENSORSTORE_INTERNAL_INTEGER_OVERFLOW_H_
#include <limits>
#include <type_traits>
#include "absl/base/attributes.h"
namespace tensorstore {
namespace internal {
namespace wrap_on_overflow {
#if ABSL_HAVE_ATTRIBUTE(no_sanitize) && defined(__clang__)
#define TENSORSTORE_ATTRIBUTE_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW \
__attribute__((no_sanitize("unsigned-integer-overflow")))
#else
#define TENSORSTORE_ATTRIBUTE_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW
#endif
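// Defines Add/Subtract/Multiply that wrap on overflow by performing the
// arithmetic in the corresponding unsigned type, where wraparound is
// well defined.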
#define TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP(OP, NAME) \
template <typename T> \
TENSORSTORE_ATTRIBUTE_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW \
std::enable_if_t<std::is_integral<T>::value, T> \
NAME(T a, T b) { \
using UnsignedT = std::make_unsigned_t<T>; \
return static_cast<T>(static_cast<UnsignedT>( \
static_cast<UnsignedT>(a) OP static_cast<UnsignedT>(b))); \
  }
TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP(+, Add)
TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP(-, Subtract)
TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP(*, Multiply)
#undef TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP
template <typename AccumType, typename T0, typename T1>
inline AccumType InnerProduct(std::ptrdiff_t n, const T0* a, const T1* b) {
AccumType sum = 0;
for (std::ptrdiff_t i = 0; i < n; ++i) {
sum = Add(sum, Multiply(static_cast<AccumType>(a[i]),
static_cast<AccumType>(b[i])));
}
return sum;
}
template <ptrdiff_t N, typename AccumType, typename T0, typename T1>
inline AccumType InnerProduct(const T0* a, const T1* b) {
AccumType sum = 0;
for (std::ptrdiff_t i = 0; i < N; ++i) {
sum = Add(sum, Multiply(static_cast<AccumType>(a[i]),
static_cast<AccumType>(b[i])));
}
return sum;
}
}
template <typename T>
constexpr bool AddOverflow(T a, T b, T* result) {
#if defined(__clang__) || !defined(_MSC_VER)
return __builtin_add_overflow(a, b, result);
#else
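// Portable fallback for compilers without __builtin_add_overflow: compute
// the wrapped sum, then detect signed overflow from the signs and
// magnitudes of the inputs.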
*result = wrap_on_overflow::Add(a, b);
return (a > 0 && (b > std::numeric_limits<T>::max() - a)) ||
(a < 0 && (b < std::numeric_limits<T>::min() - a));
#endif
}
template <typename T>
constexpr T AddSaturate(T a, T b) {
T result;
if (AddOverflow(a, b, &result)) {
result = (b >= 0 ? std::numeric_limits<T>::max()
: std::numeric_limits<T>::min());
}
return result;
}
template <typename T>
constexpr bool SubOverflow(T a, T b, T* result) {
#if defined(__clang__) || !defined(_MSC_VER)
return __builtin_sub_overflow(a, b, result);
#else
*result = wrap_on_overflow::Subtract(a, b);
return (b < 0 && (a > std::numeric_limits<T>::max() + b)) ||
(b > 0 && (a < std::numeric_limits<T>::min() + b));
#endif
}
template <typename T>
constexpr bool MulOverflow(T a, T b, T* result) {
#if defined(__clang__) || !defined(_MSC_VER)
return __builtin_mul_overflow(a, b, result);
#else
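// Portable fallback: multiply with wraparound, then check that dividing
// the result by b recovers a (guarding against b == 0).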
const T r = *result = wrap_on_overflow::Multiply(a, b);
return b && (r / b) != a;
#endif
}
}
}
#endif | #include "tensorstore/internal/integer_overflow.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::AddOverflow;
using ::tensorstore::internal::AddSaturate;
using ::tensorstore::internal::MulOverflow;
using ::tensorstore::internal::SubOverflow;
using ::tensorstore::internal::wrap_on_overflow::Add;
using ::tensorstore::internal::wrap_on_overflow::InnerProduct;
using ::tensorstore::internal::wrap_on_overflow::Multiply;
TEST(AddTest, Overflow) {
EXPECT_EQ(std::int32_t{-0x80000000LL},
Add(std::int32_t{0x40000000}, std::int32_t{0x40000000}));
}
TEST(MultiplyTest, Overflow) {
EXPECT_EQ(std::int32_t{-0x80000000LL},
Multiply(std::int32_t{0x40000000}, std::int32_t{2}));
}
TEST(InnerProductTest, Basic) {
const Index a[] = {1, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(1 * 4 + 2 * 5 + 3 * 6, InnerProduct<Index>(3, a, b));
}
TEST(InnerProductTest, Convert) {
const uint32_t a[] = {0x80000000};
const uint32_t b[] = {2};
EXPECT_EQ(Index{0x100000000}, InnerProduct<Index>(1, a, b));
}
TEST(InnerProductTest, WrapOnOverflowMultiply) {
const Index a[] = {Index(1) << 62, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(Index{2 * 5 + 3 * 6}, InnerProduct<Index>(3, a, b));
}
TEST(InnerProductTest, WrapOnOverflowAdd) {
const Index a[] = {Index(1) << 62, Index(1) << 62};
const Index b[] = {2, 2};
EXPECT_EQ(Index{0}, InnerProduct<Index>(2, a, b));
}
TEST(MulOverflow, Uint32) {
uint32_t a, b, c;
a = 0x7fffffff;
b = 2;
EXPECT_EQ(false, MulOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0xfffffffe}, c);
EXPECT_EQ(false, MulOverflow(b, a, &c));
EXPECT_EQ(uint32_t{0xfffffffe}, c);
a = 0x80000000;
c = 2;
EXPECT_EQ(true, MulOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0}, c);
EXPECT_EQ(true, MulOverflow(b, a, &c));
EXPECT_EQ(uint32_t{0}, c);
}
TEST(MulOverflow, Int32) {
std::int32_t a, b, c;
a = -0x40000000;
b = 2;
EXPECT_EQ(false, MulOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
EXPECT_EQ(false, MulOverflow(b, a, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
a = 0x40000000;
c = 2;
EXPECT_EQ(true, MulOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
EXPECT_EQ(true, MulOverflow(b, a, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
}
TEST(AddOverflow, Uint32) {
uint32_t a, b, c;
a = 0x7fffffff;
b = 0x80000000;
EXPECT_EQ(false, AddOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0xffffffff}, c);
EXPECT_EQ(false, AddOverflow(b, a, &c));
EXPECT_EQ(uint32_t{0xffffffff}, c);
a = 0x80000000;
c = 0x80000000;
EXPECT_EQ(true, MulOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0}, c);
}
TEST(AddOverflow, Int32) {
std::int32_t a, b, c;
a = 0x40000000;
b = 0x3fffffff;
EXPECT_EQ(false, AddOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{0x7fffffff}, c);
EXPECT_EQ(false, AddOverflow(b, a, &c));
EXPECT_EQ(std::int32_t{0x7fffffff}, c);
a = -0x40000000;
b = -0x40000000;
EXPECT_EQ(false, AddOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
a = 0x40000000;
b = 0x40000000;
EXPECT_EQ(true, AddOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
}
TEST(AddSaturate, Int32) {
EXPECT_EQ(0x7fffffff, AddSaturate<int32_t>(0x40000000, 0x3fffffff));
EXPECT_EQ(0x7fffffff, AddSaturate<int32_t>(0x40000000, 0x40000000));
EXPECT_EQ(-0x80000000, AddSaturate<int32_t>(-0x40000000, -0x40000000));
EXPECT_EQ(-0x80000000, AddSaturate<int32_t>(-0x40000000, -0x41000000));
}
TEST(SubOverflow, Uint32) {
uint32_t a, b, c;
a = 0x80000000;
b = 0x7fffffff;
EXPECT_EQ(false, SubOverflow(a, b, &c));
EXPECT_EQ(uint32_t{1}, c);
a = 0x7fffffff;
b = 0x80000000;
EXPECT_EQ(true, SubOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0xffffffff}, c);
}
TEST(SubOverflow, Int32) {
std::int32_t a, b, c;
a = -0x40000000;
b = 0x40000000;
EXPECT_EQ(false, SubOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
a = 0x40000000;
b = -0x40000000;
EXPECT_EQ(true, SubOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
a = -0x40000001;
b = 0x40000000;
EXPECT_EQ(true, SubOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{0x7fffffff}, c);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/integer_overflow.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/integer_overflow_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d67fd247-b4ef-42cf-bc1c-4fa0a8cfd35b | cpp | google/cel-cpp | type_param_type | common/types/type_param_type.h | common/types/type_param_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_TYPE_PARAM_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_TYPE_PARAM_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
class TypeParamType final {
public:
static constexpr TypeKind kKind = TypeKind::kTypeParam;
explicit TypeParamType(absl::string_view name ABSL_ATTRIBUTE_LIFETIME_BOUND)
: name_(name) {}
TypeParamType() = default;
TypeParamType(const TypeParamType&) = default;
TypeParamType(TypeParamType&&) = default;
TypeParamType& operator=(const TypeParamType&) = default;
TypeParamType& operator=(TypeParamType&&) = default;
static TypeKind kind() { return kKind; }
absl::string_view name() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return name_; }
static TypeParameters GetParameters();
std::string DebugString() const { return std::string(name()); }
friend void swap(TypeParamType& lhs, TypeParamType& rhs) noexcept {
using std::swap;
swap(lhs.name_, rhs.name_);
}
private:
absl::string_view name_;
};
inline bool operator==(const TypeParamType& lhs, const TypeParamType& rhs) {
return lhs.name() == rhs.name();
}
inline bool operator!=(const TypeParamType& lhs, const TypeParamType& rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, const TypeParamType& type) {
return H::combine(std::move(state), type.name());
}
inline std::ostream& operator<<(std::ostream& out, const TypeParamType& type) {
return out << type.DebugString();
}
}
#endif | #include "common/type.h"
#include <sstream>
#include "absl/hash/hash.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(TypeParamType, Kind) {
EXPECT_EQ(TypeParamType("T").kind(), TypeParamType::kKind);
EXPECT_EQ(Type(TypeParamType("T")).kind(), TypeParamType::kKind);
}
TEST(TypeParamType, Name) {
EXPECT_EQ(TypeParamType("T").name(), "T");
EXPECT_EQ(Type(TypeParamType("T")).name(), "T");
}
TEST(TypeParamType, DebugString) {
{
std::ostringstream out;
out << TypeParamType("T");
EXPECT_EQ(out.str(), "T");
}
{
std::ostringstream out;
out << Type(TypeParamType("T"));
EXPECT_EQ(out.str(), "T");
}
}
TEST(TypeParamType, Hash) {
EXPECT_EQ(absl::HashOf(TypeParamType("T")), absl::HashOf(TypeParamType("T")));
}
TEST(TypeParamType, Equal) {
EXPECT_EQ(TypeParamType("T"), TypeParamType("T"));
EXPECT_EQ(Type(TypeParamType("T")), TypeParamType("T"));
EXPECT_EQ(TypeParamType("T"), Type(TypeParamType("T")));
EXPECT_EQ(Type(TypeParamType("T")), Type(TypeParamType("T")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/type_param_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/type_param_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
54b89a8b-1450-4244-8eb6-f5d3b0bf5712 | cpp | google/quiche | hpack_decoder | quiche/http2/hpack/decoder/hpack_decoder.cc | quiche/http2/hpack/decoder/hpack_decoder_test.cc | #include "quiche/http2/hpack/decoder/hpack_decoder.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/common/platform/api/quiche_flag_utils.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
HpackDecoder::HpackDecoder(HpackDecoderListener* listener,
size_t max_string_size)
: decoder_state_(listener),
entry_buffer_(&decoder_state_, max_string_size),
block_decoder_(&entry_buffer_),
error_(HpackDecodingError::kOk) {}
HpackDecoder::~HpackDecoder() = default;
void HpackDecoder::set_max_string_size_bytes(size_t max_string_size_bytes) {
entry_buffer_.set_max_string_size_bytes(max_string_size_bytes);
}
void HpackDecoder::ApplyHeaderTableSizeSetting(uint32_t max_header_table_size) {
decoder_state_.ApplyHeaderTableSizeSetting(max_header_table_size);
}
bool HpackDecoder::StartDecodingBlock() {
QUICHE_DVLOG(3) << "HpackDecoder::StartDecodingBlock, error_detected="
<< (DetectError() ? "true" : "false");
if (DetectError()) {
return false;
}
block_decoder_.Reset();
decoder_state_.OnHeaderBlockStart();
return true;
}
bool HpackDecoder::DecodeFragment(DecodeBuffer* db) {
QUICHE_DVLOG(3) << "HpackDecoder::DecodeFragment, error_detected="
<< (DetectError() ? "true" : "false")
<< ", size=" << db->Remaining();
if (DetectError()) {
QUICHE_CODE_COUNT_N(decompress_failure_3, 3, 23);
return false;
}
DecodeStatus status = block_decoder_.Decode(db);
if (status == DecodeStatus::kDecodeError) {
ReportError(block_decoder_.error());
QUICHE_CODE_COUNT_N(decompress_failure_3, 4, 23);
return false;
} else if (DetectError()) {
QUICHE_CODE_COUNT_N(decompress_failure_3, 5, 23);
return false;
}
QUICHE_DCHECK_EQ(block_decoder_.before_entry(),
status == DecodeStatus::kDecodeDone)
<< status;
if (!block_decoder_.before_entry()) {
entry_buffer_.BufferStringsIfUnbuffered();
}
return true;
}
bool HpackDecoder::EndDecodingBlock() {
QUICHE_DVLOG(3) << "HpackDecoder::EndDecodingBlock, error_detected="
<< (DetectError() ? "true" : "false");
if (DetectError()) {
QUICHE_CODE_COUNT_N(decompress_failure_3, 6, 23);
return false;
}
if (!block_decoder_.before_entry()) {
ReportError(HpackDecodingError::kTruncatedBlock);
QUICHE_CODE_COUNT_N(decompress_failure_3, 7, 23);
return false;
}
decoder_state_.OnHeaderBlockEnd();
if (DetectError()) {
QUICHE_CODE_COUNT_N(decompress_failure_3, 8, 23);
return false;
}
return true;
}
bool HpackDecoder::DetectError() {
if (error_ != HpackDecodingError::kOk) {
return true;
}
if (decoder_state_.error() != HpackDecodingError::kOk) {
QUICHE_DVLOG(2) << "Error detected in decoder_state_";
QUICHE_CODE_COUNT_N(decompress_failure_3, 10, 23);
error_ = decoder_state_.error();
}
return error_ != HpackDecodingError::kOk;
}
void HpackDecoder::ReportError(HpackDecodingError error) {
QUICHE_DVLOG(3) << "HpackDecoder::ReportError is new="
<< (error_ == HpackDecodingError::kOk ? "true" : "false")
<< ", error: " << HpackDecodingErrorToString(error);
if (error_ == HpackDecodingError::kOk) {
error_ = error;
decoder_state_.listener()->OnHeaderErrorDetected(
HpackDecodingErrorToString(error));
}
}
} | #include "quiche/http2/hpack/decoder/hpack_decoder.h"
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/hpack/decoder/hpack_decoder_listener.h"
#include "quiche/http2/hpack/decoder/hpack_decoder_state.h"
#include "quiche/http2/hpack/decoder/hpack_decoder_tables.h"
#include "quiche/http2/hpack/http2_hpack_constants.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/hpack_block_builder.h"
#include "quiche/http2/test_tools/hpack_example.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/random_util.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::AssertionResult;
using ::testing::AssertionSuccess;
using ::testing::ElementsAreArray;
using ::testing::Eq;
namespace http2 {
namespace test {
class HpackDecoderStatePeer {
public:
static HpackDecoderTables* GetDecoderTables(HpackDecoderState* state) {
return &state->decoder_tables_;
}
static void set_listener(HpackDecoderState* state,
HpackDecoderListener* listener) {
state->listener_ = listener;
}
};
class HpackDecoderPeer {
public:
static HpackDecoderState* GetDecoderState(HpackDecoder* decoder) {
return &decoder->decoder_state_;
}
static HpackDecoderTables* GetDecoderTables(HpackDecoder* decoder) {
return HpackDecoderStatePeer::GetDecoderTables(GetDecoderState(decoder));
}
};
namespace {
typedef std::pair<std::string, std::string> HpackHeaderEntry;
typedef std::vector<HpackHeaderEntry> HpackHeaderEntries;
class MockHpackDecoderListener : public HpackDecoderListener {
public:
MOCK_METHOD(void, OnHeaderListStart, (), (override));
MOCK_METHOD(void, OnHeader, (absl::string_view name, absl::string_view value),
(override));
MOCK_METHOD(void, OnHeaderListEnd, (), (override));
MOCK_METHOD(void, OnHeaderErrorDetected, (absl::string_view error_message),
(override));
};
class HpackDecoderTest : public quiche::test::QuicheTestWithParam<bool>,
public HpackDecoderListener {
protected:
HpackDecoderTest() : decoder_(this, 4096) {
fragment_the_hpack_block_ = GetParam();
}
~HpackDecoderTest() override = default;
void OnHeaderListStart() override {
ASSERT_FALSE(saw_start_);
ASSERT_FALSE(saw_end_);
saw_start_ = true;
header_entries_.clear();
}
void OnHeader(absl::string_view name, absl::string_view value) override {
ASSERT_TRUE(saw_start_);
ASSERT_FALSE(saw_end_);
header_entries_.emplace_back(name, value);
}
void OnHeaderListEnd() override {
ASSERT_TRUE(saw_start_);
ASSERT_FALSE(saw_end_);
ASSERT_TRUE(error_messages_.empty());
saw_end_ = true;
}
void OnHeaderErrorDetected(absl::string_view error_message) override {
ASSERT_TRUE(saw_start_);
error_messages_.push_back(std::string(error_message));
HpackDecoderStatePeer::set_listener(
HpackDecoderPeer::GetDecoderState(&decoder_), &mock_listener_);
}
AssertionResult DecodeBlock(absl::string_view block) {
QUICHE_VLOG(1) << "HpackDecoderTest::DecodeBlock";
HTTP2_VERIFY_FALSE(decoder_.DetectError());
HTTP2_VERIFY_TRUE(error_messages_.empty());
HTTP2_VERIFY_FALSE(saw_start_);
HTTP2_VERIFY_FALSE(saw_end_);
header_entries_.clear();
HTTP2_VERIFY_FALSE(decoder_.DetectError());
HTTP2_VERIFY_TRUE(decoder_.StartDecodingBlock());
HTTP2_VERIFY_FALSE(decoder_.DetectError());
if (fragment_the_hpack_block_) {
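// Feed the block to the decoder in randomly sized fragments to exercise
// the incremental decoding paths.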
while (!block.empty()) {
size_t fragment_size = random_.RandomSizeSkewedLow(block.size());
DecodeBuffer db(block.substr(0, fragment_size));
HTTP2_VERIFY_TRUE(decoder_.DecodeFragment(&db));
HTTP2_VERIFY_EQ(0u, db.Remaining());
block.remove_prefix(fragment_size);
}
} else {
DecodeBuffer db(block);
HTTP2_VERIFY_TRUE(decoder_.DecodeFragment(&db));
HTTP2_VERIFY_EQ(0u, db.Remaining());
}
HTTP2_VERIFY_FALSE(decoder_.DetectError());
HTTP2_VERIFY_TRUE(decoder_.EndDecodingBlock());
if (saw_end_) {
HTTP2_VERIFY_FALSE(decoder_.DetectError());
HTTP2_VERIFY_TRUE(error_messages_.empty());
} else {
HTTP2_VERIFY_TRUE(decoder_.DetectError());
HTTP2_VERIFY_FALSE(error_messages_.empty());
}
saw_start_ = saw_end_ = false;
return AssertionSuccess();
}
const HpackDecoderTables& GetDecoderTables() {
return *HpackDecoderPeer::GetDecoderTables(&decoder_);
}
const HpackStringPair* Lookup(size_t index) {
return GetDecoderTables().Lookup(index);
}
size_t current_header_table_size() {
return GetDecoderTables().current_header_table_size();
}
size_t header_table_size_limit() {
return GetDecoderTables().header_table_size_limit();
}
void set_header_table_size_limit(size_t size) {
HpackDecoderPeer::GetDecoderTables(&decoder_)->DynamicTableSizeUpdate(size);
}
AssertionResult VerifyEntry(size_t dynamic_index, const char* name,
const char* value) {
const HpackStringPair* entry =
Lookup(dynamic_index + kFirstDynamicTableIndex - 1);
HTTP2_VERIFY_NE(entry, nullptr);
HTTP2_VERIFY_EQ(entry->name, name);
HTTP2_VERIFY_EQ(entry->value, value);
return AssertionSuccess();
}
AssertionResult VerifyNoEntry(size_t dynamic_index) {
const HpackStringPair* entry =
Lookup(dynamic_index + kFirstDynamicTableIndex - 1);
HTTP2_VERIFY_EQ(entry, nullptr);
return AssertionSuccess();
}
AssertionResult VerifyDynamicTableContents(
const std::vector<std::pair<const char*, const char*>>& entries) {
size_t index = 1;
for (const auto& entry : entries) {
HTTP2_VERIFY_SUCCESS(VerifyEntry(index, entry.first, entry.second));
++index;
}
HTTP2_VERIFY_SUCCESS(VerifyNoEntry(index));
return AssertionSuccess();
}
Http2Random random_;
HpackDecoder decoder_;
testing::StrictMock<MockHpackDecoderListener> mock_listener_;
HpackHeaderEntries header_entries_;
std::vector<std::string> error_messages_;
bool fragment_the_hpack_block_;
bool saw_start_ = false;
bool saw_end_ = false;
};
INSTANTIATE_TEST_SUITE_P(AllWays, HpackDecoderTest, ::testing::Bool());
TEST_P(HpackDecoderTest, C3_RequestExamples) {
std::string hpack_block = HpackExampleToStringOrDie(R"(
82 | == Indexed - Add ==
| idx = 2
| -> :method: GET
86 | == Indexed - Add ==
| idx = 6
| -> :scheme: http
84 | == Indexed - Add ==
| idx = 4
| -> :path: /
41 | == Literal indexed ==
| Indexed name (idx = 1)
| :authority
0f | Literal value (len = 15)
7777 772e 6578 616d 706c 652e 636f 6d | www.example.com
| -> :authority:
| www.example.com
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":method", "GET"},
HpackHeaderEntry{":scheme", "http"},
HpackHeaderEntry{":path", "/"},
HpackHeaderEntry{":authority", "www.example.com"},
}));
ASSERT_TRUE(VerifyDynamicTableContents({{":authority", "www.example.com"}}));
ASSERT_EQ(57u, current_header_table_size());
hpack_block = HpackExampleToStringOrDie(R"(
82 | == Indexed - Add ==
| idx = 2
| -> :method: GET
86 | == Indexed - Add ==
| idx = 6
| -> :scheme: http
84 | == Indexed - Add ==
| idx = 4
| -> :path: /
be | == Indexed - Add ==
| idx = 62
| -> :authority:
| www.example.com
58 | == Literal indexed ==
| Indexed name (idx = 24)
| cache-control
08 | Literal value (len = 8)
6e6f 2d63 6163 6865 | no-cache
| -> cache-control: no-cache
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":method", "GET"},
HpackHeaderEntry{":scheme", "http"},
HpackHeaderEntry{":path", "/"},
HpackHeaderEntry{":authority", "www.example.com"},
HpackHeaderEntry{"cache-control", "no-cache"},
}));
ASSERT_TRUE(VerifyDynamicTableContents(
{{"cache-control", "no-cache"}, {":authority", "www.example.com"}}));
ASSERT_EQ(110u, current_header_table_size());
hpack_block = HpackExampleToStringOrDie(R"(
82 | == Indexed - Add ==
| idx = 2
| -> :method: GET
87 | == Indexed - Add ==
| idx = 7
| -> :scheme: https
85 | == Indexed - Add ==
| idx = 5
| -> :path: /index.html
bf | == Indexed - Add ==
| idx = 63
| -> :authority:
| www.example.com
40 | == Literal indexed ==
0a | Literal name (len = 10)
6375 7374 6f6d 2d6b 6579 | custom-key
0c | Literal value (len = 12)
6375 7374 6f6d 2d76 616c 7565 | custom-value
| -> custom-key:
| custom-value
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":method", "GET"},
HpackHeaderEntry{":scheme", "https"},
HpackHeaderEntry{":path", "/index.html"},
HpackHeaderEntry{":authority", "www.example.com"},
HpackHeaderEntry{"custom-key", "custom-value"},
}));
ASSERT_TRUE(VerifyDynamicTableContents({{"custom-key", "custom-value"},
{"cache-control", "no-cache"},
{":authority", "www.example.com"}}));
ASSERT_EQ(164u, current_header_table_size());
}
TEST_P(HpackDecoderTest, C4_RequestExamplesWithHuffmanEncoding) {
std::string hpack_block = HpackExampleToStringOrDie(R"(
82 | == Indexed - Add ==
| idx = 2
| -> :method: GET
86 | == Indexed - Add ==
| idx = 6
| -> :scheme: http
84 | == Indexed - Add ==
| idx = 4
| -> :path: /
41 | == Literal indexed ==
| Indexed name (idx = 1)
| :authority
8c | Literal value (len = 12)
| Huffman encoded:
f1e3 c2e5 f23a 6ba0 ab90 f4ff | .....:k.....
| Decoded:
| www.example.com
| -> :authority:
| www.example.com
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":method", "GET"},
HpackHeaderEntry{":scheme", "http"},
HpackHeaderEntry{":path", "/"},
HpackHeaderEntry{":authority", "www.example.com"},
}));
ASSERT_TRUE(VerifyDynamicTableContents({{":authority", "www.example.com"}}));
ASSERT_EQ(57u, current_header_table_size());
hpack_block = HpackExampleToStringOrDie(R"(
82 | == Indexed - Add ==
| idx = 2
| -> :method: GET
86 | == Indexed - Add ==
| idx = 6
| -> :scheme: http
84 | == Indexed - Add ==
| idx = 4
| -> :path: /
be | == Indexed - Add ==
| idx = 62
| -> :authority:
| www.example.com
58 | == Literal indexed ==
| Indexed name (idx = 24)
| cache-control
86 | Literal value (len = 6)
| Huffman encoded:
a8eb 1064 9cbf | ...d..
| Decoded:
| no-cache
| -> cache-control: no-cache
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":method", "GET"},
HpackHeaderEntry{":scheme", "http"},
HpackHeaderEntry{":path", "/"},
HpackHeaderEntry{":authority", "www.example.com"},
HpackHeaderEntry{"cache-control", "no-cache"},
}));
ASSERT_TRUE(VerifyDynamicTableContents(
{{"cache-control", "no-cache"}, {":authority", "www.example.com"}}));
ASSERT_EQ(110u, current_header_table_size());
hpack_block = HpackExampleToStringOrDie(R"(
82 | == Indexed - Add ==
| idx = 2
| -> :method: GET
87 | == Indexed - Add ==
| idx = 7
| -> :scheme: https
85 | == Indexed - Add ==
| idx = 5
| -> :path: /index.html
bf | == Indexed - Add ==
| idx = 63
| -> :authority:
| www.example.com
40 | == Literal indexed ==
88 | Literal name (len = 8)
| Huffman encoded:
25a8 49e9 5ba9 7d7f | %.I.[.}.
| Decoded:
| custom-key
89 | Literal value (len = 9)
| Huffman encoded:
25a8 49e9 5bb8 e8b4 bf | %.I.[....
| Decoded:
| custom-value
| -> custom-key:
| custom-value
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":method", "GET"},
HpackHeaderEntry{":scheme", "https"},
HpackHeaderEntry{":path", "/index.html"},
HpackHeaderEntry{":authority", "www.example.com"},
HpackHeaderEntry{"custom-key", "custom-value"},
}));
ASSERT_TRUE(VerifyDynamicTableContents({{"custom-key", "custom-value"},
{"cache-control", "no-cache"},
{":authority", "www.example.com"}}));
ASSERT_EQ(164u, current_header_table_size());
}
TEST_P(HpackDecoderTest, C5_ResponseExamples) {
set_header_table_size_limit(256);
std::string hpack_block = HpackExampleToStringOrDie(R"(
48 | == Literal indexed ==
| Indexed name (idx = 8)
| :status
03 | Literal value (len = 3)
3330 32 | 302
| -> :status: 302
58 | == Literal indexed ==
| Indexed name (idx = 24)
| cache-control
07 | Literal value (len = 7)
7072 6976 6174 65 | private
| -> cache-control: private
61 | == Literal indexed ==
| Indexed name (idx = 33)
| date
1d | Literal value (len = 29)
4d6f 6e2c 2032 3120 4f63 7420 3230 3133 | Mon, 21 Oct 2013
2032 303a 3133 3a32 3120 474d 54 | 20:13:21 GMT
| -> date: Mon, 21 Oct 2013
| 20:13:21 GMT
6e | == Literal indexed ==
| Indexed name (idx = 46)
| location
17 | Literal value (len = 23)
6874 7470 733a 2f2f 7777 772e 6578 616d | https://www.exam
706c 652e 636f 6d | ple.com
| -> location:
| https://www.example.com
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":status", "302"},
HpackHeaderEntry{"cache-control", "private"},
HpackHeaderEntry{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
HpackHeaderEntry{"location", "https:
}));
ASSERT_TRUE(
VerifyDynamicTableContents({{"location", "https:
{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
{"cache-control", "private"},
{":status", "302"}}));
ASSERT_EQ(222u, current_header_table_size());
hpack_block = HpackExampleToStringOrDie(R"(
48 | == Literal indexed ==
| Indexed name (idx = 8)
| :status
03 | Literal value (len = 3)
3330 37 | 307
| - evict: :status: 302
| -> :status: 307
c1 | == Indexed - Add ==
| idx = 65
| -> cache-control: private
c0 | == Indexed - Add ==
| idx = 64
| -> date: Mon, 21 Oct 2013
| 20:13:21 GMT
bf | == Indexed - Add ==
| idx = 63
| -> location:
| https://www.example.com
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":status", "307"},
HpackHeaderEntry{"cache-control", "private"},
HpackHeaderEntry{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
HpackHeaderEntry{"location", "https:
}));
ASSERT_TRUE(
VerifyDynamicTableContents({{":status", "307"},
{"location", "https:
{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
{"cache-control", "private"}}));
ASSERT_EQ(222u, current_header_table_size());
hpack_block = HpackExampleToStringOrDie(R"(
88 | == Indexed - Add ==
| idx = 8
| -> :status: 200
c1 | == Indexed - Add ==
| idx = 65
| -> cache-control: private
61 | == Literal indexed ==
| Indexed name (idx = 33)
| date
1d | Literal value (len = 29)
4d6f 6e2c 2032 3120 4f63 7420 3230 3133 | Mon, 21 Oct 2013
2032 303a 3133 3a32 3220 474d 54 | 20:13:22 GMT
| - evict: cache-control:
| private
| -> date: Mon, 21 Oct 2013
| 20:13:22 GMT
c0 | == Indexed - Add ==
| idx = 64
| -> location:
| https://www.example.com
5a | == Literal indexed ==
| Indexed name (idx = 26)
| content-encoding
04 | Literal value (len = 4)
677a 6970 | gzip
| - evict: date: Mon, 21 Oct
| 2013 20:13:21 GMT
| -> content-encoding: gzip
77 | == Literal indexed ==
| Indexed name (idx = 55)
| set-cookie
38 | Literal value (len = 56)
666f 6f3d 4153 444a 4b48 514b 425a 584f | foo=ASDJKHQKBZXO
5157 454f 5049 5541 5851 5745 4f49 553b | QWEOPIUAXQWEOIU;
206d 6178 2d61 6765 3d33 3630 303b 2076 | max-age=3600; v
6572 7369 6f6e 3d31 | ersion=1
| - evict: location:
| https://www.example.com
| - evict: :status: 307
| -> set-cookie: foo=ASDJKHQ
| KBZXOQWEOPIUAXQWEOIU; ma
| x-age=3600; version=1
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(
header_entries_,
ElementsAreArray({
HpackHeaderEntry{":status", "200"},
HpackHeaderEntry{"cache-control", "private"},
HpackHeaderEntry{"date", "Mon, 21 Oct 2013 20:13:22 GMT"},
HpackHeaderEntry{"location", "https:
HpackHeaderEntry{"content-encoding", "gzip"},
HpackHeaderEntry{
"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
}));
ASSERT_TRUE(VerifyDynamicTableContents(
{{"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
{"content-encoding", "gzip"},
{"date", "Mon, 21 Oct 2013 20:13:22 GMT"}}));
ASSERT_EQ(215u, current_header_table_size());
}
TEST_P(HpackDecoderTest, C6_ResponseExamplesWithHuffmanEncoding) {
set_header_table_size_limit(256);
std::string hpack_block = HpackExampleToStringOrDie(R"(
48 | == Literal indexed ==
| Indexed name (idx = 8)
| :status
03 | Literal value (len = 3)
3330 32 | 302
| -> :status: 302
58 | == Literal indexed ==
| Indexed name (idx = 24)
| cache-control
07 | Literal value (len = 7)
7072 6976 6174 65 | private
| -> cache-control: private
61 | == Literal indexed ==
| Indexed name (idx = 33)
| date
1d | Literal value (len = 29)
4d6f 6e2c 2032 3120 4f63 7420 3230 3133 | Mon, 21 Oct 2013
2032 303a 3133 3a32 3120 474d 54 | 20:13:21 GMT
| -> date: Mon, 21 Oct 2013
| 20:13:21 GMT
6e | == Literal indexed ==
| Indexed name (idx = 46)
| location
17 | Literal value (len = 23)
6874 7470 733a 2f2f 7777 772e 6578 616d | https://www.exam
706c 652e 636f 6d | ple.com
| -> location:
| https://www.example.com
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":status", "302"},
HpackHeaderEntry{"cache-control", "private"},
HpackHeaderEntry{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
HpackHeaderEntry{"location", "https:
}));
ASSERT_TRUE(
VerifyDynamicTableContents({{"location", "https:
{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
{"cache-control", "private"},
{":status", "302"}}));
ASSERT_EQ(222u, current_header_table_size());
hpack_block = HpackExampleToStringOrDie(R"(
48 | == Literal indexed ==
| Indexed name (idx = 8)
| :status
03 | Literal value (len = 3)
3330 37 | 307
| - evict: :status: 302
| -> :status: 307
c1 | == Indexed - Add ==
| idx = 65
| -> cache-control: private
c0 | == Indexed - Add ==
| idx = 64
| -> date: Mon, 21 Oct 2013
| 20:13:21 GMT
bf | == Indexed - Add ==
| idx = 63
| -> location:
| https://www.example.com
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(header_entries_,
ElementsAreArray({
HpackHeaderEntry{":status", "307"},
HpackHeaderEntry{"cache-control", "private"},
HpackHeaderEntry{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
HpackHeaderEntry{"location", "https:
}));
ASSERT_TRUE(
VerifyDynamicTableContents({{":status", "307"},
{"location", "https:
{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
{"cache-control", "private"}}));
ASSERT_EQ(222u, current_header_table_size());
hpack_block = HpackExampleToStringOrDie(R"(
88 | == Indexed - Add ==
| idx = 8
| -> :status: 200
c1 | == Indexed - Add ==
| idx = 65
| -> cache-control: private
61 | == Literal indexed ==
| Indexed name (idx = 33)
| date
1d | Literal value (len = 29)
4d6f 6e2c 2032 3120 4f63 7420 3230 3133 | Mon, 21 Oct 2013
2032 303a 3133 3a32 3220 474d 54 | 20:13:22 GMT
| - evict: cache-control:
| private
| -> date: Mon, 21 Oct 2013
| 20:13:22 GMT
c0 | == Indexed - Add ==
| idx = 64
| -> location:
| https://www.example.com
5a | == Literal indexed ==
| Indexed name (idx = 26)
| content-encoding
04 | Literal value (len = 4)
677a 6970 | gzip
| - evict: date: Mon, 21 Oct
| 2013 20:13:21 GMT
| -> content-encoding: gzip
77 | == Literal indexed ==
| Indexed name (idx = 55)
| set-cookie
38 | Literal value (len = 56)
666f 6f3d 4153 444a 4b48 514b 425a 584f | foo=ASDJKHQKBZXO
5157 454f 5049 5541 5851 5745 4f49 553b | QWEOPIUAXQWEOIU;
206d 6178 2d61 6765 3d33 3630 303b 2076 | max-age=3600; v
6572 7369 6f6e 3d31 | ersion=1
| - evict: location:
| https://www.example.com
| - evict: :status: 307
| -> set-cookie: foo=ASDJKHQ
| KBZXOQWEOPIUAXQWEOIU; ma
| x-age=3600; version=1
)");
EXPECT_TRUE(DecodeBlock(hpack_block));
ASSERT_THAT(
header_entries_,
ElementsAreArray({
HpackHeaderEntry{":status", "200"},
HpackHeaderEntry{"cache-control", "private"},
HpackHeaderEntry{"date", "Mon, 21 Oct 2013 20:13:22 GMT"},
HpackHeaderEntry{"location", "https:
HpackHeaderEntry{"content-encoding", "gzip"},
HpackHeaderEntry{
"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
}));
ASSERT_TRUE(VerifyDynamicTableContents(
{{"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
{"content-encoding", "gzip"},
{"date", "Mon, 21 Oct 2013 20:13:22 GMT"}}));
ASSERT_EQ(215u, current_header_table_size());
}
TEST_P(HpackDecoderTest, ProcessesOptionalTableSizeUpdates) {
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
{
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(3000);
EXPECT_TRUE(DecodeBlock(hbb.buffer()));
EXPECT_EQ(3000u, header_table_size_limit());
EXPECT_EQ(0u, current_header_table_size());
EXPECT_TRUE(header_entries_.empty());
}
{
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(2000);
hbb.AppendDynamicTableSizeUpdate(2500);
EXPECT_TRUE(DecodeBlock(hbb.buffer()));
EXPECT_EQ(2500u, header_table_size_limit());
EXPECT_EQ(0u, current_header_table_size());
EXPECT_TRUE(header_entries_.empty());
}
{
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(1500);
hbb.AppendDynamicTableSizeUpdate(1000);
hbb.AppendDynamicTableSizeUpdate(500);
EXPECT_FALSE(DecodeBlock(hbb.buffer()));
EXPECT_EQ(HpackDecodingError::kDynamicTableSizeUpdateNotAllowed,
decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0],
Eq("Dynamic table size update not allowed"));
EXPECT_EQ(1000u, header_table_size_limit());
EXPECT_EQ(0u, current_header_table_size());
EXPECT_TRUE(header_entries_.empty());
}
DecodeBuffer db("\x80");
EXPECT_FALSE(decoder_.DecodeFragment(&db));
EXPECT_EQ(0u, db.Offset());
EXPECT_EQ(1u, error_messages_.size());
}
TEST_P(HpackDecoderTest, ProcessesRequiredTableSizeUpdate) {
EXPECT_EQ(4096u, decoder_.GetCurrentHeaderTableSizeSetting());
decoder_.ApplyHeaderTableSizeSetting(1024);
decoder_.ApplyHeaderTableSizeSetting(2048);
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
EXPECT_EQ(2048u, decoder_.GetCurrentHeaderTableSizeSetting());
{
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(1024);
hbb.AppendIndexedHeader(4);
EXPECT_TRUE(DecodeBlock(hbb.buffer()));
EXPECT_THAT(header_entries_,
ElementsAreArray({HpackHeaderEntry{":path", "/"}}));
EXPECT_EQ(1024u, header_table_size_limit());
EXPECT_EQ(0u, current_header_table_size());
}
decoder_.ApplyHeaderTableSizeSetting(1000);
decoder_.ApplyHeaderTableSizeSetting(1500);
EXPECT_EQ(1500u, decoder_.GetCurrentHeaderTableSizeSetting());
{
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(500);
hbb.AppendDynamicTableSizeUpdate(1250);
hbb.AppendIndexedHeader(5);
EXPECT_TRUE(DecodeBlock(hbb.buffer()));
EXPECT_THAT(header_entries_,
ElementsAreArray({HpackHeaderEntry{":path", "/index.html"}}));
EXPECT_EQ(1250u, header_table_size_limit());
EXPECT_EQ(0u, current_header_table_size());
}
decoder_.ApplyHeaderTableSizeSetting(500);
decoder_.ApplyHeaderTableSizeSetting(1000);
EXPECT_EQ(1000u, decoder_.GetCurrentHeaderTableSizeSetting());
{
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(200);
hbb.AppendDynamicTableSizeUpdate(700);
hbb.AppendDynamicTableSizeUpdate(900);
hbb.AppendIndexedHeader(5);
EXPECT_FALSE(DecodeBlock(hbb.buffer()));
EXPECT_FALSE(saw_end_);
EXPECT_EQ(HpackDecodingError::kDynamicTableSizeUpdateNotAllowed,
decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0],
Eq("Dynamic table size update not allowed"));
EXPECT_EQ(700u, header_table_size_limit());
EXPECT_EQ(0u, current_header_table_size());
EXPECT_TRUE(header_entries_.empty());
}
EXPECT_EQ(1000u, decoder_.GetCurrentHeaderTableSizeSetting());
EXPECT_FALSE(decoder_.StartDecodingBlock());
}
TEST_P(HpackDecoderTest, InvalidRequiredSizeUpdate) {
decoder_.ApplyHeaderTableSizeSetting(1);
decoder_.ApplyHeaderTableSizeSetting(1024);
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(2);
EXPECT_TRUE(decoder_.StartDecodingBlock());
DecodeBuffer db(hbb.buffer());
EXPECT_FALSE(decoder_.DecodeFragment(&db));
EXPECT_FALSE(saw_end_);
EXPECT_EQ(
HpackDecodingError::kInitialDynamicTableSizeUpdateIsAboveLowWaterMark,
decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0],
Eq("Initial dynamic table size update is above low water mark"));
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
}
TEST_P(HpackDecoderTest, RequiredTableSizeChangeBeforeEnd) {
decoder_.ApplyHeaderTableSizeSetting(1024);
EXPECT_FALSE(DecodeBlock(""));
EXPECT_EQ(HpackDecodingError::kMissingDynamicTableSizeUpdate,
decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0], Eq("Missing dynamic table size update"));
EXPECT_FALSE(saw_end_);
}
TEST_P(HpackDecoderTest, RequiredTableSizeChangeBeforeIndexedHeader) {
decoder_.ApplyHeaderTableSizeSetting(1024);
HpackBlockBuilder hbb;
hbb.AppendIndexedHeader(1);
EXPECT_FALSE(DecodeBlock(hbb.buffer()));
EXPECT_EQ(HpackDecodingError::kMissingDynamicTableSizeUpdate,
decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0], Eq("Missing dynamic table size update"));
EXPECT_FALSE(saw_end_);
EXPECT_TRUE(header_entries_.empty());
}
TEST_P(HpackDecoderTest, RequiredTableSizeChangeBeforeIndexedHeaderName) {
decoder_.ApplyHeaderTableSizeSetting(1024);
HpackBlockBuilder hbb;
hbb.AppendNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader, 2,
false, "PUT");
EXPECT_FALSE(DecodeBlock(hbb.buffer()));
EXPECT_EQ(HpackDecodingError::kMissingDynamicTableSizeUpdate,
decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0], Eq("Missing dynamic table size update"));
EXPECT_FALSE(saw_end_);
EXPECT_TRUE(header_entries_.empty());
}
TEST_P(HpackDecoderTest, RequiredTableSizeChangeBeforeLiteralName) {
decoder_.ApplyHeaderTableSizeSetting(1024);
HpackBlockBuilder hbb;
hbb.AppendLiteralNameAndValue(HpackEntryType::kNeverIndexedLiteralHeader,
false, "name", false, "some data.");
EXPECT_FALSE(DecodeBlock(hbb.buffer()));
EXPECT_EQ(HpackDecodingError::kMissingDynamicTableSizeUpdate,
decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0], Eq("Missing dynamic table size update"));
EXPECT_FALSE(saw_end_);
EXPECT_TRUE(header_entries_.empty());
}
TEST_P(HpackDecoderTest, InvalidIndexedHeaderVarint) {
EXPECT_TRUE(decoder_.StartDecodingBlock());
DecodeBuffer db("\xff\x80\x80\x80\x80\x80\x80\x80\x80\x80\x80\x00");
EXPECT_FALSE(decoder_.DecodeFragment(&db));
EXPECT_TRUE(decoder_.DetectError());
EXPECT_FALSE(saw_end_);
EXPECT_EQ(HpackDecodingError::kIndexVarintError, decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0],
Eq("Index varint beyond implementation limit"));
EXPECT_TRUE(header_entries_.empty());
EXPECT_FALSE(decoder_.EndDecodingBlock());
}
TEST_P(HpackDecoderTest, InvalidIndex) {
EXPECT_TRUE(decoder_.StartDecodingBlock());
DecodeBuffer db("\x80");
EXPECT_FALSE(decoder_.DecodeFragment(&db));
EXPECT_TRUE(decoder_.DetectError());
EXPECT_FALSE(saw_end_);
EXPECT_EQ(HpackDecodingError::kInvalidIndex, decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0],
Eq("Invalid index in indexed header field representation"));
EXPECT_TRUE(header_entries_.empty());
EXPECT_FALSE(decoder_.EndDecodingBlock());
}
TEST_P(HpackDecoderTest, TruncatedBlock) {
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(3000);
EXPECT_EQ(3u, hbb.size());
hbb.AppendDynamicTableSizeUpdate(4000);
EXPECT_EQ(6u, hbb.size());
EXPECT_TRUE(DecodeBlock(hbb.buffer()));
EXPECT_EQ(4000u, header_table_size_limit());
EXPECT_TRUE(DecodeBlock(hbb.buffer()));
EXPECT_EQ(4000u, header_table_size_limit());
EXPECT_FALSE(DecodeBlock(hbb.buffer().substr(0, hbb.size() - 1)));
EXPECT_FALSE(saw_end_);
EXPECT_EQ(HpackDecodingError::kTruncatedBlock, decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0],
Eq("Block ends in the middle of an instruction"));
EXPECT_EQ(3000u, header_table_size_limit());
EXPECT_EQ(0u, current_header_table_size());
EXPECT_TRUE(header_entries_.empty());
}
TEST_P(HpackDecoderTest, OversizeStringDetected) {
HpackBlockBuilder hbb;
hbb.AppendLiteralNameAndValue(HpackEntryType::kNeverIndexedLiteralHeader,
false, "name", false, "some data.");
hbb.AppendLiteralNameAndValue(HpackEntryType::kUnindexedLiteralHeader, false,
"name2", false, "longer data");
EXPECT_TRUE(DecodeBlock(hbb.buffer()));
EXPECT_THAT(header_entries_,
ElementsAreArray({HpackHeaderEntry{"name", "some data."},
HpackHeaderEntry{"name2", "longer data"}}));
decoder_.set_max_string_size_bytes(10);
EXPECT_FALSE(DecodeBlock(hbb.buffer()));
EXPECT_THAT(header_entries_,
ElementsAreArray({HpackHeaderEntry{"name", "some data."}}));
EXPECT_FALSE(saw_end_);
EXPECT_EQ(HpackDecodingError::kValueTooLong, decoder_.error());
EXPECT_EQ(1u, error_messages_.size());
EXPECT_THAT(error_messages_[0], Eq("Value length exceeds buffer limit"));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
52d6522f-ddf2-4de4-9a6c-6004ec920947 | cpp | tensorflow/tensorflow | input_slices | third_party/xla/xla/service/gpu/fusions/legacy/input_slices.cc | third_party/xla/xla/service/gpu/fusions/legacy/input_slices_test.cc | #include "xla/service/gpu/fusions/legacy/input_slices.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/elemental_ir_emitter.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/parallel_loop_emitter.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/kernel_support_library.h"
#include "xla/service/llvm_ir/llvm_loop.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
absl::Status EmitElementForInputFusibleSlices(
ElementalIrEmitter& elemental_emitter,
const HloComputation* fused_computation,
const std::vector<llvm_ir::IrArray>& inputs,
const std::vector<llvm_ir::IrArray>& outputs,
const llvm_ir::IrArray::Index& index, llvm::IRBuilder<>* builder) {
VLOG(10) << "Emitting slice input fusion for "
<< fused_computation->ToString();
HloInstruction* slice_or_tuple = fused_computation->root_instruction();
auto slice_instructions = [&]() -> absl::Span<HloInstruction* const> {
if (slice_or_tuple->opcode() == HloOpcode::kSlice) {
return absl::Span<HloInstruction* const>(&slice_or_tuple, 1);
}
CHECK_EQ(slice_or_tuple->opcode(), HloOpcode::kTuple);
return slice_or_tuple->operands();
}();
std::vector<llvm::Value*> input_ir_values;
FusedIrEmitter fused_emitter(elemental_emitter);
for (int i = 0; i < fused_computation->num_parameters(); i++) {
fused_emitter.BindGenerator(
*fused_computation->parameter_instruction(i),
[&inputs, i, builder](llvm_ir::IrArray::Index index) {
return inputs[i].EmitReadArrayElement(index, builder);
});
}
for (const HloInstruction* slice : slice_instructions) {
auto input_generator = *fused_emitter.GetGenerator(*slice->operand(0));
input_ir_values.push_back(input_generator(index).value());
}
KernelSupportLibrary ksl(builder, llvm_ir::UnrollMode::kDefaultUnroll);
for (int64_t i = 0; i < slice_instructions.size(); ++i) {
HloInstruction* slice = slice_instructions[i];
std::vector<llvm::Value*> index_within_ranges;
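// Emit one range check per dimension; the element is written only when the
// source index lies inside this slice's [start, limit) window.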
for (size_t dim = 0; dim < slice->slice_starts().size(); ++dim) {
CHECK_EQ(slice->slice_strides(dim), 1);
auto larger_or_equal_than_start = builder->CreateICmpSGE(
index.multidim()[dim],
index.GetConstantWithIndexType(slice->slice_starts(dim)));
llvm::Value* smaller_than_limit = builder->CreateICmpSLT(
index.multidim()[dim],
index.GetConstantWithIndexType(slice->slice_limits(dim)));
llvm::Value* within_range =
builder->CreateAnd(larger_or_equal_than_start, smaller_than_limit);
index_within_ranges.push_back(within_range);
}
llvm::Value* guarding_cond = builder->CreateAnd(index_within_ranges);
auto emit_slice_elem_func = [&] {
const std::vector<llvm::Value*>& src_multidim = index.multidim();
std::vector<llvm::Value*> dst_multidim(src_multidim.size());
for (size_t dim = 0; dim < src_multidim.size(); ++dim) {
dst_multidim[dim] = builder->CreateSub(
src_multidim[dim],
index.GetConstantWithIndexType(slice->slice_starts(dim)));
}
const llvm_ir::IrArray& src_ir_array = outputs[i];
llvm_ir::IrArray::Index slice_dst_index(dst_multidim, slice->shape(),
index.GetType());
src_ir_array.EmitWriteArrayElement(slice_dst_index, input_ir_values[i],
builder);
};
ksl.If(absl::StrCat("slice", i), guarding_cond, emit_slice_elem_func);
}
return absl::OkStatus();
}
absl::StatusOr<Shape> GetConsistentInputShapeForRootSlices(
const HloComputation* fused_computation) {
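// All root slices must read operands of the same shape (element type may
// differ) so that a single parallel loop can drive every slice.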
const HloInstruction& root = *fused_computation->root_instruction();
if (root.opcode() == HloOpcode::kSlice) {
return root.operands()[0]->shape();
}
CHECK_EQ(root.opcode(), HloOpcode::kTuple);
const Shape& first_slice_operand_shape =
root.operands()[0]->operands()[0]->shape();
for (size_t i = 1; i < root.operands().size(); ++i) {
const HloInstruction* slice = root.operands()[i];
const Shape& operand_shape = slice->operands()[0]->shape();
if (!ShapeUtil::EqualIgnoringElementType(first_slice_operand_shape,
operand_shape)) {
return FailedPrecondition(
"Fused slices do not have the same input shape, fused computation = "
"%s.",
root.parent()->name());
}
}
return first_slice_operand_shape;
}
}
LaunchDimensions InputSlicesFusion::launch_dimensions() const {
const auto& root = analysis_.fusion_root(0).instruction();
const auto& shape = root.operand(0)->shape();
return CalculateLaunchDimensions(shape, analysis_.device_info(),
{unroll_factor_});
}
std::optional<IndexingMap> InputSlicesFusion::ComputeThreadIdToOutputIndexing(
int64_t output_id, mlir::MLIRContext* ctx) const {
auto launch_dims = launch_dimensions();
const auto& shape = analysis_.fusion_root(output_id).shape();
return GetDefaultThreadIdIndexingMap(launch_dims, unroll_factor_, shape, ctx);
}
absl::Status InputSlicesFusion::EmitKernel(
IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const {
TF_ASSIGN_OR_RETURN(Shape element_shape,
GetConsistentInputShapeForRootSlices(
fusion.fused_instructions_computation()));
LaunchDimensionsConfig launch_config;
launch_config.unroll_factor = unroll_factor_;
GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder);
return ParallelLoopEmitter(
[&](const llvm_ir::IrArray::Index index) -> absl::Status {
return EmitElementForInputFusibleSlices(
elemental_emitter, fusion.fused_instructions_computation(),
inputs, outputs, index, builder);
},
element_shape, launch_dims, builder, launch_config)
.EmitLoop(
fusion.name(),
GetIndexTypeForKernel(&fusion, launch_dims.launch_bound(), builder));
}
}
} | #include "xla/service/gpu/fusions/legacy/input_slices.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
class InputSlicesTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
auto opts = HloTestBase::GetDebugOptionsForTest();
opts.set_xla_gpu_mlir_emitter_level(0);
return opts;
}
mlir::MLIRContext mlir_context_;
};
TEST_F(InputSlicesTest, ThreadIndexing) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
%input = f32[2,3,5,7]{2,1,0,3} parameter(0)
slice0 = f32[1,2,3,5]{2,1,0,3} slice(input), slice={[0:1],[1:3],[0:3],[2:7]}
slice1 = f32[1,2,3,5]{2,1,0,3} slice(input), slice={[0:1],[0:2],[0:3],[2:7]}
ROOT tuple = (f32[1,2,3,5]{2,1,0,3}, f32[1,2,3,5]{2,1,0,3}) tuple(slice0, slice1)
}
ENTRY entry {
%input = f32[2,3,5,7]{2,1,0,3} parameter(0)
ROOT %fusion = (f32[1,2,3,5]{2,1,0,3}, f32[1,2,3,5]{2,1,0,3}) fusion(%input), kind=kLoop, calls=fused_computation
})")
.value();
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused = HloFusionAnalysis::Create(*root, device_info);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto fusion = dynamic_cast<InputSlicesFusion*>(emitter.get());
ASSERT_NE(fusion, nullptr);
auto thread_id_to_output_indexing =
fusion->ComputeThreadIdToOutputIndexing(0, &mlir_context_);
EXPECT_THAT(ToString(*thread_id_to_output_indexing,
{"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"},
{"chunk_id", "unroll_id"}, {}),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (0,
((bl_x * 128 + th_x) floordiv 3) mod 2,
(bl_x * 128 + th_x) mod 3,
(bl_x * 128 + th_x) floordiv 6),
domain:
th_x in [0, 127],
th_y in [0, 0],
th_z in [0, 0],
bl_x in [0, 1],
bl_y in [0, 0],
bl_z in [0, 0],
chunk_id in [0, 0],
unroll_id in [0, 0],
bl_x * 128 + th_x in [0, 29]
)"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/input_slices.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/input_slices_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d2b37d13-4f6d-45ff-bfc3-4017594a9cec | cpp | tensorflow/tensorflow | gemm_fusion_autotuner | third_party/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc | third_party/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner_test.cc | #include "xla/service/gpu/autotuning/gemm_fusion_autotuner.h"
#include <algorithm>
#include <array>
#include <atomic>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "third_party/gpus/cuda/include/cublas_v2.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/float_normalization.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/service/gpu/gpu_float_support.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/split_k_gemm_rewriter.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/transforms/cudnn_fusion_compiler.h"
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h"
#include "xla/service/gpu/transforms/fusion_wrapper.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/service/gpu/transforms/priority_fusion.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/tsl/lib/core/bits.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace gpu {
using BackendConfig = GemmFusionAutotunerImpl::BackendConfig;
using BackendConfigs = GemmFusionAutotunerImpl::BackendConfigs;
using ProfilingOutput = AutotunerCompileUtil::ProfilingOutput;
namespace {
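// Bounds and candidate value sets that define the autotuner's search space:
// tile sizes, pipeline stage counts, warp counts, split-K factors and CTA
// counts enumerated by the exhaustive tiling search below.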
constexpr int kMinTileSize = 16;
constexpr TritonGemmConfig kDefaultGemmTiling = {32, 32, 32, 1, 1, 4};
constexpr int kMaxWavesForSplitK = 5;
constexpr std::array<int, 6> kBlockSizes = {16, 32, 64, 128, 256, 512};
constexpr std::array<int, 4> kNumStages = {1, 2, 3, 4};
constexpr std::array<int, 4> kNumWarps = {2, 4, 8, 16};
constexpr std::array<int, 5> kSplitK = {1, 2, 4, 8, 16};
constexpr std::array<int, 5> kNumCtas = {1, 2, 4, 8, 16};
using AutoTuneCacheKeyCount = absl::flat_hash_map<AutotuneCacheKey, uint64_t>;
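// Walks all non-fusion computations of a module and, for every GEMM fusion
// that has neither a cached autotune result nor a config already embedded in
// its backend config, collects the set of backend configs to try.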
class GemmConfigSetCollector : public ConstDfsHloVisitorWithDefault {
public:
explicit GemmConfigSetCollector(GemmFusionAutotunerImpl* impl)
: impl_(impl) {}
absl::StatusOr<BackendConfigs> CollectGemmConfigSets(
const HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads = {}) {
error_out_on_cache_miss_ =
module->config()
.debug_options()
.xla_gpu_require_complete_aot_autotune_results();
gemm_config_sets_.clear();
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(this));
}
return std::move(gemm_config_sets_);
}
AutoTuneCacheKeyCount GetFusionsCount() {
return std::move(fusion_count_map_);
}
absl::Status HandleFusion(const HloInstruction* hlo) override {
const HloFusionInstruction* fusion = Cast<HloFusionInstruction>(hlo);
TF_ASSIGN_OR_RETURN(auto gpu_config,
hlo->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
AutotuneCacheKey key = AutotunerUtil::GetKey(hlo, impl_->GetConfig());
auto [iterator, inserted] = fusion_count_map_.insert({key, 1});
if (!inserted) {
++(iterator->second);
}
TF_ASSIGN_OR_RETURN(bool is_in_cache,
AutotunerUtil::IsInCache(key, impl_->GetConfig()));
if (is_in_cache || handled_fusions_.contains(key)) {
return absl::OkStatus();
}
bool missing_config = (backend_config.kind() == kTritonGemmFusionKind &&
!backend_config.has_triton_gemm_config()) ||
(backend_config.kind() == kCuDnnFusionKind &&
!backend_config.has_cudnn_fusion_config()) ||
(backend_config.kind() == kCustomFusionKind &&
!backend_config.has_custom_fusion_config());
if (missing_config) {
if (error_out_on_cache_miss_) {
return absl::NotFoundError(absl::StrCat(
"Complete autotuning results are required, but no cache result "
"found for key: ",
key.ToString()));
}
TF_ASSIGN_OR_RETURN(std::vector<BackendConfig> configs,
impl_->GenerateConfigs(*fusion));
gemm_config_sets_.push_back({fusion, std::move(configs)});
}
handled_fusions_.insert(key);
return absl::OkStatus();
}
absl::Status DefaultAction(const HloInstruction* hlo) override {
return absl::OkStatus();
}
private:
bool error_out_on_cache_miss_;
GemmFusionAutotunerImpl* impl_;
BackendConfigs gemm_config_sets_;
AutoTuneCacheKeyCount fusion_count_map_;
absl::flat_hash_set<AutotuneCacheKey> handled_fusions_;
};
struct TileSizeLimit {
int block_m = 0;
int block_n = 0;
int block_k = 0;
};
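// Derives per-dimension tile-size upper bounds from the dot's operand shapes,
// rounded up to the next power of two and clamped to at least kMinTileSize.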
absl::StatusOr<TileSizeLimit> GetLimits(const HloDotInstruction& dot) {
TF_ASSIGN_OR_RETURN(int64_t non_contracting_index_lhs,
NonContractingDimensionIndex(dot, 0));
TF_ASSIGN_OR_RETURN(int64_t non_contracting_index_rhs,
NonContractingDimensionIndex(dot, 1));
TF_ASSIGN_OR_RETURN(int64_t contracting_index,
ContractingDimensionIndex(dot, 1));
const int max_m = tsl::NextPowerOfTwoS64(
dot.operand(0)->shape().dimensions(non_contracting_index_lhs));
const int max_n = tsl::NextPowerOfTwoS64(
dot.operand(1)->shape().dimensions(non_contracting_index_rhs));
const int max_k = tsl::NextPowerOfTwoS64(
dot.operand(1)->shape().dimensions(contracting_index));
return TileSizeLimit{
std::max(max_m, kMinTileSize),
std::max(max_n, kMinTileSize),
std::max(max_k, kMinTileSize),
};
}
int GetLogEveryN() { return VLOG_IS_ON(3) ? 100 : 1000; }
int64_t PriorityFusionShapeSize(const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
HloCostAnalysis::Options PriorityFusionOptions() {
  // Field-name comments below are a best guess at the HloCostAnalysis::Options
  // aggregate members; they are annotations only and do not change behavior.
  return {/*shape_size=*/PriorityFusionShapeSize,
          /*per_second_rates=*/{},
          /*min_latencies_seconds=*/{},
          /*count_multiple_input_accesses=*/true};
}
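// Extracts `fusion` into a standalone module and attaches the given Triton
// tiling. For split-K configs the dot is rewritten into a batched form and
// float normalization plus fusion passes are re-run so the extracted module
// matches what the real compilation pipeline would produce.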
absl::StatusOr<std::unique_ptr<HloModule>> TritonGemmAutotuneExtractor(
const TritonGemmConfig& config,
const se::DeviceDescription& gpu_device_info,
const HloFusionInstruction* fusion, DebugOptions debug_opts,
bool allow_filtering_kernels_spilling_registers) {
std::unique_ptr<HloModule> new_module =
ExtractInstructionIntoNewModule(*fusion);
if (!allow_filtering_kernels_spilling_registers) {
debug_opts.set_xla_gpu_filter_kernels_spilling_registers_on_autotuning(
false);
}
new_module->mutable_config().set_debug_options(debug_opts);
HloComputation* entry_computation = new_module->entry_computation();
HloInstruction* cloned_dot_fusion = entry_computation->root_instruction();
TF_ASSIGN_OR_RETURN(auto gpu_config,
cloned_dot_fusion->backend_config<GpuBackendConfig>());
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
*backend_config.mutable_triton_gemm_config() = config.ToProto();
TF_RETURN_IF_ERROR(cloned_dot_fusion->set_backend_config(gpu_config));
if (config.split_k > 1) {
TF_RETURN_IF_ERROR(MakeDotSplitKBatch(cloned_dot_fusion, config));
for (PrimitiveType type :
{BF16, F8E5M2, F8E4M3FN, F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) {
GpuFloatSupport float_support(gpu_device_info.cuda_compute_capability(),
type);
FloatNormalization float_normalization(&float_support);
TF_RETURN_IF_ERROR(float_normalization.Run(new_module.get()).status());
}
PriorityFusion priority_fusion(
        /*thread_pool=*/nullptr, gpu_device_info, PriorityFusionOptions());
TF_RETURN_IF_ERROR(priority_fusion.Run(new_module.get()).status());
FusionWrapper fusion_wrapper;
TF_RETURN_IF_ERROR(fusion_wrapper.Run(new_module.get()).status());
}
return new_module;
}
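// Builds the cuBLAS reference variant of the fusion: the fused computation is
// extracted into a new module and the GemmRewriter turns the dot into a
// cuBLAS call. X3/X6 algorithms that cuBLAS cannot run are downgraded to
// plain F32 first.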
absl::StatusOr<std::unique_ptr<HloModule>> CublasGemmAutotuneExtractor(
const AutotuneConfig& config, const se::DeviceDescription& gpu_device_info,
const se::SemanticVersion& toolkit_version,
const HloFusionInstruction* fusion, const DebugOptions& debug_opts) {
const HloComputation* fusion_computation =
fusion->called_computations().at(0);
std::unique_ptr<HloModule> new_module =
ExtractComputationIntoNewModule(*fusion_computation);
new_module->mutable_config().set_debug_options(debug_opts);
auto* dot = hlo_query::GetFirstInstructionWithOpcode(
*new_module->entry_computation(), HloOpcode::kDot);
if (dot->precision_config().algorithm() ==
PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3 ||
dot->precision_config().algorithm() ==
PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6 ||
dot->precision_config().algorithm() ==
PrecisionConfig::ALG_DOT_TF32_TF32_F32_X3) {
dot->mutable_precision_config()->set_algorithm(
PrecisionConfig::ALG_DOT_F32_F32_F32);
}
for (GemmRewriterOptions::DType dtype :
{GemmRewriterOptions::DType::kFp8Only,
GemmRewriterOptions::DType::kNonFp8Only}) {
GemmRewriter rewriter(config.GetGpuComputeCapability(), toolkit_version,
GemmRewriterOptions{dtype});
PriorityFusion fusion_pass(
        /*thread_pool=*/nullptr, gpu_device_info, PriorityFusionOptions());
TF_RETURN_IF_ERROR(rewriter.Run(new_module.get()).status());
TF_RETURN_IF_ERROR(fusion_pass.Run(new_module.get()).status());
}
return new_module;
}
absl::Status UpdateFusionInstructionKernelIndex(
HloInstruction* fusion_instruction, int kernel_index) {
GpuBackendConfig gpu_config =
fusion_instruction->backend_config<GpuBackendConfig>().value();
gpu_config.mutable_fusion_backend_config()
->mutable_custom_fusion_config()
->set_kernel_index(kernel_index);
TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(gpu_config));
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<HloModule>> CustomFusionKernelAutotuneExtractor(
const GemmFusionAutotunerImpl::CustomKernelFusionConfig& cutlass_config,
const AutotuneConfig& config, const se::SemanticVersion& toolkit_version,
const HloFusionInstruction* fusion, const DebugOptions& debug_opts) {
const HloComputation* fusion_computation = fusion->called_computation();
std::unique_ptr<HloModule> new_module =
ExtractComputationIntoNewModule(*fusion_computation);
new_module->mutable_config().set_debug_options(debug_opts);
CustomKernelFusionRewriter rewriter(
&config.GetExecutor()->GetDeviceDescription());
PriorityFusion fusion_pass(
      /*thread_pool=*/nullptr, config.GetExecutor()->GetDeviceDescription(),
PriorityFusionOptions());
TF_RETURN_IF_ERROR(rewriter.Run(new_module.get()).status());
TF_RETURN_IF_ERROR(fusion_pass.Run(new_module.get()).status());
HloInstruction* custom_kernel_fusion =
hlo_query::GetFirstInstructionWithOpcode(*new_module->entry_computation(),
HloOpcode::kFusion);
int64_t kernel_index = cutlass_config.kernel_index;
TF_RETURN_IF_ERROR(
UpdateFusionInstructionKernelIndex(custom_kernel_fusion, kernel_index));
return new_module;
}
absl::StatusOr<std::unique_ptr<HloModule>> FusionExtractor(
const HloFusionInstruction& fusion, const DebugOptions& debug_opts) {
std::unique_ptr<HloModule> module = ExtractInstructionIntoNewModule(fusion);
module->mutable_config().set_debug_options(debug_opts);
return module;
}
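// Like FusionExtractor, but additionally tags the extracted fusion as a cuDNN
// fusion with the given execution plan id.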
absl::StatusOr<std::unique_ptr<HloModule>> CuDnnFusionExtractor(
const HloFusionInstruction& fusion, const DebugOptions& debug_opts,
const int plan_id) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
FusionExtractor(fusion, debug_opts));
GpuBackendConfig gpu_config;
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind(std::string(kCuDnnFusionKind));
backend_config.mutable_cudnn_fusion_config()->set_plan_id(plan_id);
TF_RETURN_IF_ERROR(
module->entry_computation()->root_instruction()->set_backend_config(
gpu_config));
return module;
}
bool IsFusionKind(const HloInstruction& hlo, absl::string_view kind) {
auto gpu_config = hlo.backend_config<GpuBackendConfig>();
if (!gpu_config.ok()) {
return false;
}
return gpu_config->fusion_backend_config().kind() == kind;
}
int GetCuDnnPlanCount(const HloInstruction& hlo,
const AutotuneConfig& autotune_config) {
if (auto gpu_config = hlo.backend_config<GpuBackendConfig>();
!gpu_config.ok() ||
gpu_config->fusion_backend_config().has_cudnn_fusion_config()) {
return {};
}
return CuDnnFusionCompiler::GetAvailablePlanCount(
*autotune_config.GetExecutor(), *DynCast<HloFusionInstruction>(&hlo));
}
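// Converts a backend config variant into the corresponding field of an
// AutotuneResult proto.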
AutotuneResult FromConfig(const BackendConfig& config) {
AutotuneResult res;
if (std::holds_alternative<GemmFusionAutotunerImpl::CuBlasConfig>(config)) {
res.mutable_gemm()->set_algorithm(CUBLAS_GEMM_DEFAULT);
} else if (std::holds_alternative<
GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config)) {
res.mutable_custom_kernel_fusion()->set_kernel_index(
std::get<GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config)
.kernel_index);
} else if (std::holds_alternative<GemmFusionAutotunerImpl::CuDnnConfig>(
config)) {
res.mutable_algorithm()->set_algo_id(
std::get<GemmFusionAutotunerImpl::CuDnnConfig>(config).plan_id);
} else if (std::holds_alternative<TritonGemmConfig>(config)) {
*res.mutable_triton() = std::get<TritonGemmConfig>(config).ToProto();
} else {
LOG(FATAL) << "Unsupported config type: " << config.index();
}
return res;
}
absl::Status DumpOriginalFusion(AutotunerCompileUtil& util,
const HloFusionInstruction& fusion,
int fusion_id) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
util.ExtractModule([&](const DebugOptions& debug_opts) {
return FusionExtractor(fusion, debug_opts);
}));
module->set_name(std::string(fusion.name()));
std::string rendered_graph_name =
absl::StrCat("gemm_fusion_", fusion_id, ".", module->name(), ".dot");
std::string rendered_graph = RenderGraph(rendered_graph_name, *module,
RenderedGraphFormat::kDot, true);
  // The /*file_prefix=*/ annotation is an assumption about the dump API's
  // parameter name; the empty string keeps the default prefix.
  DumpToFileInDir(
      *fusion.GetModule(),
      /*file_prefix=*/"",
      rendered_graph_name,
      rendered_graph);
  DumpToFileInDirOrStdout(
      *fusion.GetModule(),
      /*file_prefix=*/"",
      absl::StrCat("gemm_fusion_", fusion_id, ".", module->name(), ".txt"),
      module->ToString());
return absl::OkStatus();
}
absl::Status DumpAutotunedFusion(const AutotuneConfig& autotune_config,
const se::SemanticVersion& toolkit_version,
AutotunerCompileUtil& util,
const AutotuneResult result,
const HloFusionInstruction* fusion,
int fusion_id) {
TritonGemmConfig triton_gemm_config;
if (result.has_triton()) {
TF_ASSIGN_OR_RETURN(triton_gemm_config,
TritonGemmConfig::FromProto(result.triton()));
}
const se::DeviceDescription& device_desc =
autotune_config.GetExecutor()->GetDeviceDescription();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
util.ExtractModule([&](const DebugOptions& debug_opts) {
if (result.has_algorithm()) {
return CuDnnFusionExtractor(*fusion, debug_opts,
result.algorithm().algo_id());
} else if (result.has_triton()) {
return TritonGemmAutotuneExtractor(
triton_gemm_config, device_desc, fusion, debug_opts,
            /*allow_filtering_kernels_spilling_registers=*/true);
} else if (result.has_gemm()) {
return CublasGemmAutotuneExtractor(autotune_config, device_desc,
toolkit_version, fusion,
debug_opts);
} else {
LOG(FATAL) << "Unknown result type: " << result.DebugString();
}
}));
module->set_name(std::string(fusion->name()));
  DumpToFileInDirOrStdout(
      *fusion->GetModule(),
      /*file_prefix=*/"",
      absl::StrCat("gemm_fusion_", fusion_id, ".", module->name(),
                   ".optimized.txt"),
      module->ToString());
return absl::OkStatus();
}
std::string Serialize(const BackendConfig& config) {
if (auto triton_config = std::get_if<TritonGemmConfig>(&config)) {
tsl::protobuf::TextFormat::Printer printer;
printer.SetSingleLineMode(true);
std::string result;
printer.PrintToString(triton_config->ToProto(), &result);
return result;
}
return GemmFusionAutotunerImpl::ToString(config);
}
}
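// Replaces a GEMM fusion with a plain call to its fused computation so that
// the regular (non-Triton) GEMM handling path can take over.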
absl::Status RewriteGemmFusionToCall(HloInstruction* fusion_instr) {
HloComputation* const computation = fusion_instr->parent();
HloInstruction* const call =
computation->AddInstruction(HloInstruction::CreateCall(
fusion_instr->shape(), fusion_instr->operands(),
fusion_instr->fused_instructions_computation()));
return computation->ReplaceInstruction(fusion_instr, call);
}
absl::Status RewriteGemmFusionToCustomKernelFusion(
HloInstruction* fusion_instr, se::DeviceDescription device_description,
int64_t kernel_index) {
HloComputation* const computation = fusion_instr->parent();
HloInstruction* const call =
computation->AddInstruction(HloInstruction::CreateCall(
fusion_instr->shape(), fusion_instr->operands(),
fusion_instr->fused_instructions_computation()));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(fusion_instr, call));
HloPassPipeline pipeline("autotuner_custom_kernel_fusion_rewriter");
pipeline.AddPass<CallInliner>();
pipeline.AddPass<CustomKernelFusionRewriter>(&device_description,
kernel_index);
HloModule* hlo_module = call->GetModule();
return pipeline.Run(hlo_module).status();
}
absl::Status HandleTritonGemm(HloInstruction* fusion_instr,
FusionBackendConfig& fusion_backend_config) {
TF_ASSIGN_OR_RETURN(
const TritonGemmConfig config,
TritonGemmConfig::FromProto(fusion_backend_config.triton_gemm_config()));
if (config.split_k > 1) {
TF_RETURN_IF_ERROR(MakeDotSplitKBatch(fusion_instr, config));
}
return absl::OkStatus();
}
absl::Status GemmFusionAutotunerRewriterVisitor::HandleFusion(
HloInstruction* fusion_instr) {
TF_ASSIGN_OR_RETURN(auto gpu_config,
fusion_instr->backend_config<GpuBackendConfig>());
FusionBackendConfig& fusion_backend_config =
*gpu_config.mutable_fusion_backend_config();
if (fusion_backend_config.kind() != kTritonGemmFusionKind &&
fusion_backend_config.kind() != kCuDnnFusionKind &&
fusion_backend_config.kind() != kCustomFusionKind) {
return absl::OkStatus();
}
if (fusion_backend_config.has_triton_gemm_config()) {
TF_RETURN_IF_ERROR(HandleTritonGemm(fusion_instr, fusion_backend_config));
MarkAsChanged();
return absl::OkStatus();
}
if (fusion_backend_config.has_cudnn_fusion_config() ||
fusion_backend_config.has_custom_fusion_config()) {
return absl::OkStatus();
}
VLOG(4) << "Autotuning fusion instruction: " << fusion_instr->ToString();
TF_ASSIGN_OR_RETURN(
AutotuneResult autotune_result,
AutotunerUtil::Autotune(
fusion_instr, config_, [&]() -> absl::StatusOr<AutotuneResult> {
if (config_.IsDeviceless()) {
return absl::InternalError(absl::StrCat(
"Expect autotune result cache hit for deviceless "
"compilation (HLO: ",
fusion_instr->ToString(), ")"));
}
return absl::InternalError("Expect autotune result cache hit.");
}));
VLOG(4) << "Autotuning result: " << autotune_result.ShortDebugString();
if (autotune_result.has_triton()) {
*fusion_backend_config.mutable_triton_gemm_config() =
autotune_result.triton();
TF_RETURN_IF_ERROR(fusion_instr->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(HandleTritonGemm(fusion_instr, fusion_backend_config));
MarkAsChanged();
return absl::OkStatus();
}
if (autotune_result.has_gemm()) {
TF_RETURN_IF_ERROR(RewriteGemmFusionToCall(fusion_instr));
MarkAsChanged();
return absl::OkStatus();
}
if (autotune_result.has_custom_kernel_fusion()) {
TF_RETURN_IF_ERROR(RewriteGemmFusionToCustomKernelFusion(
fusion_instr, config_.GetExecutor()->GetDeviceDescription(),
autotune_result.custom_kernel_fusion().kernel_index()));
MarkAsChanged();
return absl::OkStatus();
}
CHECK(autotune_result.has_algorithm());
fusion_backend_config.set_kind(std::string(kCuDnnFusionKind));
fusion_backend_config.mutable_cudnn_fusion_config()->set_plan_id(
autotune_result.algorithm().algo_id());
TF_RETURN_IF_ERROR(fusion_instr->set_backend_config(gpu_config));
MarkAsChanged();
return absl::OkStatus();
}
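// Candidates are sorted by config before profiling. CuBlasConfig and
// CustomKernelFusionConfig carry no internal ordering, so they compare as
// equal; cuDNN configs order by plan id.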
bool GemmFusionAutotunerImpl::CuBlasConfig::operator<(
const CuBlasConfig& other) const {
return false;
}
bool GemmFusionAutotunerImpl::CuDnnConfig::operator<(
const CuDnnConfig& other) const {
return plan_id < other.plan_id;
}
bool GemmFusionAutotunerImpl::CustomKernelFusionConfig::operator<(
const CustomKernelFusionConfig& other) const {
return false;
}
bool GemmFusionAutotunerImpl::IsAutotuningEnabled() const {
return debug_options_.xla_gpu_autotune_level() > 0 &&
!debug_options_.xla_gpu_deterministic_ops();
}
std::string GemmFusionAutotunerImpl::ToString(const BackendConfig& config) {
if (std::holds_alternative<TritonGemmConfig>(config)) {
return std::get<TritonGemmConfig>(config).ToString();
} else if (std::holds_alternative<CuDnnConfig>(config)) {
return absl::StrFormat("cuDNN plan %d",
std::get<CuDnnConfig>(config).plan_id);
} else if (std::holds_alternative<CuBlasConfig>(config)) {
return "reference (cublas)";
} else {
LOG(FATAL) << "Unsupported config type: " << config.index();
}
}
std::vector<BackendConfig> GenerateCustomKernelFusionConfigs(
const HloFusionInstruction& fusion,
se::DeviceDescription device_description) {
std::vector<BackendConfig> configs;
const CustomKernelFusionPatternRegistry* patterns =
CustomKernelFusionPatternRegistry::Default();
HloComputation* computation = fusion.called_computation();
HloInstruction* dot_instruction =
hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot);
std::vector<CustomKernelFusionPattern::Match> match =
patterns->Match(device_description, dot_instruction);
if (match.size() == 1) {
CustomKernelFusionRegistry* registry =
CustomKernelFusionRegistry::Default();
auto* custom_kernel_fusion = registry->Lookup(match[0].config().name());
if (custom_kernel_fusion != nullptr) {
const HloComputation* fusion_computation = fusion.called_computation();
std::unique_ptr<HloModule> new_module =
ExtractComputationIntoNewModule(*fusion_computation);
CustomKernelFusionRewriter rewriter(&device_description);
absl::StatusOr<bool> changed = rewriter.Run(new_module.get());
if (!changed.ok() || !changed.value()) {
VLOG(2) << "Skip custom kernel config. Failed to rewrite custom kernel "
"fusion: "
<< changed.status();
return configs;
}
HloInstruction* custom_kernel_fusion_instr =
hlo_query::GetFirstInstructionWithOpcode(
*new_module->entry_computation(), HloOpcode::kFusion);
if (custom_kernel_fusion_instr == nullptr) {
VLOG(2) << "Skip custom kernel config. Failed to find custom kernel "
"fusion instruction in the rewritten module.";
return configs;
}
absl::StatusOr<std::vector<CustomKernel>> kernels =
custom_kernel_fusion->LoadKernels(
device_description,
custom_kernel_fusion_instr->fused_instructions_computation());
if (!kernels.ok()) {
VLOG(2) << "Skip custom kernel config. Failed to load custom kernels: "
<< kernels.status();
} else {
for (int i = 0; i < kernels.value().size(); ++i) {
        GemmFusionAutotunerImpl::CustomKernelFusionConfig config{
            /*kernel_index=*/i};
configs.push_back(config);
}
}
}
}
return configs;
}
absl::StatusOr<std::vector<BackendConfig>>
GemmFusionAutotunerImpl::GenerateConfigs(const HloFusionInstruction& fusion) {
const HloDotInstruction* dot =
Cast<HloDotInstruction>(hlo_query::GetFirstInstructionWithOpcode(
*fusion.called_computations().at(0), HloOpcode::kDot));
std::vector<BackendConfig> configs;
if (!debug_options_.xla_gpu_experimental_disable_binary_libraries()) {
if (algorithm_util::IsSupportedByCublasOrCublasLt(
dot->precision_config().algorithm(), GetComputeCapability()) &&
!dot->sparse_operands() && IsAutotuningEnabled()) {
configs.push_back(CuBlasConfig{});
}
bool is_hopper =
!config_.IsDeviceless() && GetComputeCapability().IsAtLeastHopper();
bool is_cudnn_enabled =
debug_options_.xla_gpu_cudnn_gemm_fusion_level() > 0 && is_hopper &&
GetDnnVersionInfoOrDefault(config_.GetExecutor()).major_version() >= 9;
if ((IsFusionKind(fusion, kCuDnnFusionKind) && IsAutotuningEnabled()) ||
(IsFusionKind(fusion, kTritonGemmFusionKind) && is_cudnn_enabled &&
algorithm_util::IsSupportedByCudnn(
dot->precision_config().algorithm()) &&
!dot->sparse_operands() && IsAutotuningEnabled())) {
const int plan_count = GetCuDnnPlanCount(fusion, config_);
for (int plan_id = 0; plan_id < plan_count; ++plan_id) {
configs.push_back(CuDnnConfig{plan_id});
}
}
if (IsFusionKind(fusion, kCuDnnFusionKind)) {
if (!IsAutotuningEnabled()) {
configs.push_back(CuDnnConfig{-1});
}
return configs;
}
}
if ((IsFusionKind(fusion, kCustomFusionKind) ||
IsFusionKind(fusion, kTritonGemmFusionKind)) &&
IsAutotuningEnabled() && !config_.IsDeviceless()) {
std::vector<BackendConfig> custom_kernel_fusion_configs =
GenerateCustomKernelFusionConfigs(
fusion, config_.GetExecutor()->GetDeviceDescription());
configs.insert(configs.end(), custom_kernel_fusion_configs.begin(),
custom_kernel_fusion_configs.end());
}
TF_ASSIGN_OR_RETURN(std::vector<TritonGemmConfig> triton_configs,
GenerateTritonConfigs(*dot));
for (TritonGemmConfig& config : triton_configs) {
configs.push_back(std::move(config));
}
return configs;
}
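// Produces the list of Triton tilings to try for `dot`: the default or
// exhaustive config set, clamped to the tile-size limits, with split-K capped
// by a tile-count heuristic and extra constraints for sparse operands.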
absl::StatusOr<std::vector<TritonGemmConfig>>
GemmFusionAutotunerImpl::GenerateTritonConfigs(const HloDotInstruction& dot) {
std::vector<const HloInstruction*> converts =
HloBfsFindAll({&dot}, [&](const HloInstruction* node) {
return node->opcode() == HloOpcode::kConvert;
});
int minBitWidth = primitive_util::BitWidth(dot.shape().element_type());
for (auto convert : converts) {
auto in_type = convert->operand(0)->shape().element_type();
auto out_type = convert->shape().element_type();
minBitWidth = std::min({minBitWidth, primitive_util::BitWidth(in_type),
primitive_util::BitWidth(out_type)});
}
std::vector<TritonGemmConfig> result_configs;
TF_ASSIGN_OR_RETURN(TileSizeLimit limits, GetLimits(dot));
if (triton_configs_.empty()) {
triton_configs_ = !IsAutotuningEnabled()
? std::vector(1, kDefaultGemmTiling)
: debug_options_.xla_gpu_exhaustive_tiling_search()
? GetExhaustiveTritonConfigs()
: GetDefaultTritonConfigs();
}
constexpr int kMinGemmElements = 32 * 32;
bool small_dot =
ShapeUtil::ElementsIn(dot.operand(0)->shape()) <= kMinGemmElements &&
ShapeUtil::ElementsIn(dot.operand(1)->shape()) <= kMinGemmElements;
std::vector<TritonGemmConfig> triton_configs =
small_dot ? std::vector(1, kDefaultGemmTiling) : triton_configs_;
const int kCoreCount =
!config_.IsDeviceless()
? config_.GetExecutor()->GetDeviceDescription().core_count()
: 100;
const int64_t kSufficientNumberOfTiles = kMaxWavesForSplitK * kCoreCount;
const int64_t result_size = ShapeUtil::ElementsIn(dot.shape());
absl::flat_hash_set<TritonGemmConfig> added;
bool is_hopper =
!config_.IsDeviceless() && GetComputeCapability().IsAtLeastHopper();
for (TritonGemmConfig& config : triton_configs) {
config.block_m = std::min(config.block_m, limits.block_m);
config.block_n = std::min(config.block_n, limits.block_n);
config.block_k = std::min(config.block_k, limits.block_k);
int max_split_k = 1;
if (debug_options_.xla_gpu_enable_split_k_autotuning()) {
int64_t ratio = kSufficientNumberOfTiles * config.block_m *
config.block_n / result_size;
max_split_k = 1 << std::max<int>(tsl::Log2Floor64(ratio), 0);
}
config.split_k = std::min(config.split_k, max_split_k);
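    // Keep block_k at or above the ldmatrix granularity (256 bits) divided by
    // the narrowest element bit width that appears around the dot.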
constexpr int kLdmatrixGranularity = 256;
config.block_k =
std::max(config.block_k, kLdmatrixGranularity / minBitWidth);
if (dot.sparse_operands()) {
if (is_hopper) {
config.block_m = std::max(config.block_m, 64);
config.num_warps = std::max(config.num_warps, 4);
}
config.block_k = std::max(
config.block_k,
2 * std::max(kMinTileSize, kLdmatrixGranularity / minBitWidth));
int meta_elements = config.block_m * config.block_k / 16;
config.num_warps =
std::min<int>(config.num_warps, meta_elements / WarpSize());
}
if (added.insert(config).second) {
result_configs.push_back(config);
}
}
return result_configs;
}
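// Compiles an executable for every (fusion, config) candidate, in parallel on
// the thread pool when one is available, and returns the successfully built
// executables grouped by fusion.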
absl::StatusOr<absl::flat_hash_map<
const HloFusionInstruction*,
std::vector<GemmFusionAutotunerImpl::ExecutableCandidate>>>
GemmFusionAutotunerImpl::CompileAll(AutotunerCompileUtil& compile_util,
const BackendConfigs& task) {
tsl::profiler::ScopedAnnotation annotation("XlaAutotunerCompilation");
absl::Mutex results_mu;
absl::flat_hash_map<const HloFusionInstruction*,
std::vector<ExecutableCandidate>>
results;
if (task.empty()) {
return results;
}
const int log_every_n = GetLogEveryN();
int64_t config_count = 0;
for (const auto& [unused, configs] : task) {
config_count += configs.size();
}
std::atomic<int> done_count = 0;
std::atomic<int> good_count = 0;
auto log = [&](bool success) {
const int done_so_far = done_count.fetch_add(1) + 1;
const int good_so_far =
success ? good_count.fetch_add(1) + 1 : good_count.load();
if (done_so_far % log_every_n == 0) {
VLOG(2) << "Compiled " << done_so_far << " of " << config_count
<< " configs (successful: " << good_so_far << ")";
}
};
auto compile = [&](const HloFusionInstruction* fusion,
const BackendConfig& config,
bool allow_filtering_kernels_spilling_registers)
-> absl::StatusOr<bool> {
std::unique_ptr<Executable> executable;
if (std::holds_alternative<TritonGemmConfig>(config)) {
TF_ASSIGN_OR_RETURN(
executable, compile_util.Compile([&](const DebugOptions& opts) {
return TritonGemmAutotuneExtractor(
std::get<TritonGemmConfig>(config),
config_.GetExecutor()->GetDeviceDescription(), fusion, opts,
allow_filtering_kernels_spilling_registers);
}));
} else if (std::holds_alternative<CuDnnConfig>(config)) {
executable =
compile_util
.Compile([&](const DebugOptions& opts) {
return CuDnnFusionExtractor(
*fusion, opts, std::get<CuDnnConfig>(config).plan_id);
})
.value_or(nullptr);
} else if (std::holds_alternative<CuBlasConfig>(config)) {
TF_ASSIGN_OR_RETURN(
executable, compile_util.Compile([&](const DebugOptions& opts) {
return CublasGemmAutotuneExtractor(
config_, config_.GetExecutor()->GetDeviceDescription(),
toolkit_version_, fusion, opts);
}));
} else if (std::holds_alternative<CustomKernelFusionConfig>(config)) {
TF_ASSIGN_OR_RETURN(executable,
compile_util.Compile([&](const DebugOptions& opts) {
return CustomFusionKernelAutotuneExtractor(
std::get<CustomKernelFusionConfig>(config),
config_, toolkit_version_, fusion, opts);
}));
} else {
LOG(FATAL) << "Unsupported config type: " << config.index();
}
if (executable != nullptr) {
absl::MutexLock lock(&results_mu);
results[fusion].push_back({config, std::move(executable)});
return true;
}
return false;
};
if (thread_pool_ && thread_pool_->NumThreads() > 1 &&
debug_options_.xla_gpu_force_compilation_parallelism() != 1) {
if (task.size() == 1) {
absl::string_view fusion_name = task.begin()->first->name();
VLOG(1) << "Compiling " << config_count << " configs for " << fusion_name
<< " on " << thread_pool_->NumThreads() << " threads.";
} else {
VLOG(1) << "Compiling " << config_count << " configs for " << task.size()
<< " fusions on " << thread_pool_->NumThreads() << " threads.";
}
tsl::BlockingCounter counter(config_count);
for (const auto& key_value : task) {
const HloFusionInstruction* fusion = key_value.first;
const std::vector<BackendConfig>& gemm_config_set = key_value.second;
VLOG(10) << "Compiling fusion: " << fusion->name();
VLOG(10) << "Dumping fusion computation: "
<< fusion->called_computation()->ToString();
for (const BackendConfig& config : gemm_config_set) {
thread_pool_->Schedule([&, fusion] {
VLOG(10) << "Trying configuration forceable through: "
"--xla_gpu_override_gemm_autotuner='"
<< Serialize(config) << "'";
VLOG(10) << "WARNING: you are running in multithreaded-mode, the "
"last configuration printed out might not be the one "
"causing issues! Use "
"--xla_gpu_force_compilation_parallelism=1 to fix.";
absl::StatusOr<bool> has_executable =
compile(fusion, config, gemm_config_set.size() > 1);
          TF_CHECK_OK(has_executable.status())
              << "Failure occurred when compiling fusion " << fusion->name()
              << " with config '" << ToString(config)
              << "'\nFused HLO computation:\n"
              << fusion->fused_instructions_computation()->ToString();
log(has_executable.value());
counter.DecrementCount();
});
}
}
counter.Wait();
} else {
if (task.size() == 1) {
absl::string_view fusion_name = task.begin()->first->name();
LOG(WARNING) << "Compiling " << config_count << " configs for "
<< fusion_name << " on a single thread.";
} else {
LOG(WARNING) << "Compiling " << config_count << " configs for "
<< task.size() << " fusions on a single thread.";
}
for (const auto& [fusion, gemm_config_set] : task) {
VLOG(10) << "Compiling fusion: " << fusion->name();
VLOG(10) << "Dumping fusion computation: "
<< fusion->called_computation()->ToString();
for (const BackendConfig& config : gemm_config_set) {
VLOG(10) << "Trying configuration forceable through: "
"--xla_gpu_override_gemm_autotuner='"
<< Serialize(config) << "'";
TF_ASSIGN_OR_RETURN(
bool has_executable,
compile(fusion, config, gemm_config_set.size() > 1));
log(has_executable);
}
}
}
VLOG(1) << "Done compiling (successful: " << good_count.load() << ").";
return results;
}
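// Runs every compiled candidate for `fusion`, records its runtime, and, when
// correctness checking is enabled, verifies redzones and compares the output
// against the cuBLAS reference result.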
absl::StatusOr<std::vector<AutotuneResult>> GemmFusionAutotunerImpl::Profile(
AutotunerCompileUtil& compile_util, const HloFusionInstruction& fusion,
absl::Span<const ExecutableCandidate> candidates) {
const HloComputation* fusion_computation = fusion.called_computations().at(0);
se::StreamExecutor* stream_exec = config_.GetExecutor();
if (!stream_exec->SynchronizeAllActivity()) {
return Internal("Failed to synchronize GPU for autotuning.");
}
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaAutotunerMeasurement:#hlo_op=%s#",
fusion.name());
});
se::DeviceMemoryAllocator* allocator = config_.GetAllocator();
std::unique_ptr<se::DeviceMemoryAllocator> owned_allocator;
if (allocator == nullptr) {
owned_allocator =
std::make_unique<se::StreamExecutorMemoryAllocator>(stream_exec);
allocator = owned_allocator.get();
}
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
const HloInstruction& root = *fusion_computation->root_instruction();
BufferComparator comparator(root.shape(),
debug_options_.xla_gpu_autotune_gemm_rtol());
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*fusion_computation->FusionInstruction(), config_,
debug_options_, RedzoneBuffers::kAllInputs));
const int log_every_n = GetLogEveryN();
std::vector<AutotuneResult> results;
std::optional<ScopedShapedBuffer> reference_buffer;
for (const ExecutableCandidate& candidate : candidates) {
VLOG(5) << "Trying : " << ToString(candidate.config);
AutotuneResult res = FromConfig(candidate.config);
std::optional<ProfilingOutput> profiling_output;
if (IsAutotuningEnabled()) {
TF_ASSIGN_OR_RETURN(
profiling_output,
compile_util.ProfileExecutable(candidate.executable.get(), stream,
rz_buffers.input_buffers(),
rz_buffers.input_shapes()));
if (std::holds_alternative<CuBlasConfig>(candidate.config) &&
config_.should_check_correctness()) {
reference_buffer = std::move(profiling_output->output);
}
int ran_so_far = results.size() + 1;
if (ran_so_far % log_every_n == 0) {
VLOG(2) << "Ran " << ran_so_far << " configs of " << candidates.size()
<< ".";
}
if (!profiling_output) {
VLOG(5) << "Skipping this tiling.";
continue;
}
VLOG(5) << "Running the kernel took: " << profiling_output->duration;
if (profiling_output->duration >= absl::Seconds(1)) {
LOG(WARNING) << "Slow kernel for "
<< fusion.called_computations()[0]->ToString()
<< " took: " << profiling_output->duration << ". "
<< ToString(candidate.config);
}
*res.mutable_run_time() =
tsl::proto_utils::ToDurationProto(profiling_output->duration);
}
if (reference_buffer.has_value() &&
!std::holds_alternative<CuBlasConfig>(candidate.config)) {
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator::RedzoneCheckStatus rz_check_status,
rz_buffers.RedzoneAllocator().CheckRedzones());
if (!rz_check_status.ok()) {
LOG(ERROR) << "Red zone modified";
res.mutable_failure()->set_kind(AutotuneResult::REDZONE_MODIFIED);
res.mutable_failure()->set_msg(rz_check_status.RedzoneFailureMsg());
CHECK(!config_.should_crash_on_check_failure());
continue;
}
TF_ASSIGN_OR_RETURN(
bool outputs_match,
comparator.CompareEqual(
stream, profiling_output->output.root_buffer(),
reference_buffer->root_buffer()));
if (!outputs_match) {
const char kMessage[] =
"Results do not match the reference. This is likely a "
"bug/unexpected loss of precision.";
LOG(ERROR) << kMessage;
CHECK(!config_.should_crash_on_check_failure());
res.mutable_failure()->set_kind(AutotuneResult::DISQUALIFIED);
res.mutable_failure()->set_msg(kMessage);
}
}
results.push_back(std::move(res));
}
VLOG(2) << "Done running.";
return results;
}
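// Enumerates the full Cartesian search space defined by the kBlockSizes /
// kNumStages / kNumWarps / kSplitK (and kNumCtas on Hopper+) candidate sets,
// skipping combinations with more warps than the smaller tile can occupy.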
std::vector<TritonGemmConfig>
GemmFusionAutotunerImpl::GetExhaustiveTritonConfigs() const {
std::vector<TritonGemmConfig> configs;
se::CudaComputeCapability cc = GetComputeCapability();
bool should_tune_ctas =
debug_options_.xla_gpu_exhaustive_tiling_search() && cc.IsAtLeastHopper();
for (int num_stages : kNumStages) {
for (int tile_m : kBlockSizes) {
for (int tile_n : kBlockSizes) {
for (int tile_k : kBlockSizes) {
const int tile_lhs = tile_m * tile_k;
const int tile_rhs = tile_k * tile_n;
for (int num_warps : kNumWarps) {
if (num_warps * WarpSize() > std::min(tile_lhs, tile_rhs)) {
break;
}
for (int split_k : kSplitK) {
if (!debug_options_.xla_gpu_enable_split_k_autotuning() &&
split_k > 1) {
break;
}
if (should_tune_ctas) {
for (int num_ctas : kNumCtas) {
if (num_ctas <= num_warps) {
configs.push_back(TritonGemmConfig(tile_m, tile_n, tile_k,
split_k, num_stages,
num_warps, num_ctas));
}
}
} else {
configs.push_back(TritonGemmConfig(tile_m, tile_n, tile_k,
split_k, num_stages,
num_warps, 1));
}
}
}
}
}
}
}
return configs;
}
std::vector<TritonGemmConfig> GemmFusionAutotunerImpl::GetDefaultTritonConfigs()
const {
using Config = TritonGemmConfig;
std::vector<Config> configs = {
Config(32, 32, 256, 1, 1, 4), Config(64, 32, 32, 16, 1, 4),
Config(32, 64, 64, 4, 1, 4), Config(128, 128, 64, 4, 1, 4),
Config(16, 16, 256, 1, 1, 4), Config(16, 128, 32, 16, 1, 4),
Config(16, 64, 128, 1, 1, 4), Config(16, 128, 32, 8, 1, 4),
Config(16, 16, 512, 1, 1, 4), Config(32, 16, 512, 1, 1, 4),
Config(64, 32, 64, 1, 2, 8), Config(128, 256, 32, 1, 3, 8),
Config(256, 128, 32, 1, 3, 8), Config(256, 64, 32, 1, 4, 4),
Config(64, 256, 32, 1, 4, 4), Config(128, 64, 32, 1, 4, 4),
Config(64, 128, 32, 1, 4, 4), Config(256, 128, 128, 1, 3, 8),
Config(256, 64, 128, 1, 4, 4), Config(64, 256, 128, 1, 4, 4),
Config(128, 128, 128, 1, 4, 4), Config(128, 64, 64, 1, 4, 4),
Config(64, 128, 64, 1, 4, 4), Config(128, 32, 64, 1, 4, 4),
Config(64, 32, 64, 1, 4, 4), Config(32, 128, 32, 1, 4, 4),
Config(128, 128, 32, 1, 4, 4), Config(16, 16, 256, 1, 3, 4),
Config(128, 128, 64, 2, 1, 8), Config(64, 64, 64, 1, 2, 4),
Config(16, 64, 256, 8, 1, 4), Config(256, 256, 128, 1, 3, 8)};
if (GetComputeCapability().IsAtLeastHopper()) {
absl::c_copy(
std::vector<Config>{
Config(16, 32, 32, 8, 1, 2),
Config(16, 64, 128, 8, 1, 4),
Config(16, 64, 128, 16, 3, 4),
},
std::back_inserter(configs));
}
return configs;
}
absl::Status DumpAutotuningLogs(const DebugOptions& debug_opts,
const AutotuningLogs& autotuning_logs) {
if (absl::string_view file_path = debug_opts.xla_gpu_dump_autotune_logs_to();
!file_path.empty()) {
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
      return FailedPrecondition("File path cannot be resolved: %s", file_path);
}
std::string textproto;
tsl::protobuf::TextFormat::PrintToString(autotuning_logs, &textproto);
TF_RETURN_IF_ERROR(
tsl::WriteStringToFile(tsl::Env::Default(), resolved_path, textproto));
LOG(INFO) << "Autotune logs serialized to file: " << resolved_path;
}
return absl::OkStatus();
}
absl::Status GemmFusionAutotunerImpl::Autotune(
AutotunerCompileUtil& compile_util, const BackendConfigs& gemm_config_sets,
AutoTuneCacheKeyCount fusion_count_map) {
TF_ASSIGN_OR_RETURN(auto executable_sets,
CompileAll(compile_util, gemm_config_sets));
for (auto& [unused, candidates] : executable_sets) {
absl::c_sort(candidates, [](const auto& a, const auto& b) {
return a.config < b.config;
});
}
AutotuningLogs autotuning_logs;
int fusion_id = 0;
for (const auto& [fusion, candidates] : executable_sets) {
TF_ASSIGN_OR_RETURN(std::vector<AutotuneResult> results,
Profile(compile_util, *fusion, candidates));
if (!debug_options_.xla_gpu_cublas_fallback() &&
results.front().has_gemm()) {
results.erase(results.begin());
}
const HloInstruction* root =
fusion->called_computations().at(0)->root_instruction();
TF_ASSIGN_OR_RETURN(
AutotuneResult best,
PickBestResult(results, root->ToString(), root->GetModule()->config()));
VLOG(2) << "Best time: "
<< tsl::proto_utils::FromDurationProto(best.run_time());
if (debug_options_.xla_gpu_dump_autotuned_gemm_fusions()) {
TF_RETURN_IF_ERROR(DumpOriginalFusion(compile_util, *fusion, fusion_id));
TF_RETURN_IF_ERROR(DumpAutotunedFusion(
config_, toolkit_version_, compile_util, best, fusion, fusion_id++));
}
const AutotuneCacheKey key = AutotunerUtil::GetKey(fusion, config_);
TF_ASSIGN_OR_RETURN(
bool added, AutotunerUtil::AddResult(key, std::move(best), config_));
if (!added) {
LOG(WARNING) << "AutotunerUtil::AddResult already existed: "
<< key.ToString();
}
if (!debug_options_.xla_gpu_dump_autotune_logs_to().empty()) {
auto autotuning_log = autotuning_logs.add_logs();
autotuning_log->set_fusion_name(std::string(fusion->name()));
for (const auto& autotune_result : results) {
auto log_result = autotuning_log->add_results();
log_result->CopyFrom(autotune_result);
}
if (auto fusion_key_count = fusion_count_map.find(key);
fusion_key_count != fusion_count_map.end()) {
auto fusion_key = fusion_key_count->first;
auto fusion_count = fusion_key_count->second;
autotuning_log->set_fusion_count(fusion_count);
}
}
}
TF_RETURN_IF_ERROR(DumpAutotuningLogs(debug_options_, autotuning_logs));
return absl::OkStatus();
}
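// Returns the contiguous slice of `gemm_config_sets` assigned to
// `shard_index` when autotuning work is sharded across processes.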
static BackendConfigs TrimConfigs(const BackendConfigs& gemm_config_sets,
const int shard_index,
const int shard_count) {
const uint64_t bucket_size =
(gemm_config_sets.size() + shard_count - 1) / shard_count;
const uint64_t start = bucket_size * shard_index;
const uint64_t end = std::min(start + bucket_size, gemm_config_sets.size());
if (start >= end) {
return {};
}
return BackendConfigs(gemm_config_sets.cbegin() + start,
gemm_config_sets.cbegin() + end);
}
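// Publishes this shard's autotune results to the key-value store and blocks
// until the results of all other shards have been fetched and merged.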
absl::Status ExchangeResults(KeyValueStoreInterface& key_value_store,
const int module_id, const int shard_index,
const int shard_count) {
AutotuneResults results;
TF_RETURN_IF_ERROR(AutotunerUtil::SerializeAutotuneResults(&results));
TF_ASSIGN_OR_RETURN(std::string results_str,
AutotuneResultsToString(results, true));
constexpr absl::string_view kKeyPrefix = "gemm_fusion_autotuning_results";
TF_RETURN_IF_ERROR(key_value_store.Set(
absl::StrFormat("%s_%d_%d", kKeyPrefix, module_id, shard_index),
results_str));
VLOG(2) << "Rank " << shard_index << ": published results";
for (int i = 0; i < shard_count; ++i) {
if (i == shard_index) {
continue;
}
VLOG(2) << "Rank " << shard_index << ": waiting for results from rank " << i
<< " / " << shard_count;
TF_ASSIGN_OR_RETURN(
std::string autotune_results_str,
key_value_store.Get(
absl::StrFormat("%s_%d_%d", kKeyPrefix, module_id, i),
absl::Hours(24)));
TF_RETURN_IF_ERROR(
AutotunerUtil::LoadAutotuneResults(autotune_results_str, true));
}
return absl::OkStatus();
}
absl::StatusOr<bool> GemmFusionAutotuner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_SCOPED_LOGGING_TIMER("GEMM fusion autotuner");
const DebugOptions& debug_options = module->config().debug_options();
GemmFusionAutotunerImpl autotuner(config_, toolkit_version_, debug_options,
thread_pool_);
GemmConfigSetCollector gemm_config_set_collector(&autotuner);
TF_ASSIGN_OR_RETURN(BackendConfigs gemm_config_sets,
gemm_config_set_collector.CollectGemmConfigSets(
module, execution_threads));
const int total_fusion_count = gemm_config_sets.size();
AutoTuneCacheKeyCount fusion_count_map =
gemm_config_set_collector.GetFusionsCount();
if (!autotuner.IsAutotuningEnabled()) {
for (const auto& [fusion, tilings] : gemm_config_sets) {
const AutotuneCacheKey key = AutotunerUtil::GetKey(fusion, config_);
AutotuneResult res = FromConfig(tilings[0]);
*res.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::ZeroDuration());
TF_RETURN_IF_ERROR(AutotunerUtil::AddResult(key, res, config_).status());
}
} else if (!debug_options.xla_gpu_override_gemm_autotuner().empty()) {
AutotuneResult::TritonGemmKey gemm_key;
CHECK(tsl::protobuf::TextFormat::ParseFromString(
debug_options.xla_gpu_override_gemm_autotuner(), &gemm_key));
VLOG(1) << "Overriding GEMM autotuner with the following config: "
<< gemm_key.DebugString();
for (const auto& [fusion, unused] : gemm_config_sets) {
const AutotuneCacheKey key = AutotunerUtil::GetKey(fusion, config_);
AutotuneResult res;
*res.mutable_triton() = gemm_key;
*res.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::ZeroDuration());
TF_RETURN_IF_ERROR(AutotunerUtil::AddResult(key, res, config_).status());
}
} else if (!config_.IsDeviceless()) {
TF_ASSIGN_OR_RETURN(std::optional<AutotunerCompileUtil> opt_compile_util,
AutotunerCompileUtil::Create(config_, debug_options));
TF_RET_CHECK(opt_compile_util.has_value());
std::string correctness_check_str = config_.should_check_correctness()
? "(with correctness check)"
: "(without correctness check)";
const bool shard_autotuning = debug_options.xla_gpu_shard_autotuning() &&
key_value_store_.process_count > 1 &&
total_fusion_count > 0;
if (shard_autotuning) {
if (key_value_store_.key_value_store == nullptr) {
return absl::FailedPreconditionError(
"Sharded autotuning requested but key-value store is missing.");
}
gemm_config_sets =
TrimConfigs(gemm_config_sets, key_value_store_.process_index,
key_value_store_.process_count);
}
VLOG(1) << absl::StrFormat(
"Shard %d / %d: autotuning %d / %d fusions for %s %s.",
key_value_store_.process_index + 1, key_value_store_.process_count,
gemm_config_sets.size(), total_fusion_count, module->name(),
correctness_check_str);
TF_RETURN_IF_ERROR(autotuner.Autotune(*opt_compile_util, gemm_config_sets,
std::move(fusion_count_map)));
VLOG(1) << "Done autotuning.";
if (shard_autotuning) {
TF_RETURN_IF_ERROR(ExchangeResults(
*key_value_store_.key_value_store, module->unique_id(),
key_value_store_.process_index, key_value_store_.process_count));
}
}
return GemmFusionAutotunerRewriterVisitor(config_).RunOnModule(
module, execution_threads);
}
}
} | #include "xla/service/gpu/autotuning/gemm_fusion_autotuner.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/autotuning.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/service/call_inliner.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/transforms/gemm_fusion.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using HloExtractionTest = HloTestBase;
TEST_F(HloExtractionTest, InstructionExtractionIsCorrect) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
triton_gemm_dot {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
c0 = f32[10,10] convert(p0)
ROOT dot.0 = f32[10,10] dot(c0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
s = f32[10,10] sqrt(p1)
d = f32[10,10] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
ROOT r = f32[10,10] add(d, s)
})")
.value();
std::unique_ptr<HloModule> extracted_module = ExtractInstructionIntoNewModule(
*module->entry_computation()->root_instruction()->operand(0));
module = nullptr;
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
EXPECT_EQ(extracted_module->entry_computation()->instruction_count(), 3);
  TF_EXPECT_OK(VerifyHloModule(extracted_module.get(),
                               /*layout_sensitive=*/true,
                               /*allow_mixed_precision=*/false));
}
TEST_F(HloExtractionTest, ComputationExtractionIsCorrect) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
triton_gemm_dot {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
c0 = f32[10,10] convert(p0)
ROOT dot.0 = f32[10,10] dot(c0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
s = f32[10,10] sqrt(p1)
d = f32[10,10] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
ROOT r = f32[10,10] add(d, s)
})")
.value();
std::unique_ptr<HloModule> extracted_module =
ExtractComputationIntoNewModule(*module->entry_computation()
->root_instruction()
->operand(0)
->fused_instructions_computation());
module = nullptr;
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Convert(m::Parameter()), m::Parameter())));
EXPECT_EQ(extracted_module->entry_computation()->instruction_count(), 4);
  TF_EXPECT_OK(VerifyHloModule(extracted_module.get(),
                               /*layout_sensitive=*/true,
                               /*allow_mixed_precision=*/false));
}
class StatelessAutotunerTest : public HloTestBase {
public:
StatelessAutotunerTest()
      : HloTestBase(/*verifier_layout_sensitive=*/true,
                    /*allow_mixed_precision_in_hlo_verifier=*/false) {}
se::SemanticVersion GetToolkitVersion() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.runtime_version();
}
void SetUp() override {
AutotunerUtil::ClearAutotuneResults();
HloTestBase::SetUp();
}
void TearDown() override {
AutotunerUtil::ClearAutotuneResults();
HloTestBase::TearDown();
}
absl::StatusOr<std::vector<GemmFusionAutotunerImpl::BackendConfig>>
GetPossibleMatmulAutotuneConfigs(
const HloModule& module,
const se::CudaComputeCapability& compute_capability,
const se::SemanticVersion& toolkit_version,
const DebugOptions& debug_options) {
const HloFusionInstruction& fusion = *Cast<HloFusionInstruction>(
module.entry_computation()->root_instruction());
se::GpuDeviceInfoProto deviceless_proto;
auto ccc = deviceless_proto.mutable_cuda_compute_capability();
ccc->set_major(compute_capability.major);
ccc->set_minor(compute_capability.minor);
DeviceConfig test_config{backend().default_stream_executor(),
backend().memory_allocator()};
AutotuneConfig autotune_config{test_config, debug_options};
GemmFusionAutotunerImpl autotuner(autotune_config, toolkit_version,
debug_options, nullptr);
return autotuner.GenerateConfigs(fusion);
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
absl::StatusOr<std::vector<GemmFusionAutotunerImpl::BackendConfig>>
GetPossibleMatmulAutotuneConfigs(const HloModule& module) {
DeviceConfig device_config{backend().default_stream_executor(),
backend().memory_allocator()};
AutotuneConfig autotune_config{device_config, GetDebugOptionsForTest()};
GemmFusionAutotunerImpl autotuner(autotune_config, GetToolkitVersion(),
GetDebugOptionsForTest(), nullptr);
const HloFusionInstruction& fusion = *Cast<HloFusionInstruction>(
module.entry_computation()->root_instruction());
return autotuner.GenerateConfigs(fusion);
}
bool hasCublasConfig(
const std::vector<GemmFusionAutotunerImpl::BackendConfig>& configs) {
return std::any_of(
configs.begin(), configs.end(),
[](const GemmFusionAutotunerImpl::BackendConfig& config) {
return std::holds_alternative<GemmFusionAutotunerImpl::CuBlasConfig>(
config);
});
}
};
constexpr absl::string_view kHloDotFusionWithAlgorithm = R"(
HloModule module
computation {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
algorithm=$0,
lhs_contracting_dims={1},
rhs_contracting_dims={0}
}
ENTRY main {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT computation = f32[1024,1024] fusion(f32[1024,1024] p0,f32[1024,1024] p1),
kind=kCustom,
calls=computation
}
)";
TEST_F(StatelessAutotunerTest, NoCublasFallbackForTf32Tf32F32X3Algorithm) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(absl::Substitute(
kHloDotFusionWithAlgorithm, "dot_tf32_tf32_f32_x3")));
TF_ASSERT_OK_AND_ASSIGN(auto configs,
GetPossibleMatmulAutotuneConfigs(*module));
EXPECT_FALSE(hasCublasConfig(configs))
<< "There is no cublas implementation for dot_tf32_tf32_f32_x3. That is "
"why we don't want to fallback to cublas.";
}
TEST_F(StatelessAutotunerTest,
NoCublasFallbackForBf16Bf16F32AlgorithmOnHopper) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(absl::Substitute(
kHloDotFusionWithAlgorithm, "dot_bf16_bf16_f32")));
TF_ASSERT_OK_AND_ASSIGN(auto configs,
GetPossibleMatmulAutotuneConfigs(*module));
switch (GetCudaComputeCapability().major) {
case se::CudaComputeCapability::AMPERE:
EXPECT_TRUE(hasCublasConfig(configs))
<< "There is a cublas implementation for dot_bf16_bf16_f32 on Ampere";
break;
case se::CudaComputeCapability::HOPPER:
EXPECT_FALSE(hasCublasConfig(configs))
<< "There is no cublas implementation for dot_bf16_bf16_f32 on "
"Hopper. That is why we don't want to fallback to cublas.";
break;
default:
EXPECT_FALSE(hasCublasConfig(configs));
}
}
class GemmFusionAutotunerTest : public StatelessAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
StatelessAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(true);
debug_options.set_xla_gpu_cublas_fallback(false);
debug_options.set_xla_gpu_cudnn_gemm_fusion_level(0);
return debug_options;
}
void CheckTritonAutotuning(absl::string_view hlo,
absl::string_view expected) {
HloPassPipeline pipeline("gemm_rewrite");
pipeline.AddPass<GemmFusion>(backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability());
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
DebugOptions opts;
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
opts},
GetToolkitVersion(), &thread_pool, key_value_store);
RunAndFilecheckHloRewrite(
hlo, std::move(pipeline), expected, [](const HloModule* m) {
VLOG(5) << m->ToString();
const HloInstruction* dot_fusion =
m->entry_computation()->root_instruction();
if (dot_fusion->opcode() == HloOpcode::kReduce) {
dot_fusion = dot_fusion->operand(0);
}
CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion);
if (!dot_fusion->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.has_cudnn_fusion_config()) {
CHECK_GT(dot_fusion->backend_config<GpuBackendConfig>()
.value()
.fusion_backend_config()
.triton_gemm_config()
.block_m(),
0);
}
});
}
};
class GemmFusionAutotunerTestWithMorePreciseReduction
: public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(
true);
return debug_options;
}
};
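// Generates Triton configs for `dot` using a deviceless autotune config with
// the given compute capability, so no physical GPU is required.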
absl::StatusOr<std::vector<TritonGemmConfig>>
GetPossibleMatmulAutotuneTritonConfigs(
const HloDotInstruction& dot,
const se::CudaComputeCapability& compute_capability,
const se::SemanticVersion& toolkit_version,
const DebugOptions& debug_options) {
se::GpuDeviceInfoProto deviceless_proto;
auto ccc = deviceless_proto.mutable_cuda_compute_capability();
ccc->set_major(compute_capability.major);
ccc->set_minor(compute_capability.minor);
DevicelessConfig test_config{se::DeviceDescription{deviceless_proto}};
AutotuneConfig autotune_config{test_config, debug_options};
GemmFusionAutotunerImpl autotuner(autotune_config, toolkit_version,
debug_options, nullptr);
return autotuner.GenerateTritonConfigs(dot);
}
TEST_F(GemmFusionAutotunerTest, AmpereUsesMoreThanTwoStages) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.num_stages > 2; }));
}
TEST_F(GemmFusionAutotunerTest, SmallOutputCanUseLargeSplitK) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.split_k >= 4; }));
}
TEST_F(GemmFusionAutotunerTest, LargeOutputDoesNotUseLargeSplitK) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[20480,20480] parameter(0)
p1 = f32[20480,20480] parameter(1)
ROOT r = f32[20480,20480] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_FALSE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.split_k > 1; }));
}
TEST_F(GemmFusionAutotunerTest, Int8FusedGemm) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[128,64] parameter(0)
c = f16[128,64] convert(x)
y = f16[64,6144] parameter(1)
ROOT out = f16[128,6144] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{5e-3, 5e-3}));
}
TEST_F(GemmFusionAutotunerTest, Int8FusedGemm256) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[128,256] parameter(0)
c = f16[128,256] convert(x)
y = f16[256,6144] parameter(1)
ROOT out = f16[128,6144] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-2, 1e-2}));
}
TEST_F(GemmFusionAutotunerTest, SelectsSplitK) {
const std::string kHloText = R"(
HloModule t
ENTRY e {
p0 = s8[7,8192] parameter(0)
p0c = f16[7,8192] convert(p0)
p1 = f16[8192,18] parameter(1)
ROOT dot.0 = f16[7,18] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: reduce
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: kCustom
; CHECK-NEXT: kLoop
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1, 0.5}));
}
TEST_F(GemmFusionAutotunerTestWithMorePreciseReduction, SelectsSplitK) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = s8[7,8192] parameter(0)
p0c = f16[7,8192] convert(p0)
p1 = f16[8192,18] parameter(1)
ROOT dot.0 = f16[7,18] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: reduce
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: kCustom
; CHECK-NEXT: kLoop
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-2, 1e-3}));
}
TEST_F(GemmFusionAutotunerTest, ApplySplitKWithoutAlteringTiling) {
const std::string kHloText = R"(
triton_dot {
p0 = f16[55,120] parameter(0)
p1 = f16[120,20] parameter(1)
ROOT dot = f16[55,20] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[55,120]{1,0} parameter(0)
p1 = f16[120,20]{1,0} parameter(1)
ROOT _ = f16[55,20] fusion(p0, p1), kind=kCustom, calls=triton_dot,
backend_config={"fusion_backend_config":{kind: "__triton_gemm", triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,"split_k":3,"num_stages":1,"num_warps":2,"num_ctas":1}}}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: f16[3,55,20]
; CHECK: {"block_m":16,"block_n":64,"block_k":32,"split_k":3,"num_stages":1,"num_warps":2,"num_ctas":1}
; CHECK: f16[55,20]{1,0} {{(reduce|fusion)}}
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
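// With a deliberately oversized tiling, backend compilation in autotuning
// mode must reject the kernel instead of keeping one that spills registers.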
TEST_F(GemmFusionAutotunerTest, DoNotRunAutotuningKernelSpillingRegisters) {
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = s8[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.p0 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
%convert.p1 = f16[4,12288]{1,0} convert(s8[4,12288]{1,0} %p1)
%dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %convert.p1, f16[12288,1536]{1,0} %convert.p0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT %convert = s8[4,1536]{1,0} convert(f16[4,1536]{1,0} %dot)
}
ENTRY %e {
%get-tuple-element.7020 = s8[12288,1536]{1,0} parameter(0)
%convert = s8[4,12288]{1,0} parameter(1)
ROOT %triton = s8[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %get-tuple-element.7020, s8[4,12288]{1,0} %convert), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"256","block_n":"256","block_k":"16","split_k":"1","num_stages":"1","num_warps":"16","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
EXPECT_THAT(backend().compiler()->RunBackend(
std::move(module), backend().default_stream_executor(),
                  {/*device_allocator=*/nullptr,
                   /*thread_pool=*/nullptr,
                   /*layout_canonicalization_callback=*/{},
                   /*is_autotuning_compilation=*/true}),
::testing::AnyOf(
tsl::testing::StatusIs(
tsl::error::CANCELLED,
"Compilation result discarded due to register spilling"),
tsl::testing::StatusIs(
tsl::error::RESOURCE_EXHAUSTED,
::testing::HasSubstr("Register allocation failed")),
tsl::testing::StatusIs(
tsl::error::RESOURCE_EXHAUSTED,
::testing::HasSubstr("Insufficient registers"))));
}
TEST_F(GemmFusionAutotunerTest,
DoNotFilterOutAutotuningKernelSpillingRegisters) {
if (GetCudaComputeCapability().IsAtLeastHopper()) {
GTEST_SKIP() << "Hopper and newer runs out of registers for such HLOs";
}
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = s8[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.p0 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
%convert.p1 = f16[4,12288]{1,0} convert(s8[4,12288]{1,0} %p1)
%dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %convert.p1, f16[12288,1536]{1,0} %convert.p0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT %convert = s8[4,1536]{1,0} convert(f16[4,1536]{1,0} %dot)
}
ENTRY %e {
%get-tuple-element.7020 = s8[12288,1536]{1,0} parameter(0)
%convert = s8[4,12288]{1,0} parameter(1)
ROOT %triton = s8[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %get-tuple-element.7020, s8[4,12288]{1,0} %convert), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"256","block_n":"256","block_k":"16","split_k":"1","num_stages":"1","num_warps":"16","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
HloModuleConfig config = module->config();
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_gpu_filter_kernels_spilling_registers_on_autotuning(
false);
config.set_debug_options(debug_options);
module->set_config(config);
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/true})
.value();
EXPECT_NE(executable, nullptr);
}
TEST_F(GemmFusionAutotunerTest, RunAutotuningKernelNotSpillingRegisters) {
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = f16[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.10406 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
ROOT %dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %p1, f16[12288,1536]{1,0} %convert.10406), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY %e {
%p0 = s8[12288,1536]{1,0} parameter(0)
%p1 = f16[4,12288]{1,0} parameter(1)
ROOT %triton_dot = f16[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %p0, f16[4,12288]{1,0} %p1), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"16","block_n":"32","block_k":"16","split_k":"1","num_stages":"1","num_warps":"2","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                     {/*device_allocator=*/nullptr,
                      /*thread_pool=*/nullptr,
                      /*layout_canonicalization_callback=*/{},
                      /*is_autotuning_compilation=*/true})
.value();
EXPECT_NE(executable, nullptr);
}
using GemmFusionAutotunerDumpTest = GemmFusionAutotunerTest;
TEST_F(GemmFusionAutotunerDumpTest, Fp8CublasltFallbackSupport) {
const std::string kHloText = R"(
HloModule o
gemm_fusion {
p0 = f8e4m3fn[64,6144]{1,0} parameter(0)
p1 = f8e4m3fn[64,6144]{1,0} parameter(1)
ROOT %dot.0 = f32[64,64]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY main {
p0 = f8e4m3fn[64,6144]{1,0} parameter(0)
p1 = f8e4m3fn[64,6144]{1,0} parameter(1)
ROOT %dot.0 = f32[64,64]{1,0} fusion(p0, p1), kind=kCustom, calls=gemm_fusion, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
DebugOptions opts;
AutotuneConfig autotune_config{
DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
opts};
AutotuneCacheKey cache_key(autotune_config.GetModelStr(),
*module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(AutotuneResults autotune_results_override,
ParseTextProto<AutotuneResults>(R"pb(
version: 3
results {
device: "..."
hlo: "..."
result {
gemm { algorithm: -1 }
run_time { nanos: 14 }
}
})pb"));
autotune_results_override.mutable_results(0)->set_device(
std::string(cache_key.GetModelStr()));
autotune_results_override.mutable_results(0)->set_hlo(
std::string(cache_key.GetHlo()));
CHECK_OK(AutotunerUtil::LoadAutotuneResults(autotune_results_override));
HloPassPipeline pipeline("gemm_autotune");
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(autotune_config, GetToolkitVersion(),
&thread_pool, key_value_store);
pipeline.AddPass<CallInliner>();
for (GemmRewriterOptions::DType dtype :
{GemmRewriterOptions::DType::kFp8Only,
GemmRewriterOptions::DType::kNonFp8Only}) {
pipeline.AddPass<GemmRewriter>(autotune_config.GetGpuComputeCapability(),
GetToolkitVersion(),
GemmRewriterOptions{dtype});
}
TF_EXPECT_OK(HloTestBase::RunHloPass(&pipeline, module.get()));
const bool is_at_least_hopper =
std::holds_alternative<se::CudaComputeCapability>(
autotune_config.GetGpuComputeCapability()) &&
std::get<se::CudaComputeCapability>(
autotune_config.GetGpuComputeCapability())
.IsAtLeastHopper();
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
      // NOTE: the FileCheck patterns below reconstruct truncated string
      // literals (an assumption): Hopper+ should fall back to cuBLASLt,
      // earlier architectures to plain cuBLAS.
      RunFileCheck(module->ToString(), is_at_least_hopper
                                           ? "// CHECK: __cublas$lt"
                                           : "// CHECK: __cublas$gemm"));
EXPECT_TRUE(filecheck_matches);
}
TEST_F(GemmFusionAutotunerDumpTest, DumpingWorks) {
HloModuleConfig config;
DebugOptions options = GetDebugOptionsForTest();
options.set_xla_gpu_cublas_fallback(true);
options.set_xla_gpu_dump_autotuned_gemm_fusions(true);
std::string output_directory;
if (!tsl::io::GetTestUndeclaredOutputsDir(&output_directory)) {
output_directory = tsl::testing::TmpDir();
}
options.set_xla_dump_to(output_directory);
config.set_debug_options(options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion1 {
p0 = f32[333,333] parameter(0)
s = f32[333,333] sine(p0)
p1 = f32[333,333] parameter(1)
c = f32[333,333] cosine(p1)
ROOT dot = f32[333,333] dot(s, c),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[333,333] parameter(0)
p1 = f32[333,333] parameter(1)
ROOT rr = f32[333,333] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__triton_gemm"}}
})",
config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
std::string dump;
TF_EXPECT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(output_directory,
FilenameFor(*optimized_module, "",
"gemm_fusion_0.rr.txt")),
&dump));
EXPECT_TRUE(*RunFileCheck(dump, R"(
CHECK: HloModule rr
CHECK-NOT: cublas
CHECK: __triton_gemm
CHECK-NOT: block_m
)"));
dump.clear();
TF_EXPECT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(
output_directory,
FilenameFor(*optimized_module, "",
"gemm_fusion_0.rr.optimized.txt")),
&dump));
EXPECT_TRUE(*RunFileCheck(dump, R"(
CHECK: HloModule rr
CHECK-NOT: triton
CHECK: cublas
)"));
}
TEST_F(GemmFusionAutotunerTest, AutotuneCuDnnFusion) {
const std::string kHlo = R"(
fusion1 {
p0 = f32[3,28,32] parameter(0)
p1 = f32[3,28,32] parameter(1)
ROOT d = f32[3,32,32] dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[3,28,32] parameter(0)
p1 = f32[3,28,32] parameter(1)
ROOT _ = f32[3,32,32] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})";
CheckTritonAutotuning(kHlo, R"(
)");
}
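// Parameterized over xla_gpu_autotune_level values in [0, 5).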
class GemmFusionAutotunerLevelTest : public StatelessAutotunerTest,
public ::testing::WithParamInterface<int> {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
StatelessAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_autotune_level(GetParam());
debug_options.set_xla_gpu_cublas_fallback(false);
return debug_options;
}
};
TEST_P(GemmFusionAutotunerLevelTest, AllAutotuningLevelsWorkCorrectly) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = pred[64,10] parameter(0)
p0c = f32[64,10] convert(p0)
p1 = f32[10,128] parameter(1)
ROOT r = f32[64,128] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: kind=kCustom
; CHECK-SAME: block_m
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
TEST_P(GemmFusionAutotunerLevelTest, Deviceless) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[16,16] parameter(0)
c = f16[16,16] convert(x)
y = f16[16,16] parameter(1)
ROOT out = f16[16,16] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
HloPassPipeline pipeline("gemm_rewrite_deviceless");
pipeline.AddPass<GemmFusion>(backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability());
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
DebugOptions opts;
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(
AutotuneConfig{
DevicelessConfig{
backend().default_stream_executor()->GetDeviceDescription()},
opts},
GetToolkitVersion(), &thread_pool, key_value_store);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo));
if (GetDebugOptionsForTest().xla_gpu_autotune_level() == 0) {
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pipeline, module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
R"(
)"));
EXPECT_TRUE(filecheck_matches);
} else {
EXPECT_THAT(HloTestBase::RunHloPass(&pipeline, module.get()),
tsl::testing::StatusIs(
tsl::error::INTERNAL,
::testing::HasSubstr(
"Expect autotune result cache hit for deviceless")));
}
}
INSTANTIATE_TEST_SUITE_P(GemmFusionAutotunerLevelSweep,
GemmFusionAutotunerLevelTest, ::testing::Range(0, 5));
class GemmFusionAutotunerExhaustiveTest : public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_exhaustive_tiling_search(true);
return debug_options;
}
};
TEST_F(GemmFusionAutotunerExhaustiveTest, DISABLED_CompileOnly) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[16,16] parameter(0)
c = f16[16,16] convert(x)
y = f16[16,16] parameter(1)
ROOT out = f16[16,16] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
}
TEST_F(GemmFusionAutotunerExhaustiveTest, SkipsCrashingTileKConfig) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
ENTRY e {
x = s8[33,33]{1,0} parameter(0)
c = f16[33,33]{1,0} convert(x)
y = f16[33,33]{1,0} parameter(1)
ROOT out = f16[33,33]{1,0} dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::all_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.block_k > 16; }));
}
class GemmFusionAutotunerDisableSplitK : public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_split_k_autotuning(false);
return debug_options;
}
};
TEST_F(GemmFusionAutotunerDisableSplitK, SplitKIsDisabled) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::all_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.split_k == 1; }));
}
class GemmFusionAutotunerConfigTest
: public StatelessAutotunerTest,
public ::testing::WithParamInterface<bool> {};
TEST_P(GemmFusionAutotunerConfigTest, SparseDotDiscardsUnsupportedTiles) {
const std::string kHloText = R"(
HloModule test
ENTRY wais {
lhs = f16[5,1600] parameter(0)
rhs = f16[3200,10] parameter(1)
meta = u16[5,200] parameter(2)
ROOT dot = f32[5,10] dot(lhs, rhs, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_exhaustive_tiling_search(GetParam());
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), debug_options));
for (const auto& config : configs) {
int metadata_size = config.block_m * config.block_k / 16;
EXPECT_LE(config.num_warps * WarpSize(), metadata_size);
EXPECT_GT(config.block_k, 16);
}
}
INSTANTIATE_TEST_SUITE_P(GemmFusionAutotunerConfigSweep,
GemmFusionAutotunerConfigTest, ::testing::Bool());
TEST_F(StatelessAutotunerTest,
ExhaustiveAutotuningTunesNumberOfCtasFromHopper) {
const std::string kHloText = R"(
HloModule test
ENTRY main {
lhs = f32[5,1600] parameter(0)
rhs = f32[1600,10] parameter(1)
ROOT dot = f32[5,10] dot(lhs, rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
DebugOptions debug_options_with_exhaustive_autotuning =
GetDebugOptionsForTest();
debug_options_with_exhaustive_autotuning.set_xla_gpu_exhaustive_tiling_search(
true);
auto get_configs = [&](const se::CudaComputeCapability& cc,
const DebugOptions& debug_options) {
return GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
cc, GetToolkitVersion(), debug_options)
.value();
};
for (const auto& config :
get_configs(se::CudaComputeCapability::Ampere(),
debug_options_with_exhaustive_autotuning)) {
EXPECT_EQ(config.num_ctas, 1);
}
absl::flat_hash_set<int> config_num_ctas;
for (const auto& config :
get_configs(se::CudaComputeCapability::Hopper(),
debug_options_with_exhaustive_autotuning)) {
config_num_ctas.insert(config.num_ctas);
}
EXPECT_GT(config_num_ctas.size(), 1);
DebugOptions debug_options_without_exhaustive_autotuning =
GetDebugOptionsForTest();
debug_options_without_exhaustive_autotuning
.set_xla_gpu_exhaustive_tiling_search(false);
for (const auto& config :
get_configs(se::CudaComputeCapability::Hopper(),
debug_options_without_exhaustive_autotuning)) {
EXPECT_EQ(config.num_ctas, 1);
}
}
TEST_F(GemmFusionAutotunerTest, SplitKFloatNormalization) {
if (!GetCudaComputeCapability().IsAtLeastHopper()) {
GTEST_SKIP() << "f8 types are only supported from Hopper onwards.";
}
const se::CudaComputeCapability compute_capability =
GetCudaComputeCapability();
se::GpuDeviceInfoProto deviceless_proto;
auto ccc = deviceless_proto.mutable_cuda_compute_capability();
ccc->set_major(compute_capability.major);
ccc->set_minor(compute_capability.minor);
DeviceConfig test_config{backend().default_stream_executor(),
backend().memory_allocator()};
AutotuneConfig autotune_config{test_config, GetDebugOptionsForTest()};
GemmFusionAutotunerImpl autotuner(autotune_config, GetToolkitVersion(),
GetDebugOptionsForTest(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(
auto compile_util,
      AutotunerCompileUtil::Create(autotune_config, GetDebugOptionsForTest()));
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
%gemm_fusion_dot_computation (parameter_0: f8e5m2[256,256], parameter_1: f8e4m3fn[128,256]) -> f8e5m2[256,128] {
%parameter_0 = f8e5m2[256,256]{1,0} parameter(0)
%parameter_1 = f8e4m3fn[128,256]{1,0} parameter(1)
%dot.1 = f32[256,128]{1,0} dot(f8e5m2[256,256]{1,0} %parameter_0, f8e4m3fn[128,256]{1,0} %parameter_1), lhs_contracting_dims={0}, rhs_contracting_dims={1}
ROOT %convert.2 = f8e5m2[256,128]{1,0} convert(f32[256,128]{1,0} %dot.1)
}
ENTRY entry {
%p0 = f8e5m2[256,256]{1,0} parameter(0)
%p1 = f8e4m3fn[128,256]{1,0} parameter(1)
ROOT r = f8e5m2[256,128]{1,0} fusion(f8e5m2[256,256]{1,0} %p0, f8e4m3fn[128,256]{1,0} %p1), kind=kCustom, calls=%gemm_fusion_dot_computation, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
})")
.value();
GemmFusionAutotunerImpl::BackendConfigs configs;
configs.emplace_back(
DynCast<HloFusionInstruction>(
module->entry_computation()->root_instruction()),
std::vector<GemmFusionAutotunerImpl::BackendConfig>{
GemmFusionAutotunerImpl::BackendConfig(TritonGemmConfig(
              /*block_m=*/32,
              /*block_n=*/64,
              /*block_k=*/64,
              /*split_k=*/4,
              /*num_stages=*/1,
              /*num_warps=*/4,
              /*num_ctas=*/1))});
CHECK_OK(autotuner.CompileAll(*compile_util, configs));
}
TEST_F(GemmFusionAutotunerTest, CreatesCustomKernelFusionConfigs) {
const std::string kHlo = R"(
HloModule module, entry_computation_layout={(bf16[1024,1024]{1,0}, bf16[1024,1024]{1,0})->f32[1024,1024]{1,0}}
%gemm_fusion_r_computation {
%parameter_0 = bf16[1024,1024]{1,0} parameter(0)
%convert.2 = f32[1024,1024]{1,0} convert(%parameter_0)
%parameter_1 = bf16[1024,1024]{1,0} parameter(1)
%convert.3 = f32[1024,1024]{1,0} convert(%parameter_1)
ROOT %r.1 = f32[1024,1024]{1,0} dot(%convert.2, %convert.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY main {
%p0 = bf16[1024,1024]{1,0} parameter(0)
%p1 = bf16[1024,1024]{1,0} parameter(1)
ROOT %gemm_fusion_r = f32[1024,1024]{1,0} fusion(%p0, %p1), kind=kCustom, calls=gemm_fusion_r_computation, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
})";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(kHlo).value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<GemmFusionAutotunerImpl::BackendConfig> configs,
GetPossibleMatmulAutotuneConfigs(*module, compute_capability,
GetToolkitVersion(),
GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const GemmFusionAutotunerImpl::BackendConfig& config) {
return std::holds_alternative<
GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config);
}));
}
TEST_F(GemmFusionAutotunerTest, GeneratesConfigForUpcastGemmWithPrologue) {
const std::string kHlo = R"(
HloModule module
%gemm_fusion_r_computation (parameter_0.1: f32[1,256,4,4096], parameter_1.1: bf16[1,4,4096,4096]) -> f32[256,4096] {
%parameter_0.1 = f32[1,256,4,4096]{3,2,1,0} parameter(0)
%bitcast.60 = f32[256,16384]{1,0} bitcast(f32[1,256,4,4096]{3,2,1,0} %parameter_0.1)
%parameter_1.1 = bf16[1,4,4096,4096]{3,2,1,0} parameter(1)
%bitcast.61 = bf16[16384,4096]{1,0} bitcast(bf16[1,4,4096,4096]{3,2,1,0} %parameter_1.1)
%convert.22 = f32[16384,4096]{1,0} convert(bf16[16384,4096]{1,0} %bitcast.61)
ROOT r = f32[256,4096]{1,0} dot(f32[256,16384]{1,0} %bitcast.60, f32[16384,4096]{1,0} %convert.22), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY main {
%p0 = f32[1,256,4,4096] parameter(0)
%p1 = bf16[1,4,4096,4096] parameter(1)
ROOT %gemm_fusion_r = f32[256,4096] fusion(%p0, %p1), kind=kCustom,
calls=gemm_fusion_r_computation,
backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
}
)";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(kHlo).value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<GemmFusionAutotunerImpl::BackendConfig> configs,
GetPossibleMatmulAutotuneConfigs(*module, compute_capability,
GetToolkitVersion(),
GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const GemmFusionAutotunerImpl::BackendConfig& config) {
return std::holds_alternative<
GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config);
}));
}
TEST_F(GemmFusionAutotunerTest,
GeneratesConfigForUpcastGemmWithPrologueAndEpilogue) {
const std::string kHlo = R"(
HloModule module
%gemm_fusion_r_computation (parameter_0.1: f32[1,256,4,4096], parameter_1.1: bf16[1,4,4096,4096]) -> bf16[1048576] {
%parameter_0.1 = f32[1,256,4,4096]{3,2,1,0} parameter(0)
%bitcast.60 = f32[256,16384]{1,0} bitcast(f32[1,256,4,4096]{3,2,1,0} %parameter_0.1)
%parameter_1.1 = bf16[1,4,4096,4096]{3,2,1,0} parameter(1)
%bitcast.61 = bf16[16384,4096]{1,0} bitcast(bf16[1,4,4096,4096]{3,2,1,0} %parameter_1.1)
%convert.22 = f32[16384,4096]{1,0} convert(bf16[16384,4096]{1,0} %bitcast.61)
%dot.5 = f32[256,4096]{1,0} dot(f32[256,16384]{1,0} %bitcast.60, f32[16384,4096]{1,0} %convert.22), lhs_contracting_dims={1}, rhs_contracting_dims={0}
%convert.23 = bf16[256,4096]{1,0} convert(f32[256,4096]{1,0} %dot.5)
%bitcast.62 = bf16[1,256,4096]{2,1,0} bitcast(bf16[256,4096]{1,0} %convert.23)
%transpose.18 = bf16[1,4096,256]{2,1,0} transpose(bf16[1,256,4096]{2,1,0} %bitcast.62), dimensions={0,2,1}
ROOT %bitcast.63 = bf16[1048576]{0} bitcast(bf16[1,4096,256]{2,1,0} %transpose.18)
}
ENTRY main {
%p0 = f32[1,256,4,4096] parameter(0)
%p1 = bf16[1,4,4096,4096] parameter(1)
ROOT %gemm_fusion_r = bf16[1048576] fusion(%p0, %p1), kind=kCustom,
calls=gemm_fusion_r_computation,
backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
}
)";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(kHlo).value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<GemmFusionAutotunerImpl::BackendConfig> configs,
GetPossibleMatmulAutotuneConfigs(*module, compute_capability,
GetToolkitVersion(),
GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const GemmFusionAutotunerImpl::BackendConfig& config) {
return std::holds_alternative<
GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config);
}));
}
TEST_F(GemmFusionAutotunerTest, RewritesGemmFusionToCustomKernelFusion) {
const std::string kHlo = R"(
HloModule module, entry_computation_layout={(bf16[1024,1024]{1,0}, bf16[1024,1024]{1,0})->f32[1024,1024]{1,0}}
%gemm_fusion_r_computation {
%parameter_0 = bf16[1024,1024]{1,0} parameter(0)
%convert.2 = f32[1024,1024]{1,0} convert(%parameter_0)
%parameter_1 = bf16[1024,1024]{1,0} parameter(1)
%convert.3 = f32[1024,1024]{1,0} convert(%parameter_1)
ROOT %r.1 = f32[1024,1024]{1,0} dot(%convert.2, %convert.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY main {
%p0 = bf16[1024,1024]{1,0} parameter(0)
%p1 = bf16[1024,1024]{1,0} parameter(1)
ROOT %gemm_fusion_r = f32[1024,1024]{1,0} fusion(%p0, %p1), kind=kCustom, calls=gemm_fusion_r_computation, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
}
)";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(kHlo).value();
DebugOptions opts;
AutotuneConfig autotune_config{
DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
opts};
AutotuneCacheKey cache_key(autotune_config.GetModelStr(),
*module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(AutotuneResults autotune_results_override,
ParseTextProto<AutotuneResults>(R"pb(
version: 3
results {
device: "..."
hlo: "..."
result {
custom_kernel_fusion { kernel_index: 1 }
run_time { nanos: 14 }
}
})pb"));
autotune_results_override.mutable_results(0)->set_device(
std::string(cache_key.GetModelStr()));
autotune_results_override.mutable_results(0)->set_hlo(
std::string(cache_key.GetHlo()));
GemmFusionAutotunerRewriterVisitor visitor(autotune_config);
CHECK_OK(AutotunerUtil::LoadAutotuneResults(autotune_results_override));
visitor.RunOnModule(module.get(), {}).value();
std::string pattern = R"(
CHECK: ROOT %cutlass_gemm_with_upcast
CHECK-SAME: fusion
CHECK-SAME: kind=kCustom
CHECK-SAME: "kernel_index":1
)";
TF_ASSERT_OK_AND_ASSIGN(bool file_check_matches,
RunFileCheck(module->ToString(), pattern));
EXPECT_TRUE(file_check_matches);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0e944d9e-4edc-4707-94c2-94790f462ce2 | cpp | google/arolla | dynamic_compiled_expr | arolla/expr/eval/dynamic_compiled_expr.cc | arolla/expr/eval/dynamic_compiled_expr_test.cc | #include "arolla/expr/eval/dynamic_compiled_expr.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/derived_qtype_cast_operator.h"
#include "arolla/expr/eval/compile_std_function_operator.h"
#include "arolla/expr/eval/compile_where_operator.h"
#include "arolla/expr/eval/compile_while_operator.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/executable_builder.h"
#include "arolla/expr/eval/extensions.h"
#include "arolla/expr/eval/prepare_expression.h"
#include "arolla/expr/eval/slot_allocator.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_stack_trace.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/operators/std_function_operator.h"
#include "arolla/expr/operators/while_loop/while_loop.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/operators/core/utility_operators.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/demangle.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
namespace {
const OperatorDirectory& GetOperatorDirectory(
const DynamicEvaluationEngineOptions& options) {
return options.operator_directory != nullptr
? *options.operator_directory
: *OperatorRegistry::GetInstance();
}
struct OutputInfo {
ExprNodePtr expr;
TypedSlot forced_output_slot;
};
absl::Status VerifySlotsCount(absl::string_view op_name,
absl::Span<const TypedSlot> input_slots,
int64_t expected_count) {
if (input_slots.size() != expected_count) {
return absl::InvalidArgumentError(
absl::StrFormat("%s operator expects %d argument(s), got %d", op_name,
expected_count, input_slots.size()));
}
return absl::OkStatus();
}
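// Post-order visitor that assigns a TypedSlot to every node of the prepared
// expression and emits the corresponding bound operators into the
// ExecutableBuilder.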
class EvalVisitor {
public:
EvalVisitor(DynamicEvaluationEngineOptions options,
const absl::flat_hash_map<std::string, TypedSlot>& input_slots,
OutputInfo output_info, ExecutableBuilder* executable_builder,
const std::vector<std::string>& side_output_names,
absl::flat_hash_map<Fingerprint, QTypePtr> node_types,
eval_internal::SlotAllocator& slot_allocator)
: options_(std::move(options)),
expr_input_slots_(input_slots),
output_info_(std::move(output_info)),
executable_builder_(executable_builder),
side_output_names_(side_output_names),
node_types_(std::move(node_types)),
slot_allocator_(slot_allocator),
compiler_extensions_(CompilerExtensionRegistry::GetInstance()
.GetCompilerExtensionSet()) {}
absl::StatusOr<TypedSlot> operator()(
const ExprNodePtr& node, absl::Span<const TypedSlot* const> visits) {
auto inputs = DereferenceVisitPointers(visits);
ASSIGN_OR_RETURN(QTypePtr output_type, LookupQType(node, node_types_));
if (output_type == nullptr) {
return absl::FailedPreconditionError(
absl::StrFormat("unable to deduce output type of the node %s",
GetDebugSnippet(node)));
}
ASSIGN_OR_RETURN(
TypedSlot output_slot, ConstructOutputSlot(node, inputs, output_type),
_ << "while compiling node " << GetDebugSnippet(node)
<< "; the expression is likely not fully compiled and is using "
"derived operators that are not supported in the backend");
if (output_slot.GetType() != output_type) {
return absl::FailedPreconditionError(absl::StrFormat(
"unexpected output type of the node %s: MetaEval: %s, "
"backend: %s; operator signatures "
"are inconsistent on argument types %s",
GetDebugSnippet(node), output_type->name(),
output_slot.GetType()->name(),
FormatTypeVector(SlotsToTypes(inputs))));
}
if (node->op() != eval_internal::InternalRootOperator()) {
RETURN_IF_ERROR(slot_allocator_.ReleaseSlotsNotNeededAfter(node));
}
return output_slot;
}
private:
using AddSlotFn = std::function<TypedSlot(bool allow_recycled)>;
using CopySlotFn = std::function<absl::StatusOr<TypedSlot>(
TypedSlot slot, const ExprNodePtr& slot_origin)>;
absl::StatusOr<TypedSlot> ConstructOutputSlot(
const ExprNodePtr& node, absl::Span<const TypedSlot> input_slots,
QTypePtr output_type) {
std::optional<TypedSlot> forced_output_slot;
if (node.get() == output_info_.expr.get()) {
forced_output_slot = output_info_.forced_output_slot;
}
AddSlotFn maybe_add_output_slot =
[this, output_type, &node, forced_output_slot](bool allow_recycled) {
if (forced_output_slot.has_value()) {
return *forced_output_slot;
}
auto slot =
slot_allocator_.AddSlotForNode(node, output_type, allow_recycled);
return slot;
};
CopySlotFn maybe_copy_slot =
[this, &node, forced_output_slot](
TypedSlot slot,
const ExprNodePtr& slot_origin) -> absl::StatusOr<TypedSlot> {
if (forced_output_slot.has_value()) {
RETURN_IF_ERROR(this->executable_builder_
->BindEvalOp(*MakeCopyOp(slot.GetType()), {slot},
*forced_output_slot, "core._copy")
.status());
slot = *forced_output_slot;
} else {
RETURN_IF_ERROR(slot_allocator_.ExtendSlotLifetime(slot_origin, node));
}
return slot;
};
switch (node->type()) {
case ExprNodeType::kPlaceholder:
return absl::InternalError(
absl::StrFormat("placeholder should be substituted before "
"evaluation: P.%s",
node->placeholder_key()));
case ExprNodeType::kLeaf: {
if (!expr_input_slots_.contains(node->leaf_key())) {
return absl::InvalidArgumentError(
absl::StrCat("unbound leaf: ", node->leaf_key()));
}
return maybe_copy_slot({expr_input_slots_.at(node->leaf_key())}, node);
}
case ExprNodeType::kLiteral: {
TypedSlot output_slot =
slot_allocator_.AddSlotForNode(node, output_type,
                                         /*allow_recycled=*/false);
RETURN_IF_ERROR(executable_builder_->AddLiteralInitialization(
*node->qvalue(), output_slot));
return maybe_copy_slot(output_slot, node);
}
case ExprNodeType::kOperator: {
ASSIGN_OR_RETURN(auto op, DecayRegisteredOperator(node->op()));
if (!HasBuiltinExprOperatorTag(op) && !HasBackendExprOperatorTag(op)) {
return absl::InvalidArgumentError(
absl::StrCat(node->op()->display_name(),
" is not a builtin or backend ExprOperator"));
}
const auto& op_typeid = typeid(*op);
if (HasBackendExprOperatorTag(op)) {
if (op->display_name() == "core.has._optional") {
return HandleHas(node->node_deps(), input_slots, maybe_copy_slot,
maybe_add_output_slot);
}
return CompileBackendOperator(
op->display_name(), input_slots,
              maybe_add_output_slot(/*allow_recycled=*/true), node);
} else if (HasAnnotationExprOperatorTag(op)) {
return maybe_copy_slot(input_slots[0], node->node_deps()[0]);
} else if (op == eval_internal::InternalRootOperator()) {
return HandleInternalRoot(input_slots);
} else if (op_typeid == typeid(GetNthOperator)) {
return HandleGetNth(op, node->node_deps(), input_slots,
maybe_copy_slot);
} else if (auto* where_op =
fast_dynamic_downcast_final<const PackedWhereOp*>(
op.get())) {
DynamicEvaluationEngineOptions options(options_);
options.allow_overriding_input_slots = false;
return CompileWhereOperator(
options, *where_op, input_slots,
              maybe_add_output_slot(/*allow_recycled=*/true),
executable_builder_);
} else if (auto* while_op = fast_dynamic_downcast_final<
const expr_operators::WhileLoopOperator*>(op.get())) {
DynamicEvaluationEngineOptions options(options_);
options.allow_overriding_input_slots = false;
          auto output_slot = maybe_add_output_slot(/*allow_recycled=*/true);
RETURN_IF_ERROR(eval_internal::CompileWhileOperator(
options, *while_op, input_slots, output_slot,
*executable_builder_));
return output_slot;
} else if (op_typeid == typeid(DerivedQTypeUpcastOperator) ||
op_typeid == typeid(DerivedQTypeDowncastOperator)) {
return HandleDerivedQTypeCast(*op, node->node_deps(), input_slots,
maybe_copy_slot);
} else if (auto* std_function_op =
dynamic_cast<const expr_operators::StdFunctionOperator*>(
op.get())) {
          auto output_slot = maybe_add_output_slot(/*allow_recycled=*/true);
RETURN_IF_ERROR(eval_internal::CompileStdFunctionOperator(
*std_function_op, input_slots, output_slot, *executable_builder_,
node));
return output_slot;
}
        auto output_slot = maybe_add_output_slot(/*allow_recycled=*/true);
if (auto result =
compiler_extensions_.compile_operator_fn(CompileOperatorFnArgs{
.options = options_,
.op = op,
.input_slots = input_slots,
.output_slot = output_slot,
.executable_builder = executable_builder_});
result.has_value()) {
RETURN_IF_ERROR(*result);
return output_slot;
}
return absl::InvalidArgumentError(absl::StrCat(
"unsupported builtin ExprOperator: name=",
node->op()->display_name(), ", CxxType=", TypeName(op_typeid)));
}
}
return absl::InternalError(absl::StrFormat("unexpected ExprNodeType: %d",
static_cast<int>(node->type())));
}
absl::StatusOr<TypedSlot> HandleInternalRoot(
absl::Span<const TypedSlot> input_slots) const {
if (input_slots.size() != 1 + side_output_names_.size()) {
return absl::InternalError(
absl::StrFormat("InternalRootOperator bound with %d "
"arguments, %d expected",
input_slots.size(), 1 + side_output_names_.size()));
}
if (input_slots[0] != output_info_.forced_output_slot) {
return absl::InternalError(
"InternalRootOperator first slot was handled incorrectly");
}
for (size_t i = 0; i < side_output_names_.size(); ++i) {
RETURN_IF_ERROR(executable_builder_->AddNamedOutput(side_output_names_[i],
input_slots[i + 1]));
}
return input_slots[0];
}
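// For optional inputs, core.has._optional needs no kernel: the presence bit
// at the start of the optional slot is re-registered as an OptionalUnit slot.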
absl::StatusOr<TypedSlot> HandleHas(absl::Span<const ExprNodePtr> node_deps,
absl::Span<const TypedSlot> input_slots,
CopySlotFn copy_slot_fn,
AddSlotFn add_slot_fn) {
RETURN_IF_ERROR(VerifySlotsCount("core.has._optional", input_slots, 1));
if (!IsOptionalQType(input_slots[0].GetType())) {
return CompileBackendOperator("core.has._optional", input_slots,
                                    add_slot_fn(/*allow_recycled=*/true));
}
static_assert(sizeof(OptionalUnit) == sizeof(bool));
static_assert(alignof(OptionalUnit) == alignof(bool));
auto mask_slot = FrameLayout::Slot<OptionalUnit>::UnsafeSlotFromOffset(
input_slots[0].byte_offset());
RETURN_IF_ERROR(executable_builder_->layout_builder()->RegisterUnsafeSlot(
mask_slot, true));
DCHECK_EQ(node_deps.size(), 1);
return copy_slot_fn(TypedSlot::FromSlot(mask_slot), node_deps[0]);
}
absl::StatusOr<TypedSlot> HandleGetNth(
const ExprOperatorPtr& op, absl::Span<const ExprNodePtr> node_deps,
absl::Span<const TypedSlot> input_slots, CopySlotFn copy_slot_fn) const {
RETURN_IF_ERROR(VerifySlotsCount(op->display_name(), input_slots, 1));
const GetNthOperator& get_nth =
*static_cast<const GetNthOperator*>(op.get());
if (get_nth.index() < 0 ||
get_nth.index() >= input_slots[0].SubSlotCount()) {
return absl::InternalError(
absl::StrFormat("input type %s is not compatible with %s, index %d "
"is out of range",
input_slots[0].GetType()->name(),
get_nth.display_name(), get_nth.index()));
}
DCHECK_EQ(node_deps.size(), 1);
return copy_slot_fn(input_slots[0].SubSlot(get_nth.index()), node_deps[0]);
}
absl::StatusOr<TypedSlot> HandleDerivedQTypeCast(
const ExprOperator& op, absl::Span<const ExprNodePtr> node_deps,
absl::Span<const TypedSlot> input_slots, CopySlotFn copy_slot_fn) const {
RETURN_IF_ERROR(VerifySlotsCount(op.display_name(), input_slots, 1));
DCHECK(typeid(op) == typeid(DerivedQTypeUpcastOperator) ||
typeid(op) == typeid(DerivedQTypeDowncastOperator));
ASSIGN_OR_RETURN(
auto output_attr,
op.InferAttributes({ExprAttributes(input_slots[0].GetType())}));
DCHECK_EQ(node_deps.size(), 1);
DCHECK(output_attr.qtype());
return copy_slot_fn(TypedSlot::UnsafeFromOffset(
output_attr.qtype(), input_slots[0].byte_offset()),
node_deps[0]);
}
absl::StatusOr<TypedSlot> CompileBackendOperator(
absl::string_view name, absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot, absl::Nullable<ExprNodePtr> node = nullptr) {
ASSIGN_OR_RETURN(
auto op, GetOperatorDirectory(options_).LookupOperator(
name, SlotsToTypes(input_slots), output_slot.GetType()));
ASSIGN_OR_RETURN(auto ip, executable_builder_->BindEvalOp(
*op, input_slots, output_slot, name));
if (node != nullptr) {
executable_builder_->RegisterStacktrace(ip, node);
}
return output_slot;
}
DynamicEvaluationEngineOptions options_;
const absl::flat_hash_map<std::string, TypedSlot>& expr_input_slots_;
OutputInfo output_info_;
ExecutableBuilder* executable_builder_;
const std::vector<std::string>& side_output_names_;
absl::flat_hash_map<Fingerprint, QTypePtr> node_types_;
eval_internal::SlotAllocator& slot_allocator_;
CompilerExtensionSet compiler_extensions_;
};
}
DynamicCompiledExpr::DynamicCompiledExpr(
DynamicEvaluationEngineOptions options,
absl::flat_hash_map<std::string, QTypePtr> input_types,
QTypePtr output_type,
absl::flat_hash_map<std::string, QTypePtr> named_output_types,
ExprNodePtr prepared_expr, std::vector<std::string> side_output_names,
absl::flat_hash_map<Fingerprint, QTypePtr> types,
std::shared_ptr<const ExprStackTrace> stack_trace)
: CompiledExpr(std::move(input_types), output_type,
std::move(named_output_types)),
options_(std::move(options)),
prepared_expr_(std::move(prepared_expr)),
side_output_names_(std::move(side_output_names)),
types_(std::move(types)),
stack_trace_(std::move(stack_trace)) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> DynamicCompiledExpr::Bind(
FrameLayout::Builder* layout_builder,
const absl::flat_hash_map<std::string, TypedSlot>& input_slots,
std::optional<TypedSlot> output_slot) const {
ExecutableBuilder executable_builder(
layout_builder,
options_.collect_op_descriptions,
stack_trace_);
if (!output_slot.has_value()) {
output_slot = AddSlot(output_type(), layout_builder);
}
RETURN_IF_ERROR(
BindToExecutableBuilder(executable_builder, input_slots, *output_slot));
return std::move(executable_builder).Build(input_slots, *output_slot);
}
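// Appends this expression's program to an existing ExecutableBuilder so that
// several bindings can share one frame layout.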
absl::Status DynamicCompiledExpr::BindToExecutableBuilder(
ExecutableBuilder& executable_builder,
const absl::flat_hash_map<std::string, TypedSlot>& input_slots,
TypedSlot output_slot) const {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), input_slots,
false,
true));
ExprNodePtr output_expr = prepared_expr_;
if (output_expr->op() == eval_internal::InternalRootOperator()) {
if (output_expr->node_deps().empty()) {
return absl::InternalError("InternalRootOperator bound with 0 arguments");
}
output_expr = output_expr->node_deps()[0];
}
eval_internal::SlotAllocator slot_allocator(
prepared_expr_, *executable_builder.layout_builder(), input_slots,
options_.allow_overriding_input_slots);
EvalVisitor visitor(options_, input_slots, {output_expr, output_slot},
&executable_builder, side_output_names_, types_,
slot_allocator);
ASSIGN_OR_RETURN(TypedSlot new_output_slot,
PostOrderTraverse(prepared_expr_, std::ref(visitor)));
if (output_slot != new_output_slot) {
return absl::InternalError(
absl::StrFormat("expression %s bound to a wrong output slot",
GetDebugSnippet(prepared_expr_)));
}
return absl::OkStatus();
}
} | #include "arolla/expr/eval/dynamic_compiled_expr.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/executable_builder.h"
#include "arolla/expr/eval/prepare_expression.h"
#include "arolla/expr/eval/test_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr::eval_internal {
namespace {
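// Binds one compiled expression three times (twice against the same input
// slot, once against another) into a shared layout and verifies the emitted
// init/eval programs and the evaluation results.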
TEST(DynamicCompiledExprTest, BindToExecutableBuilder) {
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp("math.add", {Leaf("x"), Literal<int32_t>(1)}));
absl::flat_hash_map<std::string, QTypePtr> input_types = {
{"x", GetQType<int32_t>()}};
ASSERT_OK_AND_ASSIGN(
expr,
PrepareExpression(expr, input_types, DynamicEvaluationEngineOptions{}));
absl::flat_hash_map<Fingerprint, QTypePtr> node_types;
ASSERT_OK_AND_ASSIGN(expr, ExtractQTypesForCompilation(expr, &node_types));
DynamicCompiledExpr compiled_expr(
DynamicEvaluationEngineOptions{}, input_types,
      /*output_type=*/GetQType<int32_t>(), /*named_output_types=*/{}, expr,
      /*side_output_names=*/{}, node_types);
FrameLayout::Builder layout_builder;
ExecutableBuilder executable_builder(&layout_builder,
                                       /*collect_op_descriptions=*/true);
auto x1_slot = layout_builder.AddSlot<int32_t>();
auto x2_slot = layout_builder.AddSlot<int32_t>();
auto result_slot = layout_builder.AddSlot<int32_t>();
auto other_result_slot = layout_builder.AddSlot<int32_t>();
ASSERT_OK(compiled_expr.BindToExecutableBuilder(
executable_builder, {{"x", TypedSlot::FromSlot(x1_slot)}},
TypedSlot::FromSlot(result_slot)));
ASSERT_OK(compiled_expr.BindToExecutableBuilder(
executable_builder, {{"x", TypedSlot::FromSlot(x1_slot)}},
TypedSlot::FromSlot(other_result_slot)));
ASSERT_OK(compiled_expr.BindToExecutableBuilder(
executable_builder, {{"x", TypedSlot::FromSlot(x2_slot)}},
TypedSlot::FromSlot(other_result_slot)));
std::unique_ptr<BoundExpr> executable_expr =
std::move(executable_builder)
.Build({{"x1", TypedSlot::FromSlot(x1_slot)},
{"x2", TypedSlot::FromSlot(x2_slot)}},
TypedSlot::FromSlot(result_slot));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(
"INT32 [0x10] = 1\n"
"INT32 [0x14] = 1\n"
"INT32 [0x18] = 1"),
EvalOperationsAre(
"INT32 [0x08] = math.add(INT32 [0x00], INT32 [0x10])",
"INT32 [0x0C] = math.add(INT32 [0x00], INT32 [0x14])",
"INT32 [0x0C] = math.add(INT32 [0x04], INT32 [0x18])")));
auto layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&layout);
alloc.frame().Set(x1_slot, 56);
alloc.frame().Set(x2_slot, 1);
EvaluationContext ctx;
executable_expr->InitializeLiterals(&ctx, alloc.frame());
executable_expr->Execute(&ctx, alloc.frame());
EXPECT_OK(ctx.status());
EXPECT_EQ(alloc.frame().Get(result_slot), 57);
EXPECT_EQ(alloc.frame().Get(other_result_slot), 2);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/dynamic_compiled_expr.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/dynamic_compiled_expr_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
8e4c68dd-bf7a-461f-8359-b00cdc6e5b6d | cpp | google/tensorstore | gcs_grpc | tensorstore/kvstore/gcs_grpc/gcs_grpc.cc | tensorstore/kvstore/gcs_grpc/gcs_grpc_test.cc | #include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/crc/crc32c.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/client_context.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/client_callback.h"
#include "grpcpp/support/status.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/gcs/gcs_resource.h"
#include "tensorstore/kvstore/gcs/validate.h"
#include "tensorstore/kvstore/gcs_grpc/get_credentials.h"
#include "tensorstore/kvstore/gcs_grpc/storage_stub_pool.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/generic_coalescing_batch_util.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/cache_key/absl_time.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/context_binding.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/serialization/fwd.h"
#include "google/protobuf/empty.pb.h"
#include "google/storage/v2/storage.grpc.pb.h"
#include "google/storage/v2/storage.pb.h"
using ::tensorstore::internal::DataCopyConcurrencyResource;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
using ::tensorstore::internal::ScheduleAt;
using ::tensorstore::internal_gcs_grpc::GetCredentialsForEndpoint;
using ::tensorstore::internal_gcs_grpc::GetSharedStorageStubPool;
using ::tensorstore::internal_gcs_grpc::StorageStubPool;
using ::tensorstore::internal_storage_gcs::GcsUserProjectResource;
using ::tensorstore::internal_storage_gcs::IsRetriable;
using ::tensorstore::internal_storage_gcs::IsValidBucketName;
using ::tensorstore::internal_storage_gcs::IsValidObjectName;
using ::tensorstore::internal_storage_gcs::IsValidStorageGeneration;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::SupportedFeatures;
using ::google::storage::v2::DeleteObjectRequest;
using ::google::storage::v2::ListObjectsRequest;
using ::google::storage::v2::ListObjectsResponse;
using ::google::storage::v2::ReadObjectRequest;
using ::google::storage::v2::ReadObjectResponse;
using ::google::storage::v2::ServiceConstants;
using ::google::storage::v2::WriteObjectRequest;
using ::google::storage::v2::WriteObjectResponse;
using ::google::storage::v2::Storage;
namespace {
static constexpr char kUriScheme[] = "gcs_grpc";
static constexpr size_t kMaxWriteBytes =
ServiceConstants::MAX_WRITE_CHUNK_BYTES;
}
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
struct GcsMetrics : public internal_kvstore::CommonMetrics {
internal_metrics::Counter<int64_t>& retries;
};
auto gcs_grpc_metrics = []() -> GcsMetrics {
return {TENSORSTORE_KVSTORE_COMMON_METRICS(gcs_grpc),
TENSORSTORE_KVSTORE_COUNTER_IMPL(
gcs_grpc, retries,
"Ccunt of all retried requests (read/write/delete)")};
}();
ABSL_CONST_INIT internal_log::VerboseFlag gcs_grpc_logging("gcs_grpc");
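// JSON-bindable spec data for the gcs_grpc driver: target bucket, optional
// test endpoint, channel count, timeouts, and shared context resources.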
struct GcsGrpcKeyValueStoreSpecData {
std::string bucket;
std::string endpoint;
uint32_t num_channels = 0;
absl::Duration timeout = absl::ZeroDuration();
absl::Duration wait_for_connection = absl::ZeroDuration();
Context::Resource<GcsUserProjectResource> user_project;
Context::Resource<internal_storage_gcs::GcsRequestRetries> retries;
Context::Resource<DataCopyConcurrencyResource> data_copy_concurrency;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.bucket, x.endpoint, x.num_channels, x.timeout,
x.wait_for_connection, x.user_project, x.retries,
x.data_copy_concurrency);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member(
"bucket",
jb::Projection<&GcsGrpcKeyValueStoreSpecData::bucket>(
jb::Validate([](const auto& options, const std::string* x) {
if (!IsValidBucketName(*x)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid GCS bucket name: ", QuoteString(*x)));
}
return absl::OkStatus();
}))),
jb::Member("endpoint",
jb::Projection<&GcsGrpcKeyValueStoreSpecData::endpoint>(
jb::DefaultInitializedValue())),
jb::Member("num_channels",
jb::Projection<&GcsGrpcKeyValueStoreSpecData::num_channels>(
jb::DefaultInitializedValue())),
jb::Member("timeout",
jb::Projection<&GcsGrpcKeyValueStoreSpecData::timeout>(
jb::DefaultValue<jb::kNeverIncludeDefaults>(
[](auto* x) { *x = absl::ZeroDuration(); }))),
jb::Member(
"wait_for_connection",
jb::Projection<&GcsGrpcKeyValueStoreSpecData::wait_for_connection>(
jb::DefaultValue<jb::kNeverIncludeDefaults>(
[](auto* x) { *x = absl::ZeroDuration(); }))),
jb::Member(GcsUserProjectResource::id,
jb::Projection<&GcsGrpcKeyValueStoreSpecData::user_project>()),
jb::Member(internal_storage_gcs::GcsRequestRetries::id,
jb::Projection<&GcsGrpcKeyValueStoreSpecData::retries>()),
jb::Member(
DataCopyConcurrencyResource::id,
jb::Projection<
&GcsGrpcKeyValueStoreSpecData::data_copy_concurrency>()),
jb::DiscardExtraMembers);
};
class GcsGrpcKeyValueStoreSpec
: public internal_kvstore::RegisteredDriverSpec<
GcsGrpcKeyValueStoreSpec, GcsGrpcKeyValueStoreSpecData> {
public:
static constexpr char id[] = "gcs_grpc";
Future<kvstore::DriverPtr> DoOpen() const override;
absl::Status NormalizeSpec(std::string& path) override {
if (!path.empty() && !IsValidObjectName(path)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid GCS path: ", QuoteString(path)));
}
return absl::OkStatus();
}
Result<std::string> ToUrl(std::string_view path) const override {
if (!data_.endpoint.empty()) {
return absl::UnimplementedError(
"URL representation does not support test endpoints");
}
return tensorstore::StrCat(kUriScheme, ":
internal::PercentEncodeUriPath(path));
}
};
class GcsGrpcKeyValueStore
: public internal_kvstore::RegisteredDriver<GcsGrpcKeyValueStore,
GcsGrpcKeyValueStoreSpec> {
public:
internal_kvstore_batch::CoalescingOptions GetBatchReadCoalescingOptions()
const {
return internal_kvstore_batch::kDefaultRemoteStorageCoalescingOptions;
}
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<ReadResult> ReadImpl(Key&& key, ReadOptions&& options);
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
Future<const void> DeleteRange(KeyRange range) override;
absl::Status GetBoundSpecData(SpecData& spec) const {
spec = spec_;
return absl::OkStatus();
}
SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
SupportedFeatures::kAtomicWriteWithoutOverwrite;
}
const Executor& executor() const {
return spec_.data_copy_concurrency->executor;
}
std::string bucket_name() { return bucket_; }
std::shared_ptr<Storage::StubInterface> get_stub() {
return storage_stub_pool_->get_next_stub();
}
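  // Allocates a ClientContext for a single RPC: attaches x-goog-user-project
  // and x-goog-request-params metadata, applies the configured deadline, and
  // sets per-call credentials when a credentials factory is present.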
std::unique_ptr<grpc::ClientContext> AllocateContext() {
auto context = std::make_unique<grpc::ClientContext>();
if (spec_.user_project->project_id &&
!spec_.user_project->project_id->empty()) {
context->AddMetadata("x-goog-user-project",
*spec_.user_project->project_id);
}
context->AddMetadata("x-goog-request-params",
absl::StrFormat("bucket=%s", bucket_name()));
if (spec_.timeout > absl::ZeroDuration() &&
spec_.timeout < absl::InfiniteDuration()) {
context->set_deadline(absl::ToChronoTime(absl::Now() + spec_.timeout));
}
if (call_credentials_fn_) {
context->set_credentials(call_credentials_fn_());
}
return context;
}
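  // Schedules task->Retry() on the executor after the retry policy's backoff
  // delay for `attempt`; once the retry budget is exhausted, returns the
  // failing status annotated as kAborted instead.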
template <typename Task>
absl::Status BackoffForAttemptAsync(
absl::Status status, int attempt, Task* task,
SourceLocation loc = ::tensorstore::SourceLocation::current()) {
assert(task != nullptr);
auto delay = spec_.retries->BackoffForAttempt(attempt);
if (!delay) {
return MaybeAnnotateStatus(std::move(status),
absl::StrFormat("All %d retry attempts failed",
spec_.retries->max_retries),
absl::StatusCode::kAborted, loc);
}
gcs_grpc_metrics.retries.Increment();
ScheduleAt(absl::Now() + *delay,
WithExecutor(executor(), [task = internal::IntrusivePtr<Task>(
task)] { task->Retry(); }));
return absl::OkStatus();
}
SpecData spec_;
std::string bucket_;
std::shared_ptr<StorageStubPool> storage_stub_pool_;
std::function<std::shared_ptr<grpc::CallCredentials>()> call_credentials_fn_;
};
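// Computes the CRC32C checksum of a (possibly fragmented) cord by extending
// the checksum chunk by chunk.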
absl::crc32c_t ComputeCrc32c(const absl::Cord& cord) {
absl::crc32c_t crc{0};
for (auto chunk : cord.Chunks()) {
crc = absl::ExtendCrc32c(crc, chunk);
}
return crc;
}
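// Server-streaming read of a single object (ClientReadReactor). Each
// OnReadDone validates the fragment's crc32c and appends its content to
// value_; ReadFinished maps gRPC failures to kvstore::ReadResult semantics
// and, for unbounded full-object reads, verifies the whole-object crc32c.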
struct ReadTask : public internal::AtomicReferenceCount<ReadTask>,
public grpc::ClientReadReactor<ReadObjectResponse> {
internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
kvstore::ReadOptions options_;
Promise<kvstore::ReadResult> promise_;
Storage::StubInterface* stub_ = nullptr;
ReadObjectRequest request_;
ReadObjectResponse response_;
std::optional<absl::crc32c_t> crc32c_;
TimestampedStorageGeneration storage_generation_;
absl::Cord value_;
int attempt_ = 0;
absl::Mutex mutex_;
std::unique_ptr<grpc::ClientContext> context_ ABSL_GUARDED_BY(mutex_);
void TryCancel() ABSL_LOCKS_EXCLUDED(mutex_) {
absl::MutexLock lock(&mutex_);
if (context_) context_->TryCancel();
}
void Start(const std::string& object_name) {
ABSL_LOG_IF(INFO, gcs_grpc_logging) << "ReadTask " << object_name;
stub_ = driver_->get_stub().get();
promise_.ExecuteWhenNotNeeded(
[self = internal::IntrusivePtr<ReadTask>(this)] { self->TryCancel(); });
request_.set_bucket(driver_->bucket_name());
request_.set_object(object_name);
if (!StorageGeneration::IsUnknown(
options_.generation_conditions.if_equal)) {
uint64_t gen =
StorageGeneration::IsNoValue(options_.generation_conditions.if_equal)
? 0
: StorageGeneration::ToUint64(
options_.generation_conditions.if_equal);
request_.set_if_generation_match(gen);
}
if (!StorageGeneration::IsUnknown(
options_.generation_conditions.if_not_equal)) {
uint64_t gen = StorageGeneration::IsNoValue(
options_.generation_conditions.if_not_equal)
? 0
: StorageGeneration::ToUint64(
options_.generation_conditions.if_not_equal);
request_.set_if_generation_not_match(gen);
}
if (options_.byte_range.inclusive_min != 0) {
request_.set_read_offset(options_.byte_range.inclusive_min);
}
if (options_.byte_range.exclusive_max != -1) {
auto target_size = options_.byte_range.size();
assert(target_size >= 0);
request_.set_read_limit(target_size == 0 ? 1 : target_size);
}
Retry();
}
void Retry() ABSL_LOCKS_EXCLUDED(mutex_) {
if (!promise_.result_needed()) {
return;
}
value_.Clear();
storage_generation_ =
TimestampedStorageGeneration{StorageGeneration::Unknown(), absl::Now()};
{
absl::MutexLock lock(&mutex_);
assert(context_ == nullptr);
context_ = driver_->AllocateContext();
intrusive_ptr_increment(this);
stub_->async()->ReadObject(context_.get(), &request_, this);
}
StartRead(&response_);
StartCall();
}
void OnReadDone(bool ok) override {
if (!ok) return;
if (!promise_.result_needed()) {
TryCancel();
return;
}
if (response_.has_metadata()) {
storage_generation_.generation =
StorageGeneration::FromUint64(response_.metadata().generation());
}
if (response_.has_object_checksums() &&
response_.object_checksums().crc32c() != 0 &&
options_.byte_range.inclusive_min == 0 &&
        options_.byte_range.exclusive_max == -1) {
crc32c_ = absl::crc32c_t(response_.object_checksums().crc32c());
}
if (response_.has_content_range()) {
auto returned_size =
response_.content_range().end() - response_.content_range().start();
if (auto size = options_.byte_range.size();
(size > 0 && size != returned_size) ||
(options_.byte_range.inclusive_min >= 0 &&
response_.content_range().start() !=
options_.byte_range.inclusive_min)) {
promise_.SetResult(absl::OutOfRangeError(
tensorstore::StrCat("Requested byte range ", options_.byte_range,
" was not satisfied by GCS object with size ",
response_.content_range().complete_length())));
TryCancel();
return;
}
}
if (response_.has_checksummed_data() &&
response_.checksummed_data().has_crc32c() &&
response_.checksummed_data().crc32c() != 0) {
auto content_crc32c =
ComputeCrc32c(response_.checksummed_data().content());
if (content_crc32c !=
absl::crc32c_t(response_.checksummed_data().crc32c())) {
promise_.SetResult(absl::DataLossError(absl::StrFormat(
"Object fragment crc32c %08x does not match expected crc32c %08x",
static_cast<uint32_t>(content_crc32c),
response_.checksummed_data().crc32c())));
TryCancel();
return;
}
}
if (response_.has_checksummed_data()) {
gcs_grpc_metrics.bytes_read.IncrementBy(
response_.checksummed_data().content().size());
value_.Append(response_.checksummed_data().content());
}
StartRead(&response_);
}
void OnDone(const grpc::Status& s) override {
internal::IntrusivePtr<ReadTask> self(this, internal::adopt_object_ref);
driver_->executor()(
[self = std::move(self), status = GrpcStatusToAbslStatus(s)]() {
self->ReadFinished(std::move(status));
});
}
void ReadFinished(absl::Status status) {
if (!promise_.result_needed()) {
return;
}
{
absl::MutexLock lock(&mutex_);
context_ = nullptr;
}
if (!status.ok() && attempt_ == 0 &&
status.code() == absl::StatusCode::kUnauthenticated) {
attempt_++;
Retry();
return;
}
if (!status.ok() && IsRetriable(status)) {
status =
driver_->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
auto latency = absl::Now() - storage_generation_.time;
gcs_grpc_metrics.read_latency_ms.Observe(
absl::ToInt64Milliseconds(latency));
if (!status.ok()) {
if (absl::IsFailedPrecondition(status) || absl::IsAborted(status)) {
if (!StorageGeneration::IsUnknown(
options_.generation_conditions.if_equal)) {
storage_generation_.generation = StorageGeneration::Unknown();
} else {
storage_generation_.generation =
options_.generation_conditions.if_not_equal;
}
promise_.SetResult(
kvstore::ReadResult::Unspecified(std::move(storage_generation_)));
return;
}
if (absl::IsNotFound(status)) {
promise_.SetResult(
kvstore::ReadResult::Missing(storage_generation_.time));
return;
}
promise_.SetResult(std::move(status));
return;
}
if (StorageGeneration::IsUnknown(storage_generation_.generation)) {
promise_.SetResult(
absl::InternalError("Object missing a valid generation"));
return;
}
if (options_.byte_range.size() == 0) {
value_.Clear();
} else if (crc32c_.has_value() && ComputeCrc32c(value_) != *crc32c_) {
promise_.SetResult(
absl::DataLossError("Object crc32c does not match expected crc32c"));
return;
}
promise_.SetResult(kvstore::ReadResult::Value(
std::move(value_), std::move(storage_generation_)));
}
};
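// Client-streaming write of a single object (ClientWriteReactor). The value
// is sent in fragments of at most kMaxWriteBytes; only the first request
// carries the WriteObjectSpec, and the final request carries the cumulative
// object crc32c with finish_write set.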
struct WriteTask : public internal::AtomicReferenceCount<WriteTask>,
public grpc::ClientWriteReactor<WriteObjectRequest> {
internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
kvstore::WriteOptions options_;
Promise<TimestampedStorageGeneration> promise_;
std::string object_name_;
absl::Cord value_;
Storage::StubInterface* stub_ = nullptr;
WriteObjectRequest request_;
WriteObjectResponse response_;
TimestampedStorageGeneration write_result_;
size_t write_offset_;
absl::crc32c_t crc32c_;
int attempt_ = 0;
absl::Mutex mutex_;
std::unique_ptr<grpc::ClientContext> context_ ABSL_GUARDED_BY(mutex_);
void TryCancel() ABSL_LOCKS_EXCLUDED(mutex_) {
absl::MutexLock lock(&mutex_);
if (context_) context_->TryCancel();
}
void UpdateRequestForNextWrite() ABSL_LOCKS_EXCLUDED(mutex_) {
absl::MutexLock lock(&mutex_);
if (write_offset_ == 0) {
write_result_.time = absl::Now();
auto& resource =
*request_.mutable_write_object_spec()->mutable_resource();
resource.set_bucket(driver_->bucket_name());
resource.set_name(object_name_);
request_.mutable_write_object_spec()->set_object_size(value_.size());
if (!StorageGeneration::IsUnknown(
options_.generation_conditions.if_equal)) {
auto gen = StorageGeneration::ToUint64(
options_.generation_conditions.if_equal);
request_.mutable_write_object_spec()->set_if_generation_match(gen);
}
} else {
request_.clear_write_object_spec();
}
request_.set_write_offset(write_offset_);
size_t next_write_offset =
std::min(write_offset_ + kMaxWriteBytes, value_.size());
auto& checksummed_data = *request_.mutable_checksummed_data();
checksummed_data.set_content(
value_.Subcord(write_offset_, next_write_offset - write_offset_));
auto chunk_crc32c = ComputeCrc32c(checksummed_data.content());
checksummed_data.set_crc32c(static_cast<uint32_t>(chunk_crc32c));
crc32c_ = absl::ConcatCrc32c(crc32c_, chunk_crc32c,
checksummed_data.content().size());
write_offset_ = next_write_offset;
if (write_offset_ == value_.size()) {
request_.mutable_object_checksums()->set_crc32c(
static_cast<uint32_t>(crc32c_));
request_.set_finish_write(true);
}
}
void Start(std::string object_name, absl::Cord value) {
ABSL_LOG_IF(INFO, gcs_grpc_logging) << "WriteTask " << object_name;
object_name_ = std::move(object_name);
value_ = std::move(value);
stub_ = driver_->get_stub().get();
promise_.ExecuteWhenNotNeeded([self = internal::IntrusivePtr<WriteTask>(
this)] { self->TryCancel(); });
Retry();
}
void Retry() ABSL_LOCKS_EXCLUDED(mutex_) {
if (!promise_.result_needed()) {
return;
}
write_offset_ = 0;
crc32c_ = absl::crc32c_t{0};
request_.Clear();
{
absl::MutexLock lock(&mutex_);
assert(context_ == nullptr);
context_ = driver_->AllocateContext();
intrusive_ptr_increment(this);
stub_->async()->WriteObject(context_.get(), &response_, this);
}
UpdateRequestForNextWrite();
auto options = grpc::WriteOptions();
if (request_.finish_write()) {
options.set_last_message();
}
StartWrite(&request_, options);
StartCall();
}
void OnWriteDone(bool ok) override {
if (!ok) return;
if (request_.finish_write()) return;
UpdateRequestForNextWrite();
auto options = grpc::WriteOptions();
if (request_.finish_write()) {
options.set_last_message();
}
StartWrite(&request_, options);
}
void OnDone(const grpc::Status& s) override {
internal::IntrusivePtr<WriteTask> self(this, internal::adopt_object_ref);
driver_->executor()(
[self = std::move(self), status = GrpcStatusToAbslStatus(s)] {
self->WriteFinished(std::move(status));
});
}
void WriteFinished(absl::Status status) {
if (!promise_.result_needed()) {
return;
}
auto latency = absl::Now() - write_result_.time;
gcs_grpc_metrics.write_latency_ms.Observe(
absl::ToInt64Milliseconds(latency));
{
absl::MutexLock lock(&mutex_);
context_ = nullptr;
}
if (!status.ok() && attempt_ == 0 &&
status.code() == absl::StatusCode::kUnauthenticated) {
attempt_++;
Retry();
return;
}
if (!status.ok() && IsRetriable(status)) {
status =
driver_->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
if (response_.has_resource()) {
write_result_.generation =
StorageGeneration::FromUint64(response_.resource().generation());
}
if (absl::IsFailedPrecondition(status) || absl::IsAlreadyExists(status)) {
write_result_.generation = StorageGeneration::Unknown();
promise_.SetResult(std::move(write_result_));
} else if (absl::IsNotFound(status) &&
!StorageGeneration::IsUnknown(
options_.generation_conditions.if_equal)) {
write_result_.generation = StorageGeneration::Unknown();
promise_.SetResult(std::move(write_result_));
} else if (!status.ok()) {
promise_.SetResult(status);
} else {
promise_.SetResult(std::move(write_result_));
}
}
};
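// Unary DeleteObject call; also used by Write() when the value is
// std::nullopt. Honors an if_equal generation condition via
// if_generation_match.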
struct DeleteTask : public internal::AtomicReferenceCount<DeleteTask> {
internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
kvstore::WriteOptions options_;
Promise<TimestampedStorageGeneration> promise_;
Storage::StubInterface* stub_ = nullptr;
absl::Time start_time_;
DeleteObjectRequest request_;
::google::protobuf::Empty response_;
int attempt_ = 0;
absl::Mutex mutex_;
std::unique_ptr<grpc::ClientContext> context_ ABSL_GUARDED_BY(mutex_);
void TryCancel() ABSL_LOCKS_EXCLUDED(mutex_) {
absl::MutexLock lock(&mutex_);
if (context_) context_->TryCancel();
}
void Start(const std::string& object_name) {
ABSL_LOG_IF(INFO, gcs_grpc_logging) << "DeleteTask " << object_name;
stub_ = driver_->get_stub().get();
promise_.ExecuteWhenNotNeeded([self = internal::IntrusivePtr<DeleteTask>(
this)] { self->TryCancel(); });
request_.set_bucket(driver_->bucket_name());
request_.set_object(object_name);
if (!StorageGeneration::IsUnknown(
options_.generation_conditions.if_equal)) {
auto gen =
StorageGeneration::ToUint64(options_.generation_conditions.if_equal);
request_.set_if_generation_match(gen);
}
Retry();
}
void Retry() ABSL_LOCKS_EXCLUDED(mutex_) {
if (!promise_.result_needed()) {
return;
}
start_time_ = absl::Now();
{
absl::MutexLock lock(&mutex_);
assert(context_ == nullptr);
context_ = driver_->AllocateContext();
intrusive_ptr_increment(this);
stub_->async()->DeleteObject(
context_.get(), &request_, &response_,
WithExecutor(driver_->executor(), [this](::grpc::Status s) {
internal::IntrusivePtr<DeleteTask> self(this,
internal::adopt_object_ref);
self->DeleteFinished(GrpcStatusToAbslStatus(s));
}));
}
}
void DeleteFinished(absl::Status status) {
if (!promise_.result_needed()) {
return;
}
{
absl::MutexLock lock(&mutex_);
context_ = nullptr;
}
if (!status.ok() && attempt_ == 0 &&
status.code() == absl::StatusCode::kUnauthenticated) {
attempt_++;
Retry();
return;
}
if (!status.ok() && IsRetriable(status)) {
status =
driver_->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
TimestampedStorageGeneration r;
r.time = start_time_;
r.generation = StorageGeneration::NoValue();
if (absl::IsFailedPrecondition(status)) {
r.generation = StorageGeneration::Unknown();
} else if (absl::IsNotFound(status)) {
if (!options_.generation_conditions.MatchesNoValue()) {
r.generation = StorageGeneration::Unknown();
}
} else if (!status.ok()) {
promise_.SetResult(std::move(status));
return;
}
promise_.SetResult(std::move(r));
}
};
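// Paginated ListObjects call. Objects within options_.range are forwarded to
// the receiver (optionally with a prefix stripped); listing restarts with the
// next page token until exhausted or cancelled.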
struct ListTask : public internal::AtomicReferenceCount<ListTask> {
internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
kvstore::ListOptions options_;
ListReceiver receiver_;
Storage::StubInterface* stub_ = nullptr;
ListObjectsRequest request;
ListObjectsResponse response;
int attempt_ = 0;
absl::Mutex mutex_;
std::unique_ptr<grpc::ClientContext> context_ ABSL_GUARDED_BY(mutex_);
bool cancelled_ ABSL_GUARDED_BY(mutex_) = false;
ListTask(internal::IntrusivePtr<GcsGrpcKeyValueStore>&& driver,
kvstore::ListOptions&& options, ListReceiver&& receiver)
: driver_(std::move(driver)),
options_(std::move(options)),
receiver_(std::move(receiver)) {
execution::set_starting(receiver_, [this] { TryCancel(); });
}
~ListTask() {
{
absl::MutexLock l(&mutex_);
context_ = nullptr;
}
driver_ = {};
execution::set_stopping(receiver_);
}
bool is_cancelled() ABSL_LOCKS_EXCLUDED(mutex_) {
absl::MutexLock l(&mutex_);
return cancelled_;
}
void TryCancel() ABSL_LOCKS_EXCLUDED(mutex_) {
absl::MutexLock l(&mutex_);
if (!cancelled_) {
cancelled_ = true;
if (context_) context_->TryCancel();
}
}
void Start() {
ABSL_LOG_IF(INFO, gcs_grpc_logging) << "ListTask " << options_.range;
stub_ = driver_->get_stub().get();
request.set_lexicographic_start(options_.range.inclusive_min);
request.set_lexicographic_end(options_.range.exclusive_max);
request.set_parent(driver_->bucket_name());
request.set_page_size(1000);
Retry();
}
void Retry() ABSL_LOCKS_EXCLUDED(mutex_) {
if (is_cancelled()) {
execution::set_done(receiver_);
return;
}
{
absl::MutexLock lock(&mutex_);
context_ = driver_->AllocateContext();
intrusive_ptr_increment(this);
stub_->async()->ListObjects(
context_.get(), &request, &response,
WithExecutor(driver_->executor(), [this](::grpc::Status s) {
internal::IntrusivePtr<ListTask> self(this,
internal::adopt_object_ref);
self->ListFinished(GrpcStatusToAbslStatus(s));
}));
}
}
void ListFinished(absl::Status status) {
if (is_cancelled()) {
execution::set_done(receiver_);
return;
}
if (!status.ok() && IsRetriable(status)) {
status =
driver_->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
if (!status.ok()) {
execution::set_error(receiver_, std::move(status));
return;
}
bool done = false;
for (const auto& o : response.objects()) {
if (is_cancelled()) {
done = true;
break;
}
std::string_view name = o.name();
if (!Contains(options_.range, name)) {
if (KeyRange::CompareKeyAndExclusiveMax(
name, options_.range.exclusive_max) >= 0) {
done = true;
break;
}
continue;
}
if (options_.strip_prefix_length) {
name = name.substr(options_.strip_prefix_length);
}
execution::set_value(receiver_, ListEntry{
std::string(name),
ListEntry::checked_size(o.size()),
});
}
if (!done && !response.next_page_token().empty()) {
request.set_page_token(response.next_page_token());
response.Clear();
attempt_ = 0;
Retry();
return;
}
execution::set_done(receiver_);
}
};
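// Receiver used by DeleteRange: issues a Delete for each listed key and links
// the resulting futures into a single promise.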
struct DeleteRangeListReceiver {
internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
Promise<void> promise_;
FutureCallbackRegistration cancel_registration_;
void set_starting(AnyCancelReceiver cancel) {
cancel_registration_ = promise_.ExecuteWhenNotNeeded(std::move(cancel));
}
void set_value(ListEntry entry) {
assert(!entry.key.empty());
if (!entry.key.empty()) {
LinkError(promise_, driver_->Delete(std::move(entry.key)));
}
}
void set_error(absl::Status error) {
SetDeferredResult(promise_, std::move(error));
promise_ = Promise<void>();
}
void set_done() { promise_ = Promise<void>(); }
void set_stopping() {
cancel_registration_.Unregister();
driver_ = {};
}
};
Future<kvstore::ReadResult> GcsGrpcKeyValueStore::Read(Key key,
ReadOptions options) {
gcs_grpc_metrics.read.Increment();
if (!IsValidObjectName(key)) {
return absl::InvalidArgumentError("Invalid blob object name");
}
if (!IsValidStorageGeneration(options.generation_conditions.if_equal) ||
!IsValidStorageGeneration(options.generation_conditions.if_not_equal)) {
return absl::InvalidArgumentError("Malformed StorageGeneration");
}
return internal_kvstore_batch::HandleBatchRequestByGenericByteRangeCoalescing(
*this, std::move(key), std::move(options));
}
Future<kvstore::ReadResult> GcsGrpcKeyValueStore::ReadImpl(
Key&& key, ReadOptions&& options) {
gcs_grpc_metrics.batch_read.Increment();
auto op = PromiseFuturePair<ReadResult>::Make();
auto task = internal::MakeIntrusivePtr<ReadTask>();
task->driver_ = internal::IntrusivePtr<GcsGrpcKeyValueStore>(this);
task->options_ = std::move(options);
task->promise_ = std::move(op.promise);
task->Start(key);
return std::move(op.future);
}
Future<TimestampedStorageGeneration> GcsGrpcKeyValueStore::Write(
Key key, std::optional<Value> value, WriteOptions options) {
gcs_grpc_metrics.write.Increment();
if (!IsValidObjectName(key)) {
return absl::InvalidArgumentError("Invalid blob object name");
}
if (!IsValidStorageGeneration(options.generation_conditions.if_equal)) {
return absl::InvalidArgumentError("Malformed StorageGeneration");
}
auto op = PromiseFuturePair<TimestampedStorageGeneration>::Make();
if (!value) {
auto task = internal::MakeIntrusivePtr<DeleteTask>();
task->driver_ = internal::IntrusivePtr<GcsGrpcKeyValueStore>(this);
task->options_ = std::move(options);
task->promise_ = std::move(op.promise);
task->Start(key);
} else {
auto task = internal::MakeIntrusivePtr<WriteTask>();
task->driver_ = internal::IntrusivePtr<GcsGrpcKeyValueStore>(this);
task->options_ = std::move(options);
task->promise_ = std::move(op.promise);
task->Start(key, *std::move(value));
}
return std::move(op.future);
}
void GcsGrpcKeyValueStore::ListImpl(ListOptions options,
ListReceiver receiver) {
gcs_grpc_metrics.list.Increment();
if (options.range.empty()) {
execution::set_starting(receiver, [] {});
execution::set_done(receiver);
execution::set_stopping(receiver);
return;
}
auto task = internal::MakeIntrusivePtr<ListTask>(
internal::IntrusivePtr<GcsGrpcKeyValueStore>(this), std::move(options),
std::move(receiver));
task->Start();
}
Future<const void> GcsGrpcKeyValueStore::DeleteRange(KeyRange range) {
gcs_grpc_metrics.delete_range.Increment();
if (range.empty()) return absl::OkStatus();
auto op = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
ListOptions list_options;
list_options.range = std::move(range);
ListImpl(list_options, DeleteRangeListReceiver{
internal::IntrusivePtr<GcsGrpcKeyValueStore>(this),
std::move(op.promise)});
return std::move(op.future);
}
Future<kvstore::DriverPtr> GcsGrpcKeyValueStoreSpec::DoOpen() const {
auto driver = internal::MakeIntrusivePtr<GcsGrpcKeyValueStore>();
driver->spec_ = data_;
driver->bucket_ = absl::StrFormat("projects/_/buckets/%s", data_.bucket);
std::string endpoint = data_.endpoint;
if (endpoint.empty()) {
endpoint = "dns:
}
auto channel_credentials =
GetCredentialsForEndpoint(endpoint, driver->call_credentials_fn_);
driver->storage_stub_pool_ = GetSharedStorageStubPool(
endpoint, data_.num_channels, std::move(channel_credentials));
if (driver->spec_.wait_for_connection > absl::ZeroDuration()) {
driver->storage_stub_pool_->WaitForConnected(
driver->spec_.wait_for_connection);
}
return driver;
}
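// Parses a "gcs_grpc://bucket/path" URL into a driver spec with default
// context resources; query strings and fragments are rejected.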
Result<kvstore::Spec> ParseGcsGrpcUrl(std::string_view url) {
auto parsed = internal::ParseGenericUri(url);
assert(parsed.scheme == kUriScheme);
if (!parsed.query.empty()) {
return absl::InvalidArgumentError("Query string not supported");
}
if (!parsed.fragment.empty()) {
return absl::InvalidArgumentError("Fragment identifier not supported");
}
if (!IsValidBucketName(parsed.authority)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid GCS bucket name: ", QuoteString(parsed.authority)));
}
auto decoded_path = parsed.path.empty()
? std::string()
: internal::PercentDecode(parsed.path.substr(1));
auto driver_spec = internal::MakeIntrusivePtr<GcsGrpcKeyValueStoreSpec>();
driver_spec->data_.bucket = std::string(parsed.authority);
driver_spec->data_.user_project =
Context::Resource<GcsUserProjectResource>::DefaultSpec();
driver_spec->data_.retries =
Context::Resource<internal_storage_gcs::GcsRequestRetries>::DefaultSpec();
driver_spec->data_.data_copy_concurrency =
Context::Resource<DataCopyConcurrencyResource>::DefaultSpec();
return {std::in_place, std::move(driver_spec), std::move(decoded_path)};
}
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::GcsGrpcKeyValueStore)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::GcsGrpcKeyValueStoreSpec>
registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
url_scheme_registration{kUriScheme, tensorstore::ParseGcsGrpcUrl};
} | #include <stddef.h>
#include <cstring>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/support/sync_stream.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/grpc/grpc_mock.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/gcs_grpc/mock_storage_service.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/proto/parse_text_proto_or_die.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "google/storage/v2/storage.pb.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::CompletionNotifyingReceiver;
using ::tensorstore::Context;
using ::tensorstore::KeyRange;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::ParseTextProtoOrDie;
using ::tensorstore::StorageGeneration;
using ::tensorstore::grpc_mocker::MockGrpcServer;
using ::tensorstore::internal::AbslStatusToGrpcStatus;
using ::tensorstore::internal::FlatCordBuilder;
using ::tensorstore_grpc::MockStorage;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;
using ::google::storage::v2::DeleteObjectRequest;
using ::google::storage::v2::ListObjectsRequest;
using ::google::storage::v2::ListObjectsResponse;
using ::google::storage::v2::ReadObjectRequest;
using ::google::storage::v2::ReadObjectResponse;
using ::google::storage::v2::WriteObjectRequest;
using ::google::storage::v2::WriteObjectResponse;
class GcsGrpcTest : public testing::Test {
public:
tensorstore::KvStore OpenStore() {
ABSL_LOG(INFO) << "Using " << mock_service_.server_address();
return kvstore::Open({{"driver", "gcs_grpc"},
{"endpoint", mock_service_.server_address()},
{"bucket", "bucket"},
{"timeout", "100ms"}})
.value();
}
MockStorage& mock() { return *mock_service_.service(); }
tensorstore::grpc_mocker::MockGrpcServer<MockStorage> mock_service_;
};
TEST_F(GcsGrpcTest, Read) {
ReadObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
)pb");
ReadObjectResponse response = ParseTextProtoOrDie(R"pb(
metadata { generation: 2 }
checksummed_data { content: '1234' }
)pb");
EXPECT_CALL(mock(), ReadObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, auto*,
grpc::ServerWriter<ReadObjectResponse>* resp) -> ::grpc::Status {
resp->Write(response);
return grpc::Status::OK;
}));
auto start = absl::Now();
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto result, kvstore::Read(store, expected_request.object()).result());
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value, "1234");
EXPECT_GT(result.stamp.time, start);
EXPECT_EQ(result.stamp.generation, StorageGeneration::FromUint64(2));
}
TEST_F(GcsGrpcTest, ReadRetry) {
ReadObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
)pb");
ReadObjectResponse response = ParseTextProtoOrDie(R"pb(
metadata { generation: 2 }
checksummed_data { content: '1234' }
)pb");
::testing::Sequence s1;
EXPECT_CALL(mock(), ReadObject(_, EqualsProto(expected_request), _))
.Times(2)
.InSequence(s1)
.WillRepeatedly(testing::Return(
AbslStatusToGrpcStatus(absl::ResourceExhaustedError(""))));
EXPECT_CALL(mock(), ReadObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.InSequence(s1)
.WillRepeatedly(testing::Invoke(
[&](auto*, auto*,
grpc::ServerWriter<ReadObjectResponse>* resp) -> ::grpc::Status {
resp->Write(response);
return grpc::Status::OK;
}));
auto start = absl::Now();
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto result, kvstore::Read(store, expected_request.object()).result());
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value, "1234");
EXPECT_GT(result.stamp.time, start);
EXPECT_EQ(result.stamp.generation, StorageGeneration::FromUint64(2));
}
TEST_F(GcsGrpcTest, ReadWithOptions) {
ReadObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
if_generation_not_match: 3
if_generation_match: 1
read_offset: 1
read_limit: 9
)pb");
ReadObjectResponse response = ParseTextProtoOrDie(R"pb(
metadata { generation: 2 }
checksummed_data { content: '1234' }
)pb");
EXPECT_CALL(mock(), ReadObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, auto*,
grpc::ServerWriter<ReadObjectResponse>* resp) -> ::grpc::Status {
resp->Write(response);
return grpc::Status::OK;
}));
kvstore::ReadOptions options;
options.generation_conditions.if_not_equal = StorageGeneration::FromUint64(3);
options.generation_conditions.if_equal = StorageGeneration::FromUint64(1);
options.staleness_bound = absl::InfiniteFuture();
options.byte_range = OptionalByteRangeRequest{1, 10};
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto result,
kvstore::Read(store, expected_request.object(), options).result());
}
TEST_F(GcsGrpcTest, Write) {
std::vector<WriteObjectRequest> requests;
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'abc' bucket: "projects/_/buckets/bucket" generation: 1 }
)pb");
EXPECT_CALL(mock(), WriteObject(_, _, _))
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), absl::Cord("abcd"))
.result());
EXPECT_THAT(
requests,
testing::AllOf(
testing::SizeIs(testing::Ge(1)),
testing::Each(EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "abc" bucket: "projects/_/buckets/bucket" }
object_size: 4
}
checksummed_data { content: "abcd" crc32c: 2462583345 }
object_checksums { crc32c: 2462583345 }
finish_write: true
write_offset: 0
)pb"))));
}
TEST_F(GcsGrpcTest, WriteRetry) {
std::vector<WriteObjectRequest> requests;
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'abc' bucket: 'bucket' generation: 1 }
)pb");
::testing::Sequence s1;
EXPECT_CALL(mock(), WriteObject)
.InSequence(s1)
.WillOnce(testing::Return(
AbslStatusToGrpcStatus(absl::ResourceExhaustedError(""))));
EXPECT_CALL(mock(), WriteObject)
.InSequence(s1)
.WillOnce(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
if (reader->Read(&req)) {
requests.push_back(req);
}
return AbslStatusToGrpcStatus(absl::ResourceExhaustedError(""));
}));
EXPECT_CALL(mock(), WriteObject)
.Times(AtLeast(1))
.InSequence(s1)
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), absl::Cord("abcd"))
.result());
EXPECT_THAT(
requests,
testing::AllOf(
testing::SizeIs(testing::Ge(2)),
testing::Each(EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "abc" bucket: "projects/_/buckets/bucket" }
object_size: 4
}
checksummed_data { content: "abcd" crc32c: 2462583345 }
object_checksums { crc32c: 2462583345 }
finish_write: true
write_offset: 0
)pb"))));
}
TEST_F(GcsGrpcTest, WriteEmpty) {
std::vector<WriteObjectRequest> requests;
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'abc' bucket: 'projects/_/buckets/bucket' generation: 1 }
)pb");
EXPECT_CALL(mock(), WriteObject)
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), absl::Cord()).result());
EXPECT_THAT(
requests,
testing::AllOf(
testing::SizeIs(testing::Ge(1)),
testing::Each(EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "abc" bucket: "projects/_/buckets/bucket" }
object_size: 0
}
checksummed_data { crc32c: 0 }
object_checksums { crc32c: 0 }
finish_write: true
write_offset: 0
)pb"))));
}
TEST_F(GcsGrpcTest, WriteWithOptions) {
std::vector<WriteObjectRequest> requests;
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'abc' bucket: "projects/_/buckets/bucket" generation: 1 }
)pb");
EXPECT_CALL(mock(), WriteObject)
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), absl::Cord("abcd"),
{StorageGeneration::FromUint64(3)})
.result());
EXPECT_THAT(
requests,
testing::AllOf(
testing::SizeIs(testing::Ge(1)),
testing::Each(EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "abc" bucket: "projects/_/buckets/bucket" }
if_generation_match: 3
object_size: 4
}
checksummed_data { content: "abcd" crc32c: 2462583345 }
object_checksums { crc32c: 2462583345 }
finish_write: true
write_offset: 0
)pb"))));
}
TEST_F(GcsGrpcTest, WriteMultipleRequests) {
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'bigly' bucket: "projects/_/buckets/bucket" generation: 1 }
)pb");
std::vector<WriteObjectRequest> requests;
EXPECT_CALL(mock(), WriteObject)
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
size_t len = req.checksummed_data().content().size();
req.mutable_checksummed_data()->set_content(
absl::StrFormat("size: %d", len));
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
FlatCordBuilder cord_builder(16 + 2 * 1048576);
memset(cord_builder.data(), 0x37, cord_builder.size());
absl::Cord data = std::move(cord_builder).Build();
data.Append("abcd");
EXPECT_EQ(data.size(), 2097172);
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), data).result());
ASSERT_THAT(requests, testing::SizeIs(testing::Ge(2)));
EXPECT_THAT(
tensorstore::span(&requests[requests.size() - 2], 2),
testing::ElementsAre(
EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "bigly" bucket: "projects/_/buckets/bucket" }
object_size: 2097172
}
checksummed_data { content: "size: 2097152", crc32c: 2470751355 }
write_offset: 0
)pb"),
EqualsProto<WriteObjectRequest>(R"pb(
checksummed_data { content: "size: 20", crc32c: 2394860217 }
object_checksums { crc32c: 1181131586 }
finish_write: true
write_offset: 2097152
)pb")));
}
TEST_F(GcsGrpcTest, WriteNullopt) {
DeleteObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
if_generation_match: 0
)pb");
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, expected_request.object(), std::nullopt,
{StorageGeneration::NoValue()})
.result());
}
TEST_F(GcsGrpcTest, Delete) {
DeleteObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
)pb");
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Delete(store, expected_request.object()).result());
}
TEST_F(GcsGrpcTest, DeleteWithOptions) {
DeleteObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
if_generation_match: 2
)pb");
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation, kvstore::Delete(store, expected_request.object(),
{StorageGeneration::FromUint64(2)})
.result());
}
TEST_F(GcsGrpcTest, DeleteRange) {
ListObjectsRequest request1 = ParseTextProtoOrDie(R"pb(
parent: 'projects/_/buckets/bucket'
page_size: 1000
lexicographic_start: 'a/c'
lexicographic_end: 'a/d'
)pb");
ListObjectsResponse response1 = ParseTextProtoOrDie(R"pb(
objects { name: 'a/c' }
objects { name: 'a/ce' }
)pb");
DeleteObjectRequest request2 = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'a/c'
)pb");
DeleteObjectRequest request3 = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'a/ce'
)pb");
EXPECT_CALL(mock(), ListObjects(_, EqualsProto(request1), _))
.Times(AtLeast(1))
.WillRepeatedly(
DoAll(SetArgPointee<2>(response1), Return(grpc::Status::OK)));
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(request2), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(request3), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
auto store = OpenStore();
TENSORSTORE_EXPECT_OK(
kvstore::DeleteRange(store, KeyRange::Prefix("a/c")).result());
}
TEST_F(GcsGrpcTest, List) {
ListObjectsRequest request1 = ParseTextProtoOrDie(R"pb(
parent: 'projects/_/buckets/bucket'
page_size: 1000
)pb");
ListObjectsRequest request2 = ParseTextProtoOrDie(R"pb(
parent: 'projects/_/buckets/bucket'
page_size: 1000
page_token: 'next-page-token'
)pb");
ListObjectsResponse response1 = ParseTextProtoOrDie(R"pb(
objects { name: 'a' }
objects { name: 'b' }
next_page_token: 'next-page-token'
)pb");
ListObjectsResponse response2 = ParseTextProtoOrDie(R"pb(
objects { name: 'c' }
)pb");
EXPECT_CALL(mock(), ListObjects(_, EqualsProto(request1), _))
.Times(AtLeast(1))
.WillRepeatedly(
DoAll(SetArgPointee<2>(response1), Return(grpc::Status::OK)));
EXPECT_CALL(mock(), ListObjects(_, EqualsProto(request2), _))
.Times(AtLeast(1))
.WillRepeatedly(
DoAll(SetArgPointee<2>(response2), Return(grpc::Status::OK)));
auto store = OpenStore();
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
          &notification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: a", "set_value: b",
"set_value: c", "set_done", "set_stopping"));
}
TEST(GcsGrpcSpecTest, InvalidSpec) {
auto context = Context::Default();
EXPECT_THAT(kvstore::Open({{"driver", "gcs_grpc"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open({{"driver", "gcs_grpc"}, {"bucket", "bucket:xyz"}}, context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open(
{{"driver", "gcs_grpc"}, {"bucket", "my-bucket"}, {"path", "a\tb"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid GCS path.*"));
}
TEST(GcsGrpcUrlTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "gcs_grpc"}, {"bucket", "my-bucket"}, {"path", "abc"}},
"gcs_grpc:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "gcs_grpc"}, {"bucket", "my-bucket"}, {"path", "abc def"}},
"gcs_grpc:
}
TEST(GcsGrpcUrlTest, InvalidUri) {
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Invalid GCS bucket name: \"bucket:xyz\""));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Query string not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid GCS path.*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_grpc/gcs_grpc.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_grpc/gcs_grpc_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
43b2a04f-8127-459d-866c-5f33d8aa9a42 | cpp | google/arolla | proto_input_loader | arolla/io/proto/proto_input_loader.cc | arolla/io/proto/proto_input_loader_test.cc | #include "arolla/io/proto/proto_input_loader.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/span.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/proto/reflection/reader.h"
#include "arolla/io/proto_types/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
using proto::ProtoTypeReader;
using proto::StringFieldType;
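// Chooses the reader kind from the access path: a DenseArray reader if any
// repeated-field access is present, a DenseArrayShape reader for a trailing
// @size access, and an OptionalValue reader otherwise.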
absl::StatusOr<std::unique_ptr<ProtoTypeReader>> CreateReaderWithStringType(
absl::Span<const google::protobuf::FieldDescriptor* const> fields,
std::vector<proto::ProtoFieldAccessInfo> access_infos,
StringFieldType string_type) {
auto repeated_it = std::find_if(
access_infos.begin(), access_infos.end(),
[](proto::ProtoFieldAccessInfo v) {
return std::holds_alternative<proto::RepeatedFieldAccess>(v);
});
if (repeated_it == access_infos.end()) {
if (!access_infos.empty() &&
std::holds_alternative<proto::RepeatedFieldSizeAccess>(
access_infos.back())) {
return proto::ProtoTypeReader::CreateDenseArrayShapeReader(
fields, std::move(access_infos), string_type);
} else {
return proto::ProtoTypeReader::CreateOptionalReader(
fields, std::move(access_infos), string_type);
}
} else {
return proto::ProtoTypeReader::CreateDenseArrayReader(
fields, std::move(access_infos), string_type);
}
}
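// Parses a single protopath element into (field_name, access_info).
// Supported forms: "field" (regular access), "field[i]" (repeated access by
// index), and "field@size" (repeated field size); combining "[i]" with
// "@size" is an error.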
absl::StatusOr<std::pair<std::string, proto::ProtoFieldAccessInfo>>
ParseProtopathElement(absl::string_view path_element) {
bool is_size_element = absl::ConsumeSuffix(&path_element, "@size");
if (!absl::StrContains(path_element, '[') &&
!absl::StrContains(path_element, ']')) {
if (is_size_element) {
return std::pair{std::string(path_element),
proto::RepeatedFieldSizeAccess{}};
} else {
return std::pair{std::string(path_element),
proto::ProtoFieldAccessInfo{}};
}
}
if (is_size_element) {
return absl::FailedPreconditionError(absl::StrFormat(
"@size accessor does not accept field access by index, got %s",
path_element));
}
std::vector<absl::string_view> splits =
absl::StrSplit(path_element, absl::ByAnyChar("[]"), absl::SkipEmpty());
auto error = [&]() {
return absl::FailedPreconditionError(absl::StrCat(
"cannot parse access by index protopath element: ", path_element));
};
if (splits.size() != 2) {
return error();
}
std::string field_name(splits[0]);
size_t idx = static_cast<size_t>(-1);
if (!absl::SimpleAtoi(splits[1], &idx)) {
return error();
}
if (absl::StrFormat("%s[%d]", field_name, idx) != path_element) {
return error();
}
return std::pair{field_name, proto::RepeatedFieldIndexAccess{idx}};
}
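// Walks a protopath such as "/x", "/inners[0]/a" or "/ys/@size" through the
// message descriptor and builds a ProtoTypeReader for the addressed field.
// Enum and extension fields are unsupported, and the final field must not be
// a message unless the path ends in a size access.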
absl::StatusOr<std::unique_ptr<proto::ProtoTypeReader>> ParseProtopathToReader(
const google::protobuf::Descriptor* const descr, absl::string_view protopath,
proto::StringFieldType string_type) {
if (!absl::ConsumePrefix(&protopath, "/")) {
return absl::FailedPreconditionError(absl::StrFormat(
"protopath must start with '/', got: \"%s\"", protopath));
}
std::vector<std::string> elements = absl::StrSplit(protopath, '/');
if (elements.empty()) {
return absl::FailedPreconditionError(
absl::StrFormat("empty protopath: %s", protopath));
}
if (elements.back() == "@size" && elements.size() > 1) {
elements.pop_back();
elements.back().append("@size");
}
std::vector<const google::protobuf::FieldDescriptor*> fields;
std::vector<proto::ProtoFieldAccessInfo> access_infos;
const google::protobuf::FieldDescriptor* previous_field = nullptr;
for (absl::string_view path_element : elements) {
ASSIGN_OR_RETURN((auto [field_name, access_info]),
ParseProtopathElement(path_element));
const google::protobuf::Descriptor* current_descr;
if (previous_field != nullptr) {
current_descr = previous_field->message_type();
if (current_descr == nullptr) {
return absl::FailedPreconditionError(absl::StrFormat(
"unexpected type of the field `%s` in the protopath "
"`%s`: expected a message",
previous_field->name(), protopath));
}
} else {
current_descr = descr;
}
const google::protobuf::FieldDescriptor* field_descriptor =
current_descr->FindFieldByName(field_name);
if (field_descriptor == nullptr) {
return absl::FailedPreconditionError(absl::StrFormat(
"unknown field `%s` in the message `%s` in the protopath `%s`.",
field_name, current_descr->full_name(), protopath));
}
if (field_descriptor->enum_type() != nullptr ||
field_descriptor->is_extension()) {
return absl::FailedPreconditionError(absl::StrFormat(
"unsupported type `%s` of the field `%s` in the protopath `%s`.",
field_descriptor->type_name(), field_descriptor->name(), protopath));
}
if (field_descriptor->is_repeated() &&
std::holds_alternative<proto::RegularFieldAccess>(access_info)) {
access_info = proto::RepeatedFieldAccess{};
}
fields.push_back(field_descriptor);
access_infos.push_back(access_info);
previous_field = field_descriptor;
}
bool is_size_protopath =
std::holds_alternative<proto::RepeatedFieldSizeAccess>(
access_infos.back());
if (previous_field->message_type() != nullptr && !is_size_protopath) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "unexpected type of the last field in protopath `%s`", protopath));
}
return CreateReaderWithStringType(fields, std::move(access_infos),
string_type);
}
}
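// An InputLoader whose input names are protopaths interpreted against the
// message descriptor supplied at construction. Names are parsed on demand in
// GetQTypeOf()/BindImpl(); since the name space is open-ended,
// SuggestAvailableNames() returns nothing.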
ProtoFieldsLoader::ProtoFieldsLoader(ProtoFieldsLoader::PrivateConstructorTag,
const google::protobuf::Descriptor* descr,
proto::StringFieldType string_type)
: descr_(descr), string_type_(string_type) {}
absl::StatusOr<std::unique_ptr<InputLoader<google::protobuf::Message>>>
ProtoFieldsLoader::Create(const google::protobuf::Descriptor* descr,
proto::StringFieldType string_type) {
return std::make_unique<ProtoFieldsLoader>(PrivateConstructorTag{}, descr,
string_type);
}
absl::Nullable<const QType*> ProtoFieldsLoader::GetQTypeOf(
absl::string_view name) const {
ASSIGN_OR_RETURN(const auto& reader,
ParseProtopathToReader(descr_, name, string_type_), nullptr);
return reader->qtype();
}
std::vector<std::string> ProtoFieldsLoader::SuggestAvailableNames() const {
return {};
}
absl::StatusOr<BoundInputLoader<google::protobuf::Message>> ProtoFieldsLoader::BindImpl(
const absl::flat_hash_map<std::string, TypedSlot>& output_slots) const {
std::vector<ProtoTypeReader::BoundReadFn> readers;
for (const auto& [name, slot] : output_slots) {
ASSIGN_OR_RETURN(const auto& reader,
ParseProtopathToReader(descr_, name, string_type_));
if (reader->qtype() != slot.GetType()) {
return absl::FailedPreconditionError(
absl::StrFormat("invalid type for slot %s: expected %s, got %s", name,
slot.GetType()->name(), reader->qtype()->name()));
}
ASSIGN_OR_RETURN(auto read_fn, reader->BindReadFn(slot));
readers.push_back(read_fn);
}
return BoundInputLoader<google::protobuf::Message>(
[descr_(this->descr_), readers_(std::move(readers))](
const google::protobuf::Message& m, FramePtr frame,
RawBufferFactory*) -> absl::Status {
if (descr_ != m.GetDescriptor()) {
return absl::FailedPreconditionError(
"message must have the same descriptor as provided during "
"construction of ProtoFieldsLoader");
}
for (const auto& r : readers_) {
r(m, frame);
}
return absl::OkStatus();
});
}
} | #include "arolla/io/proto/proto_input_loader.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "google/protobuf/message.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/proto_types/types.h"
#include "arolla/io/testing/matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/naming/table.h"
#include "arolla/proto/testing/test.pb.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
namespace arolla {
namespace {
using ::arolla::testing::InputLoaderSupports;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
template <typename T>
class ProtoLoaderTest : public ::testing::Test {
public:
using StringType = T;
};
using StringTypes = ::testing::Types<Text, Bytes>;
TYPED_TEST_SUITE(ProtoLoaderTest, StringTypes);
TYPED_TEST(ProtoLoaderTest, LoadScalars) {
using StringType = TypeParam;
proto::StringFieldType string_type = std::is_same_v<StringType, Text>
? proto::StringFieldType::kText
: proto::StringFieldType::kBytes;
ASSERT_OK_AND_ASSIGN(
auto input_loader_ptr,
ProtoFieldsLoader::Create(::testing_namespace::Root::descriptor(),
string_type));
const InputLoader<google::protobuf::Message>& input_loader = *input_loader_ptr;
using OInt = ::arolla::OptionalValue<int>;
using OBytes = ::arolla::OptionalValue<Bytes>;
using OText = ::arolla::OptionalValue<StringType>;
auto oi32 = GetQType<OInt>();
auto obytes = GetQType<OBytes>();
auto otxt = GetQType<OText>();
std::string x_def_name(naming::TablePath().Column("x").FullName());
std::string inner_a_def_name(
naming::TablePath("inner").Column("a").FullName());
std::string inner_inner2_z_def_name(
naming::TablePath("inner").Child("inner2").Column("z").FullName());
std::string str_def_name(naming::TablePath().Column("str").FullName());
std::string raw_bytes_def_name(
naming::TablePath().Column("raw_bytes").FullName());
EXPECT_THAT(input_loader,
InputLoaderSupports({{x_def_name, oi32},
{inner_a_def_name, oi32},
{inner_inner2_z_def_name, oi32},
{str_def_name, otxt},
{raw_bytes_def_name, obytes}}));
FrameLayout::Builder layout_builder;
auto x_def_slot = layout_builder.AddSlot<OInt>();
auto inner_a_def_slot = layout_builder.AddSlot<OInt>();
auto inner_inner2_z_def_slot = layout_builder.AddSlot<OInt>();
auto str_def_slot = layout_builder.AddSlot<OText>();
auto raw_bytes_def_slot = layout_builder.AddSlot<OBytes>();
ASSERT_OK_AND_ASSIGN(
auto bound_input_loader,
input_loader.Bind({
{x_def_name, TypedSlot::FromSlot(x_def_slot)},
{inner_a_def_name, TypedSlot::FromSlot(inner_a_def_slot)},
{inner_inner2_z_def_name,
TypedSlot::FromSlot(inner_inner2_z_def_slot)},
{str_def_name, TypedSlot::FromSlot(str_def_slot)},
{raw_bytes_def_name, TypedSlot::FromSlot(raw_bytes_def_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
FramePtr frame = alloc.frame();
::testing_namespace::Root r;
r.set_x(19);
r.set_str("3");
r.set_raw_bytes("37");
r.mutable_inner()->set_a(57);
r.mutable_inner()->mutable_inner2()->set_z(2);
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_EQ(frame.Get(x_def_slot), 19);
EXPECT_EQ(frame.Get(inner_a_def_slot), 57);
EXPECT_EQ(frame.Get(inner_inner2_z_def_slot), 2);
EXPECT_EQ(frame.Get(str_def_slot), StringType("3"));
EXPECT_EQ(frame.Get(raw_bytes_def_slot), arolla::Bytes("37"));
r.clear_x();
r.clear_str();
r.clear_inner();
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_EQ(frame.Get(x_def_slot), std::nullopt);
EXPECT_EQ(frame.Get(inner_a_def_slot), std::nullopt);
EXPECT_EQ(frame.Get(inner_inner2_z_def_slot), std::nullopt);
EXPECT_EQ(frame.Get(str_def_slot), std::nullopt);
}
TEST(ProtoFieldsLoaderTest, ProtopathIndexAccess) {
ASSERT_OK_AND_ASSIGN(
auto input_loader,
ProtoFieldsLoader::Create(::testing_namespace::Root::descriptor()));
using oint = ::arolla::OptionalValue<int>;
auto oi32 = GetQType<oint>();
std::string ys_def_name(
naming::TablePath().Column(naming::ArrayAccess("ys", 0)).FullName());
std::string inners_a_def_name(naming::TablePath()
.Child(naming::ArrayAccess("inners", 0))
.Column("a")
.FullName());
std::string inners_as_def_name(naming::TablePath()
.Child(naming::ArrayAccess("inners", 1))
.Column(naming::ArrayAccess("as", 0))
.FullName());
EXPECT_THAT(input_loader, InputLoaderSupports({{ys_def_name, oi32},
{inners_a_def_name, oi32},
{inners_as_def_name, oi32}}));
FrameLayout::Builder layout_builder;
auto ys_def_slot = layout_builder.AddSlot<oint>();
auto inners_a_def_slot = layout_builder.AddSlot<oint>();
auto inners_as_def_slot = layout_builder.AddSlot<oint>();
ASSERT_OK_AND_ASSIGN(
auto bound_input_loader,
input_loader->Bind({
{ys_def_name, TypedSlot::FromSlot(ys_def_slot)},
{inners_a_def_name, TypedSlot::FromSlot(inners_a_def_slot)},
{inners_as_def_name, TypedSlot::FromSlot(inners_as_def_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
FramePtr frame = alloc.frame();
::testing_namespace::Root r;
r.add_ys(19);
r.add_inners()->set_a(17);
r.add_inners()->add_as(57);
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_EQ(frame.Get(ys_def_slot), 19);
EXPECT_EQ(frame.Get(inners_a_def_slot), 17);
EXPECT_EQ(frame.Get(inners_as_def_slot), 57);
r.clear_ys();
r.clear_inners();
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_EQ(frame.Get(ys_def_slot), std::nullopt);
EXPECT_EQ(frame.Get(inners_a_def_slot), std::nullopt);
EXPECT_EQ(frame.Get(inners_as_def_slot), std::nullopt);
}
TEST(ProtoFieldsLoaderTest, ProtopathRepeatedAccess) {
ASSERT_OK_AND_ASSIGN(
auto input_loader,
ProtoFieldsLoader::Create(::testing_namespace::Root::descriptor()));
using OInt = ::arolla::OptionalValue<int>;
using DAInt = arolla::DenseArray<int>;
auto dai32 = GetDenseArrayQType<int>();
std::string ys_def_name(naming::TablePath().Column("ys").FullName());
std::string inners_a_def_name(
naming::TablePath().Child("inners").Column("a").FullName());
std::string inners_as_def_name(
naming::TablePath().Child("inners").Column("as").FullName());
EXPECT_THAT(input_loader, InputLoaderSupports({{ys_def_name, dai32},
{inners_a_def_name, dai32},
{inners_as_def_name, dai32}}));
FrameLayout::Builder layout_builder;
auto ys_def_slot = layout_builder.AddSlot<DAInt>();
auto inners_a_def_slot = layout_builder.AddSlot<DAInt>();
auto inners_as_def_slot = layout_builder.AddSlot<DAInt>();
ASSERT_OK_AND_ASSIGN(
auto bound_input_loader,
input_loader->Bind({
{ys_def_name, TypedSlot::FromSlot(ys_def_slot)},
{inners_a_def_name, TypedSlot::FromSlot(inners_a_def_slot)},
{inners_as_def_name, TypedSlot::FromSlot(inners_as_def_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
FramePtr frame = alloc.frame();
::testing_namespace::Root r;
r.add_ys(19);
r.add_ys(3);
auto inners_0 = r.add_inners();
inners_0->set_a(17);
inners_0->add_as(57);
inners_0->add_as(37);
r.add_inners();
auto inners_2 = r.add_inners();
inners_2->set_a(3);
inners_2->add_as(17);
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_THAT(frame.Get(ys_def_slot), ElementsAre(OInt{19}, OInt{3}));
EXPECT_THAT(frame.Get(inners_a_def_slot),
ElementsAre(OInt{17}, std::nullopt, OInt{3}));
EXPECT_THAT(frame.Get(inners_as_def_slot),
ElementsAre(OInt{57}, OInt{37}, OInt{17}));
r.clear_ys();
r.clear_inners();
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_THAT(frame.Get(ys_def_slot), IsEmpty());
EXPECT_THAT(frame.Get(inners_a_def_slot), IsEmpty());
EXPECT_THAT(frame.Get(inners_as_def_slot), IsEmpty());
}
TEST(SizeAccessLoaderTest, ProtopathRepeatedSizeAccess) {
ASSERT_OK_AND_ASSIGN(
auto input_loader,
ProtoFieldsLoader::Create(::testing_namespace::Root::descriptor()));
using Size = proto::arolla_size_t;
using VSize = ::arolla::DenseArray<Size>;
auto root_size = GetQType<DenseArrayShape>();
auto v_size = GetDenseArrayQType<Size>();
std::string ys_size_name(naming::TablePath().Size("ys").FullName());
std::string inners_size_name(naming::TablePath().Size("inners").FullName());
std::string inners_as_size_name(
naming::TablePath().Child("inners").Size("as").FullName());
EXPECT_THAT(*input_loader,
InputLoaderSupports({{ys_size_name, root_size},
{inners_size_name, root_size},
{inners_as_size_name, v_size}}));
FrameLayout::Builder layout_builder;
auto ys_size_slot = layout_builder.AddSlot<DenseArrayShape>();
auto inners_size_slot = layout_builder.AddSlot<DenseArrayShape>();
auto inners_as_size_slot = layout_builder.AddSlot<VSize>();
ASSERT_OK_AND_ASSIGN(
auto bound_input_loader,
input_loader->Bind({
{ys_size_name, TypedSlot::FromSlot(ys_size_slot)},
{inners_size_name, TypedSlot::FromSlot(inners_size_slot)},
{inners_as_size_name, TypedSlot::FromSlot(inners_as_size_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
FramePtr frame = alloc.frame();
::testing_namespace::Root r;
r.add_ys(19);
r.add_ys(3);
auto inners_0 = r.add_inners();
inners_0->add_as(57);
inners_0->add_as(37);
r.add_inners();
auto inners_2 = r.add_inners();
inners_2->add_as(17);
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_THAT(frame.Get(ys_size_slot), Eq(DenseArrayShape{.size = 2}));
EXPECT_THAT(frame.Get(inners_size_slot), Eq(DenseArrayShape{.size = 3}));
EXPECT_THAT(frame.Get(inners_as_size_slot), ElementsAre(2, 0, 1));
r.clear_ys();
r.clear_inners();
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_THAT(frame.Get(ys_size_slot), Eq(DenseArrayShape{.size = 0}));
EXPECT_THAT(frame.Get(inners_size_slot), Eq(DenseArrayShape{.size = 0}));
EXPECT_THAT(frame.Get(inners_as_size_slot), IsEmpty());
}
TYPED_TEST(ProtoLoaderTest, LoadDenseArrays) {
using StringType = TypeParam;
proto::StringFieldType string_type = std::is_same_v<StringType, Text>
? proto::StringFieldType::kText
: proto::StringFieldType::kBytes;
ASSERT_OK_AND_ASSIGN(
auto input_loader_ptr,
ProtoFieldsLoader::Create(::testing_namespace::Root::descriptor(),
string_type));
const InputLoader<google::protobuf::Message>& input_loader = *input_loader_ptr;
using OText = ::arolla::OptionalValue<StringType>;
using OBytes = ::arolla::OptionalValue<Bytes>;
using DAText = ::arolla::DenseArray<StringType>;
using DABytes = ::arolla::DenseArray<Bytes>;
std::string str_name(naming::TablePath().Column("repeated_str").FullName());
std::string bytes_name(
naming::TablePath().Column("repeated_raw_bytes").FullName());
EXPECT_THAT(input_loader,
InputLoaderSupports({{str_name, GetQType<DAText>()},
{bytes_name, GetQType<DABytes>()}}));
FrameLayout::Builder layout_builder;
auto str_slot = layout_builder.AddSlot<DAText>();
auto bytes_slot = layout_builder.AddSlot<DABytes>();
ASSERT_OK_AND_ASSIGN(auto bound_input_loader,
input_loader.Bind({
{str_name, TypedSlot::FromSlot(str_slot)},
{bytes_name, TypedSlot::FromSlot(bytes_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
FramePtr frame = alloc.frame();
::testing_namespace::Root r;
*r.add_repeated_str() = "19";
*r.add_repeated_str() = "3";
*r.add_repeated_raw_bytes() = "3";
*r.add_repeated_raw_bytes() = "19";
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_THAT(frame.Get(str_slot),
ElementsAre(OText{StringType{"19"}}, OText{StringType{"3"}}));
EXPECT_THAT(frame.Get(bytes_slot),
ElementsAre(OBytes{Bytes{"3"}}, OBytes{Bytes{"19"}}));
r.clear_repeated_str();
r.clear_repeated_raw_bytes();
ASSERT_OK(bound_input_loader(r, frame));
EXPECT_THAT(frame.Get(str_slot), IsEmpty());
EXPECT_THAT(frame.Get(bytes_slot), IsEmpty());
}
TEST(SizeAccessErrorsLoaderTest, CreateFromProtopathsErrors) {
ASSERT_OK_AND_ASSIGN(
auto input_loader_ptr,
ProtoFieldsLoader::Create(::testing_namespace::Root::descriptor()));
EXPECT_THAT(input_loader_ptr->GetQTypeOf(""), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("x"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/i_am_not_here"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/x[:]"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/x[0]"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/x/y"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/ys/x"), IsNull());
for (auto ppath : {"/ys[]", "/ys[-1]", "/ys[a]", "/ys[0x0]", "/ys[\"0\"]",
"/ys[00]", "/ys[ 0 ]"}) {
EXPECT_THAT(input_loader_ptr->GetQTypeOf(ppath), IsNull())
<< "ppath=" << ppath;
}
}
TEST(SizeAccessErrorsLoaderTest, CreateFromSizeProtopathsErrors) {
ASSERT_OK_AND_ASSIGN(
auto input_loader_ptr,
ProtoFieldsLoader::Create(::testing_namespace::Root::descriptor()));
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/i_am_not_here/@size"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/@size"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/x/@size"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/ys[0]/@size"), IsNull());
EXPECT_THAT(input_loader_ptr->GetQTypeOf("/inners/@size/a"), IsNull());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/proto/proto_input_loader.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/proto/proto_input_loader_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
5c088239-3540-4667-9d39-2d5bbb9476af | cpp | google/quiche | quic_time | quiche/quic/core/quic_time.cc | quiche/quic/core/quic_time_test.cc | #include "quiche/quic/core/quic_time.h"
#include <cinttypes>
#include <cstdlib>
#include <limits>
#include <string>
#include "absl/strings/str_cat.h"
namespace quic {
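// Formats the delta compactly for logs: offsets that are an exact number of
// seconds print as "<n>s", exact milliseconds as "<n>ms", and everything
// else falls through to microseconds as "<n>us" (e.g. "3000001us").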
std::string QuicTime::Delta::ToDebuggingValue() const {
constexpr int64_t kMillisecondInMicroseconds = 1000;
constexpr int64_t kSecondInMicroseconds = 1000 * kMillisecondInMicroseconds;
int64_t absolute_value = std::abs(time_offset_);
if (absolute_value >= kSecondInMicroseconds &&
absolute_value % kSecondInMicroseconds == 0) {
return absl::StrCat(time_offset_ / kSecondInMicroseconds, "s");
}
if (absolute_value >= kMillisecondInMicroseconds &&
absolute_value % kMillisecondInMicroseconds == 0) {
return absl::StrCat(time_offset_ / kMillisecondInMicroseconds, "ms");
}
return absl::StrCat(time_offset_, "us");
}
uint64_t QuicWallTime::ToUNIXSeconds() const { return microseconds_ / 1000000; }
uint64_t QuicWallTime::ToUNIXMicroseconds() const { return microseconds_; }
bool QuicWallTime::IsAfter(QuicWallTime other) const {
return microseconds_ > other.microseconds_;
}
bool QuicWallTime::IsBefore(QuicWallTime other) const {
return microseconds_ < other.microseconds_;
}
bool QuicWallTime::IsZero() const { return microseconds_ == 0; }
QuicTime::Delta QuicWallTime::AbsoluteDifference(QuicWallTime other) const {
uint64_t d;
if (microseconds_ > other.microseconds_) {
d = microseconds_ - other.microseconds_;
} else {
d = other.microseconds_ - microseconds_;
}
if (d > static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
d = std::numeric_limits<int64_t>::max();
}
return QuicTime::Delta::FromMicroseconds(d);
}
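// Add() and Subtract() saturate instead of wrapping: an overflowing addition
// clamps to the maximum uint64_t wall time, and a subtraction past zero
// clamps to zero. Overflow is detected by the unsigned result moving in the
// "wrong" direction relative to microseconds_.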
QuicWallTime QuicWallTime::Add(QuicTime::Delta delta) const {
uint64_t microseconds = microseconds_ + delta.ToMicroseconds();
if (microseconds < microseconds_) {
microseconds = std::numeric_limits<uint64_t>::max();
}
return QuicWallTime(microseconds);
}
QuicWallTime QuicWallTime::Subtract(QuicTime::Delta delta) const {
uint64_t microseconds = microseconds_ - delta.ToMicroseconds();
if (microseconds > microseconds_) {
microseconds = 0;
}
return QuicWallTime(microseconds);
}
} | #include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
namespace quic {
namespace test {
class QuicTimeDeltaTest : public QuicTest {};
TEST_F(QuicTimeDeltaTest, Zero) {
EXPECT_TRUE(QuicTime::Delta::Zero().IsZero());
EXPECT_FALSE(QuicTime::Delta::Zero().IsInfinite());
EXPECT_FALSE(QuicTime::Delta::FromMilliseconds(1).IsZero());
}
TEST_F(QuicTimeDeltaTest, Infinite) {
EXPECT_TRUE(QuicTime::Delta::Infinite().IsInfinite());
EXPECT_FALSE(QuicTime::Delta::Zero().IsInfinite());
EXPECT_FALSE(QuicTime::Delta::FromMilliseconds(1).IsInfinite());
}
TEST_F(QuicTimeDeltaTest, FromTo) {
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(1),
QuicTime::Delta::FromMicroseconds(1000));
EXPECT_EQ(QuicTime::Delta::FromSeconds(1),
QuicTime::Delta::FromMilliseconds(1000));
EXPECT_EQ(QuicTime::Delta::FromSeconds(1),
QuicTime::Delta::FromMicroseconds(1000000));
EXPECT_EQ(1, QuicTime::Delta::FromMicroseconds(1000).ToMilliseconds());
EXPECT_EQ(2, QuicTime::Delta::FromMilliseconds(2000).ToSeconds());
EXPECT_EQ(1000, QuicTime::Delta::FromMilliseconds(1).ToMicroseconds());
EXPECT_EQ(1, QuicTime::Delta::FromMicroseconds(1000).ToMilliseconds());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(2000).ToMicroseconds(),
QuicTime::Delta::FromSeconds(2).ToMicroseconds());
}
TEST_F(QuicTimeDeltaTest, Add) {
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(2000),
QuicTime::Delta::Zero() + QuicTime::Delta::FromMilliseconds(2));
}
TEST_F(QuicTimeDeltaTest, Subtract) {
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(1000),
QuicTime::Delta::FromMilliseconds(2) -
QuicTime::Delta::FromMilliseconds(1));
}
TEST_F(QuicTimeDeltaTest, Multiply) {
int i = 2;
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(4000),
QuicTime::Delta::FromMilliseconds(2) * i);
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(4000),
i * QuicTime::Delta::FromMilliseconds(2));
double d = 2;
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(4000),
QuicTime::Delta::FromMilliseconds(2) * d);
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(4000),
d * QuicTime::Delta::FromMilliseconds(2));
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(5),
QuicTime::Delta::FromMicroseconds(9) * 0.5);
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(2),
QuicTime::Delta::FromMicroseconds(12) * 0.2);
}
TEST_F(QuicTimeDeltaTest, Max) {
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(2000),
std::max(QuicTime::Delta::FromMicroseconds(1000),
QuicTime::Delta::FromMicroseconds(2000)));
}
TEST_F(QuicTimeDeltaTest, NotEqual) {
EXPECT_TRUE(QuicTime::Delta::FromSeconds(0) !=
QuicTime::Delta::FromSeconds(1));
EXPECT_FALSE(QuicTime::Delta::FromSeconds(0) !=
QuicTime::Delta::FromSeconds(0));
}
TEST_F(QuicTimeDeltaTest, DebuggingValue) {
const QuicTime::Delta one_us = QuicTime::Delta::FromMicroseconds(1);
const QuicTime::Delta one_ms = QuicTime::Delta::FromMilliseconds(1);
const QuicTime::Delta one_s = QuicTime::Delta::FromSeconds(1);
EXPECT_EQ("1s", one_s.ToDebuggingValue());
EXPECT_EQ("3s", (3 * one_s).ToDebuggingValue());
EXPECT_EQ("1ms", one_ms.ToDebuggingValue());
EXPECT_EQ("3ms", (3 * one_ms).ToDebuggingValue());
EXPECT_EQ("1us", one_us.ToDebuggingValue());
EXPECT_EQ("3us", (3 * one_us).ToDebuggingValue());
EXPECT_EQ("3001us", (3 * one_ms + one_us).ToDebuggingValue());
EXPECT_EQ("3001ms", (3 * one_s + one_ms).ToDebuggingValue());
EXPECT_EQ("3000001us", (3 * one_s + one_us).ToDebuggingValue());
}
class QuicTimeTest : public QuicTest {
protected:
MockClock clock_;
};
TEST_F(QuicTimeTest, Initialized) {
EXPECT_FALSE(QuicTime::Zero().IsInitialized());
EXPECT_TRUE((QuicTime::Zero() + QuicTime::Delta::FromMicroseconds(1))
.IsInitialized());
}
TEST_F(QuicTimeTest, CopyConstruct) {
QuicTime time_1 = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1234);
EXPECT_NE(time_1, QuicTime(QuicTime::Zero()));
EXPECT_EQ(time_1, QuicTime(time_1));
}
TEST_F(QuicTimeTest, CopyAssignment) {
QuicTime time_1 = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1234);
QuicTime time_2 = QuicTime::Zero();
EXPECT_NE(time_1, time_2);
time_2 = time_1;
EXPECT_EQ(time_1, time_2);
}
TEST_F(QuicTimeTest, Add) {
QuicTime time_1 = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1);
QuicTime time_2 = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(2);
QuicTime::Delta diff = time_2 - time_1;
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(1), diff);
EXPECT_EQ(1000, diff.ToMicroseconds());
EXPECT_EQ(1, diff.ToMilliseconds());
}
TEST_F(QuicTimeTest, Subtract) {
QuicTime time_1 = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1);
QuicTime time_2 = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(2);
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(1), time_2 - time_1);
}
TEST_F(QuicTimeTest, SubtractDelta) {
QuicTime time = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(2);
EXPECT_EQ(QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1),
time - QuicTime::Delta::FromMilliseconds(1));
}
TEST_F(QuicTimeTest, Max) {
QuicTime time_1 = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1);
QuicTime time_2 = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(2);
EXPECT_EQ(time_2, std::max(time_1, time_2));
}
TEST_F(QuicTimeTest, MockClock) {
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
QuicTime now = clock_.ApproximateNow();
QuicTime time = QuicTime::Zero() + QuicTime::Delta::FromMicroseconds(1000);
EXPECT_EQ(now, time);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
now = clock_.ApproximateNow();
EXPECT_NE(now, time);
time = time + QuicTime::Delta::FromMilliseconds(1);
EXPECT_EQ(now, time);
}
TEST_F(QuicTimeTest, LE) {
const QuicTime zero = QuicTime::Zero();
const QuicTime one = zero + QuicTime::Delta::FromSeconds(1);
EXPECT_TRUE(zero <= zero);
EXPECT_TRUE(zero <= one);
EXPECT_TRUE(one <= one);
EXPECT_FALSE(one <= zero);
}
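// Editor's illustrative sketch (not part of the original suite): checks the
// saturating behavior implemented in quic_time.cc. Assumes the
// QuicWallTime::FromUNIXMicroseconds() factory declared in quic_time.h.
TEST_F(QuicTimeTest, WallTimeSaturatesInsteadOfWrapping) {
  const QuicWallTime zero = QuicWallTime::FromUNIXMicroseconds(0);
  // Subtracting below zero clamps to zero rather than wrapping around.
  EXPECT_TRUE(zero.Subtract(QuicTime::Delta::FromMicroseconds(1)).IsZero());
}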
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_time.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_time_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
4e13516c-178e-4cf2-b24e-2d400d9d8e92 | cpp | tensorflow/tensorflow | sparse_dense_binary_op_shared | tensorflow/core/kernels/sparse_dense_binary_op_shared.cc | tensorflow/core/kernels/sparse_dense_binary_op_shared_test.cc | #define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/cwise_ops.h"
#include "tensorflow/core/kernels/cwise_ops_common.h"
#include "tensorflow/core/util/bcast.h"
using Eigen::TensorRef;
using tensorflow::gtl::ArraySlice;
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
template <typename Device, typename T, typename Functor>
class SparseDenseBinaryOpShared : public OpKernel {
public:
explicit SparseDenseBinaryOpShared(OpKernelConstruction *ctx)
: OpKernel(ctx) {}
void Compute(OpKernelContext *ctx) override {
const Tensor *indices_t, *values_t, *shape_t, *dense_t;
OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t));
OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()),
errors::InvalidArgument(
"Input sp_indices should be a matrix but received shape: ",
indices_t->shape().DebugString()));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(values_t->shape()) &&
TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument(
"Inputs sp_values and sp_shape should be vectors "
"but received shapes: ",
values_t->shape().DebugString(), " and ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument("Input sp_shape must be a vector. Got: ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, values_t->dim_size(0) == indices_t->dim_size(0),
errors::InvalidArgument(
"The first dimension of values and indices should match. (",
values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")"));
OP_REQUIRES(
ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", shape_t->shape().dim_size(0),
" dimensions, indices shape: ", indices_t->shape().DebugString()));
OP_REQUIRES(ctx, shape_t->NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
const auto indices_mat = indices_t->matrix<int64_t>();
const auto shape_vec = shape_t->vec<int64_t>();
TensorShape lhs_shape;
OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_vec, &lhs_shape));
const auto lhs_dims = BCast::FromShape(lhs_shape);
const auto rhs_dims = BCast::FromShape(dense_t->shape());
BCast b(lhs_dims, rhs_dims, false);
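    // Only the dense operand may broadcast: the dense rank must not exceed
    // the sparse rank, and every trailing dense dimension must be compatible
    // with the matching sparse dimension. E.g. a sparse [3, 2] operand
    // accepts a dense [3, 1] or [2], but a dense [2, 1] against a sparse [1]
    // is rejected below.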
auto VecGreaterEq = [](absl::Span<const int64_t> lhs,
absl::Span<const int64_t> rhs) {
if (lhs.size() < rhs.size()) return false;
for (size_t i = 0; i < rhs.size(); ++i) {
if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false;
}
return true;
};
OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(),
errors::InvalidArgument(
"SparseDenseBinaryOpShared broadcasts dense to sparse "
"only; got incompatible shapes: [",
absl::StrJoin(lhs_dims, ","), "] vs. [",
absl::StrJoin(rhs_dims, ","), "]"));
Tensor *output_values = nullptr;
Tensor dense_gathered;
const int64_t nnz = indices_t->dim_size(0);
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({nnz}), &output_values));
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),
&dense_gathered));
bool op_is_div = false;
if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) {
op_is_div = true;
}
auto dense_gathered_flat = dense_gathered.flat<T>();
const int ndims = lhs_dims.size();
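    // For each sparse nonzero, gather the (broadcast) dense value at that
    // index into dense_gathered; the CASE macro instantiates this gather per
    // supported rank, bounds-checking indices against the broadcast shape.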
switch (ndims) {
#define CASE(NDIM) \
case NDIM: { \
TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \
dense_t->shaped<T, NDIM>(b.y_reshape()) \
.broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \
Eigen::array<Eigen::DenseIndex, NDIM> idx; \
bool indices_valid = true; \
for (int i = 0; i < nnz; ++i) { \
for (int d = 0; d < NDIM; ++d) { \
idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \
if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \
indices_valid = false; \
} \
} \
OP_REQUIRES( \
ctx, indices_valid, \
errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \
"dense side with broadcasted shape")); \
dense_gathered_flat(i) = rhs_ref.coeff(idx); \
if (op_is_div) { \
OP_REQUIRES(ctx, dense_gathered_flat(i) != T{0}, \
errors::InvalidArgument( \
"SparseDenseCwiseDiv cannot divide by zero," \
"but input dense tensor contains zero ")); \
} \
} \
break; \
}
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(5);
default:
OP_REQUIRES(
ctx, false,
errors::InvalidArgument("Only tensors with ranks between 1 and 5 "
"are currently supported. Tensor rank: ",
ndims));
#undef CASE
}
output_values->flat<T>().device(ctx->eigen_device<Device>()) =
values_t->flat<T>().binaryExpr(dense_gathered_flat,
typename Functor::func());
}
};
#define REGISTER_KERNELS(T) \
REGISTER_KERNEL_BUILDER( \
Name("SparseDenseCwiseMul").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
SparseDenseBinaryOpShared<CPUDevice, T, functor::mul<T>>) \
\
REGISTER_KERNEL_BUILDER( \
Name("SparseDenseCwiseDiv").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
SparseDenseBinaryOpShared<CPUDevice, T, functor::div<T>>) \
REGISTER_KERNEL_BUILDER( \
Name("SparseDenseCwiseAdd").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
SparseDenseBinaryOpShared<CPUDevice, T, functor::add<T>>)
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNELS);
#undef REGISTER_KERNELS
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
static void ExpectHasSubstr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
class SparseDenseCDivTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp() {
DataType value_type = tensorflow::DataTypeToEnum<T>::value;
TF_ASSERT_OK(NodeDefBuilder("cdiv", "SparseDenseCwiseDiv")
.Input(FakeInput(DT_INT64))
.Input(FakeInput(value_type))
.Input(FakeInput(DT_INT64))
.Input(FakeInput(value_type))
.Attr("T", value_type)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
class SparseDenseCMulTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp() {
DataType value_type = tensorflow::DataTypeToEnum<T>::value;
TF_ASSERT_OK(NodeDefBuilder("cmul", "SparseDenseCwiseMul")
.Input(FakeInput(DT_INT64))
.Input(FakeInput(value_type))
.Input(FakeInput(DT_INT64))
.Input(FakeInput(value_type))
.Attr("T", value_type)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(SparseDenseCDivTest, DoNotBroadcastSparse_FewerDims) {
MakeOp<float>();
AddInputFromArray<int64_t>(TensorShape({1, 1}), {0});
AddInputFromArray<float>(TensorShape({1}), {1618});
AddInputFromArray<int64_t>(TensorShape({1}), {1});
AddInputFromArray<float>(TensorShape({2, 1}), {17, 19});
ExpectHasSubstr(RunOpKernel().ToString(), "broadcasts dense to sparse only");
}
TEST_F(SparseDenseCDivTest, DoNotBroadcastSparse_SameDims) {
MakeOp<float>();
AddInputFromArray<int64_t>(TensorShape({1, 2}), {0, 0});
AddInputFromArray<float>(TensorShape({1}), {1618});
AddInputFromArray<int64_t>(TensorShape({2}), {1, 1});
AddInputFromArray<float>(TensorShape({2, 1}), {17, 19});
ExpectHasSubstr(RunOpKernel().ToString(), "broadcasts dense to sparse only");
}
TEST_F(SparseDenseCDivTest, SameShape) {
MakeOp<float>();
const auto indices_shape = TensorShape({4, 2});
std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1};
const absl::Span<const int64_t> indices(in);
std::initializer_list<int64_t> sh{3, 2};
const absl::Span<const int64_t> shape(sh);
Tensor dense(DT_FLOAT, TensorShape(shape));
auto dense_flat = dense.flat<float>();
dense_flat.setConstant(1.);
AddInputFromArray<int64_t>(indices_shape, indices);
AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<int64_t>(TensorShape({2}), shape);
AddInputFromArray<float>(TensorShape(shape), dense_flat);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&expected, {1, 2, 3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(SparseDenseCDivTest, BroadcastDenseSameDims) {
MakeOp<float>();
const auto indices_shape = TensorShape({4, 2});
std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1};
const absl::Span<const int64_t> indices(in);
std::initializer_list<int64_t> sh{3, 2};
const absl::Span<const int64_t> shape(sh);
Tensor dense(DT_FLOAT, TensorShape({3, 1}));
auto dense_flat = dense.flat<float>();
dense_flat.setConstant(1.);
AddInputFromArray<int64_t>(indices_shape, indices);
AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<int64_t>(TensorShape({2}), shape);
AddInputFromArray<float>(TensorShape({3, 1}), dense_flat);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&expected, {1, 2, 3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(SparseDenseCDivTest, BroadcastDenseFewerDims) {
MakeOp<float>();
const auto indices_shape = TensorShape({4, 2});
std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1};
const absl::Span<const int64_t> indices(in);
std::initializer_list<int64_t> sh{3, 2};
const absl::Span<const int64_t> shape(sh);
Tensor dense(DT_FLOAT, TensorShape({2}));
auto dense_flat = dense.flat<float>();
dense_flat.setConstant(1.);
AddInputFromArray<int64_t>(indices_shape, indices);
AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<int64_t>(TensorShape({2}), shape);
AddInputFromArray<float>(TensorShape({2}), dense_flat);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&expected, {1, 2, 3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(SparseDenseCMulTest, BroadcastDense) {
MakeOp<float>();
const auto indices_shape = TensorShape({4, 2});
std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1};
const absl::Span<const int64_t> indices(in);
std::initializer_list<int64_t> sh{3, 2};
const absl::Span<const int64_t> shape(sh);
Tensor dense(DT_FLOAT, TensorShape({2}));
auto dense_flat = dense.flat<float>();
dense_flat(0) = 0.5;
dense_flat(1) = 0;
AddInputFromArray<int64_t>(indices_shape, indices);
AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<int64_t>(TensorShape({2}), shape);
AddInputFromArray<float>(TensorShape({2}), dense_flat);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&expected, {0, 1, 1.5, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
static Graph* SparseMatCMulDenseMat(Graph* g, Node* sp_indices, Node* sp_vals,
Node* sp_shape, Node* dense) {
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("SparseDenseCwiseMul"), "SparseDenseCwiseMul")
.Input(sp_indices)
.Input(sp_vals)
.Input(sp_shape)
.Input(dense)
.Finalize(g, &ret));
return g;
}
static Node* MakeTensor(Graph* g, int B, int M, int N) {
Tensor data(DT_FLOAT, TensorShape({B, M, N}));
data.flat<float>().setRandom();
return test::graph::Constant(g, data);
}
struct ST {
Node* indices;
Node* vals;
Node* shape;
};
static ST MakeSparseTensor(Graph* g, int B, int M, int N, int nnz_inner) {
const int total_nnz = B * M * nnz_inner;
const int kNumDims = 3;
Tensor indices(DT_INT64, TensorShape({total_nnz, kNumDims}));
Tensor vals(DT_FLOAT, TensorShape({total_nnz}));
Tensor shape(DT_INT64, TensorShape({kNumDims}));
vals.flat<float>().setRandom();
test::FillValues(&shape, absl::Span<const int64_t>({B, M, N}));
auto indices_mat = indices.matrix<int64_t>();
int nnz_cnt = 0;
std::unordered_set<int> picked;
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dist(0, N - 1);
for (int i = 0; i < B; ++i) {
for (int j = 0; j < M; ++j) {
for (int k = 0; k < nnz_inner; ++k) {
indices_mat(nnz_cnt, 0) = i;
indices_mat(nnz_cnt, 1) = j;
int inner = dist(gen);
while (picked.count(inner) == 1) {
inner = dist(gen);
}
picked.insert(inner);
indices_mat(nnz_cnt, 2) = inner;
++nnz_cnt;
}
}
}
return ST{test::graph::Constant(g, indices), test::graph::Constant(g, vals),
test::graph::Constant(g, shape)};
}
#define BM_SparseMatCMulDenseMatArgs(N, NNZ_INNER) \
static void BM_SparseMatCMulDenseMat_##N##_##NNZ_INNER( \
::testing::benchmark::State& state) { \
Graph* g = new Graph(OpRegistry::Global()); \
Node* dense = MakeTensor(g, 8, 4, N); \
ST sp = MakeSparseTensor(g, 8, 4, N, NNZ_INNER); \
\
test::Benchmark( \
"cpu", SparseMatCMulDenseMat(g, sp.indices, sp.vals, sp.shape, dense), \
false) \
.Run(state); \
state.SetItemsProcessed( \
static_cast<int64_t>(state.iterations() * 8 * 4 * N * 2)); \
} \
BENCHMARK(BM_SparseMatCMulDenseMat_##N##_##NNZ_INNER)
BM_SparseMatCMulDenseMatArgs(1048576, 1);
BM_SparseMatCMulDenseMatArgs(1048576, 8);
BM_SparseMatCMulDenseMatArgs(1048576, 32);
BM_SparseMatCMulDenseMatArgs(262144, 1);
BM_SparseMatCMulDenseMatArgs(262144, 8);
BM_SparseMatCMulDenseMatArgs(262144, 32);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_dense_binary_op_shared_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a43301bc-e07c-4b0a-982c-f19e66c6fd34 | cpp | google/cel-cpp | type_check_issue | checker/type_check_issue.cc | checker/type_check_issue_test.cc | #include "checker/type_check_issue.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "common/source.h"
namespace cel {
namespace {
absl::string_view SeverityString(TypeCheckIssue::Severity severity) {
switch (severity) {
case TypeCheckIssue::Severity::kInformation:
return "INFORMATION";
case TypeCheckIssue::Severity::kWarning:
return "WARNING";
case TypeCheckIssue::Severity::kError:
return "ERROR";
case TypeCheckIssue::Severity::kDeprecated:
return "DEPRECATED";
default:
return "SEVERITY_UNSPECIFIED";
}
}
}
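// Renders the issue as "<SEVERITY>: <source description>:<line>:<col>:
// <message>", followed by the source's caret snippet for the location when
// one is available (e.g. "ERROR: <input>:2:2: test error").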
std::string TypeCheckIssue::ToDisplayString(const Source& source) const {
return absl::StrCat(
absl::StrFormat("%s: %s:%d:%d: %s", SeverityString(severity_),
source.description(), location_.line, location_.column,
message_),
source.DisplayErrorLocation(location_));
}
} | #include "checker/type_check_issue.h"
#include "common/source.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(TypeCheckIssueTest, DisplayString) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("test{\n\tfield1: 123\n}"));
TypeCheckIssue issue = TypeCheckIssue::CreateError(2, 2, "test error");
EXPECT_EQ(issue.ToDisplayString(*source),
"ERROR: <input>:2:2: test error\n"
" | field1: 123\n"
" | ..^");
}
TEST(TypeCheckIssueTest, DisplayStringNoPosition) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("test{\n\tfield1: 123\n}"));
TypeCheckIssue issue = TypeCheckIssue::CreateError(-1, -1, "test error");
EXPECT_EQ(issue.ToDisplayString(*source), "ERROR: <input>:-1:-1: test error");
}
TEST(TypeCheckIssueTest, DisplayStringDeprecated) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("test{\n\tfield1: 123\n}"));
TypeCheckIssue issue = TypeCheckIssue(TypeCheckIssue::Severity::kDeprecated,
{-1, -1}, "test error 2");
EXPECT_EQ(issue.ToDisplayString(*source),
"DEPRECATED: <input>:-1:-1: test error 2");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/type_check_issue.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/type_check_issue_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
e1211064-e34b-4b2f-a04e-4c5792219b55 | cpp | google/tensorstore | str_cat | tensorstore/util/str_cat.h | tensorstore/util/str_cat_test.cc | #ifndef TENSORSTORE_UTIL_STR_CAT_H_
#define TENSORSTORE_UTIL_STR_CAT_H_
#include <cstddef>
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_strcat {
template <typename... T, typename F>
constexpr bool Requires(F) {
return std::is_invocable_v<F, T...>;
}
template <typename T>
auto ToAlphaNumOrString(const T& x);
template <typename T>
std::string StringifyUsingOstream(const T& x) {
std::ostringstream ostr;
ostr << x;
return ostr.str();
}
template <typename... T>
std::string StringifyTuple(const std::tuple<T...>& x) {
return std::apply(
[](const auto&... item) {
std::string result = "{";
size_t i = 0;
(absl::StrAppend(&result, ToAlphaNumOrString(item),
(++i == sizeof...(item) ? "}" : ", ")),
...);
return result;
},
x);
}
template <typename A, typename B>
std::string StringifyPair(const std::pair<A, B>& x) {
return absl::StrCat("{", ToAlphaNumOrString(x.first), ", ",
ToAlphaNumOrString(x.second), "}");
}
template <typename Iterator>
std::string StringifyContainer(Iterator begin, Iterator end) {
std::string result = "{";
if (begin != end) {
absl::StrAppend(&result, ToAlphaNumOrString(*begin++));
}
for (; begin != end; ++begin) {
absl::StrAppend(&result, ", ", ToAlphaNumOrString(*begin));
}
absl::StrAppend(&result, "}");
return result;
}
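// Stringification dispatch, in order: nullptr prints as "null";
// AlphaNum-convertible non-enum values pass through unchanged; ostreamable
// types use operator<<; pairs, tuples, and iterable containers render
// recursively as "{a, b, ...}"; remaining enums convert to their underlying
// integer type; anything else falls back to ostream formatting.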
template <typename T>
auto ToAlphaNumOrString(const T& x) {
if constexpr (std::is_same_v<T, std::nullptr_t>) {
return "null";
} else if constexpr (std::is_convertible_v<T, absl::AlphaNum> &&
!std::is_enum_v<T>) {
return x;
} else if constexpr (internal::IsOstreamable<T>) {
return StringifyUsingOstream(x);
} else if constexpr (Requires<const T>(
[](auto&& v) -> decltype(StringifyPair(v)) {})) {
return StringifyPair(x);
} else if constexpr (Requires<const T>(
[](auto&& v) -> decltype(StringifyTuple(v)) {})) {
return StringifyTuple(x);
} else if constexpr (Requires<const T>(
[](auto&& v) -> decltype(v.begin(), v.end()) {})) {
return StringifyContainer(x.begin(), x.end());
} else if constexpr (std::is_enum_v<T>) {
using I = typename std::underlying_type<T>::type;
return static_cast<I>(x);
} else {
return StringifyUsingOstream(x);
}
}
}
template <typename Element, std::ptrdiff_t N>
std::enable_if_t<internal::IsOstreamable<Element>, std::ostream&> operator<<(
std::ostream& os, ::tensorstore::span<Element, N> s) {
os << "{";
std::ptrdiff_t size = s.size();
for (std::ptrdiff_t i = 0; i < size; ++i) {
if (i != 0) os << ", ";
os << s[i];
}
return os << "}";
}
template <typename... Arg>
std::string StrCat(const Arg&... arg) {
return absl::StrCat(internal_strcat::ToAlphaNumOrString(arg)...);
}
template <typename... Arg>
void StrAppend(std::string* result, const Arg&... arg) {
return absl::StrAppend(result, internal_strcat::ToAlphaNumOrString(arg)...);
}
}
#endif | #include "tensorstore/util/str_cat.h"
#include <complex>
#include <map>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::internal_strcat::StringifyUsingOstream;
enum class OstreamableEnum { value = 0 };
enum class PlainEnum { value = 0 };
std::ostream& operator<<(std::ostream& os, OstreamableEnum e) {
return os << "enum";
}
TEST(ToStringUsingOstreamTest, Basic) {
EXPECT_EQ("hello", StringifyUsingOstream("hello"));
EXPECT_EQ("1", StringifyUsingOstream(1));
EXPECT_EQ("(1,2)", StringifyUsingOstream(std::complex<float>(1, 2)));
}
TEST(StrAppendTest, Basic) {
std::string result = "X";
tensorstore::StrAppend(&result, "a", std::complex<float>(1, 2), 3);
EXPECT_EQ("Xa(1,2)3", result);
}
TEST(StrCat, Basic) {
EXPECT_EQ("a(1,2)3", tensorstore::StrCat("a", std::complex<float>(1, 2), 3));
char a = 'a';
EXPECT_EQ("a", tensorstore::StrCat(a));
}
TEST(StrCat, Enum) {
EXPECT_EQ("enum", tensorstore::StrCat(OstreamableEnum::value));
EXPECT_EQ("0", tensorstore::StrCat(PlainEnum::value));
}
TEST(StrCat, Null) { EXPECT_EQ("null", tensorstore::StrCat(nullptr)); }
TEST(StrCat, Tuple) {
EXPECT_EQ("{1, 2, abc}", tensorstore::StrCat(std::make_tuple(1, 2.0, "abc")));
}
TEST(StrCat, Pair) {
EXPECT_EQ("{2, abc}", tensorstore::StrCat(std::make_pair(2.0, "abc")));
}
TEST(StrCat, Container) {
std::vector<int> x{1, 2, 3};
EXPECT_EQ("{1, 2, 3}", tensorstore::StrCat(x));
EXPECT_EQ("{1, 2, 3}", tensorstore::StrCat(tensorstore::span(x)));
std::map<std::string, int> y{{"a", 1}, {"b", 2}};
EXPECT_EQ("{{a, 1}, {b, 2}}", tensorstore::StrCat(y));
}
TEST(StrCat, Nested) {
std::vector<std::pair<int, int>> x{{1, 2}, {2, 3}};
EXPECT_EQ("{{1, 2}, {2, 3}}", tensorstore::StrCat(x));
std::pair<std::pair<int, int>, std::pair<int, int>> y{{1, 2}, {2, 3}};
EXPECT_EQ("{{1, 2}, {2, 3}}", tensorstore::StrCat(y));
}
TEST(SpanTest, Ostream) {
std::ostringstream ostr;
ostr << tensorstore::span({1, 2, 3});
EXPECT_EQ("{1, 2, 3}", ostr.str());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/str_cat.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/str_cat_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8af32f24-845c-498c-bf76-f9bf36338851 | cpp | tensorflow/tensorflow | batch_dataset_op | tensorflow/core/kernels/data/batch_dataset_op.cc | tensorflow/core/kernels/data/batch_dataset_op_test.cc | #include "tensorflow/core/kernels/data/batch_dataset_op.h"
#include <algorithm>
#include <cstdlib>
#include <functional>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
constexpr const char* const BatchDatasetOp::kDatasetType;
constexpr const char* const BatchDatasetOp::kInputDataset;
constexpr const char* const BatchDatasetOp::kBatchSize;
constexpr const char* const BatchDatasetOp::kDropRemainder;
constexpr const char* const BatchDatasetOp::kParallelCopy;
constexpr const char* const BatchDatasetOp::kOutputTypes;
constexpr const char* const BatchDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kBatchDataset[] = "BatchDataset";
class BatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t batch_size, bool drop_remainder,
bool parallel_copy, const DatasetBase* input, int op_version)
: DatasetBase(DatasetContext(ctx)),
batch_size_(batch_size),
reserve_size_(drop_remainder ? batch_size
: std::min<int64_t>(batch_size, 1 << 16)),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
input_(input),
op_version_(op_version),
traceme_metadata_(
{{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"},
{"parallel_copy", parallel_copy ? "true" : "false"}}) {
input_->Ref();
const auto& input_shapes = input_->output_shapes();
output_shapes_.reserve(input_shapes.size());
for (const auto& input_shape : input_shapes) {
if (drop_remainder_ || input_->Cardinality() == kInfiniteCardinality) {
output_shapes_.emplace_back(
PartialTensorShape({batch_size_}).Concatenate(input_shape));
} else {
output_shapes_.emplace_back(
PartialTensorShape({-1}).Concatenate(input_shape));
}
}
random_indexing_compatible_ = absl::OkStatus();
if (!drop_remainder_) {
random_indexing_compatible_ = absl::FailedPreconditionError(absl::StrCat(
type_string(),
" does not support global shuffling with `drop_remainder=False`."));
} else if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
params.set_args(batch_size_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
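  // Number of batches: ceil(n / batch_size_) when partial batches are kept,
  // floor(n / batch_size_) with drop_remainder_ (e.g. n=10, batch_size=3
  // yields 4 vs. 3 batches); infinite/unknown cardinality passes through.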
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
const int64 cardinality = Cardinality();
if (index < 0 || index >= cardinality) {
return errors::OutOfRange("Index out of range [0, ", cardinality,
"):", index);
}
int batch_start_index = batch_size_ * index;
std::vector<std::vector<Tensor>> batch_elements;
int input_cardinality = input_->Cardinality();
for (int i = batch_start_index;
i < batch_start_index + batch_size_ && i < input_cardinality; ++i) {
std::vector<Tensor> batch_element_tuple;
TF_RETURN_IF_ERROR(input_->Get(ctx, i, &batch_element_tuple));
batch_elements.emplace_back(std::move(batch_element_tuple));
}
TF_RETURN_IF_ERROR(CopyBatch(AnyContext(ctx), std::move(batch_elements),
parallel_copy_, out_tensors));
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size));
Node* drop_remainder = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder));
AttrValue parallel_copy;
b->BuildAttrValue(parallel_copy_, ¶llel_copy);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, batch_size, drop_remainder},
{{kParallelCopy, parallel_copy}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
tsl::mutex_lock l(mu_);
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::vector<std::vector<Tensor>> batch_elements;
{
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
batch_elements.reserve(dataset()->reserve_size_);
*end_of_sequence = false;
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence; ++i) {
std::vector<Tensor> batch_element_tuple;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
&batch_element_tuple,
end_of_sequence));
if (!*end_of_sequence) {
batch_elements.emplace_back(std::move(batch_element_tuple));
} else {
input_impl_.reset();
}
}
ctx_with_index_mapper.MergeCheckpoint();
}
if (batch_elements.empty()) {
DCHECK(*end_of_sequence);
return absl::OkStatus();
}
if (dataset()->drop_remainder_ &&
batch_elements.size() < dataset()->batch_size_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(CopyBatch(AnyContext(ctx), std::move(batch_elements),
dataset()->parallel_copy_, out_tensors));
*end_of_sequence = false;
return absl::OkStatus();
}
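    // For global shuffling, batches are shuffled as units: an input element
    // position is split into (batch index, offset within batch), the batch
    // index is remapped by the parent mapper, and the offset is reattached.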
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t batch_size = dataset()->batch_size_;
return [parent_index_mapper,
batch_size](size_t element_position) -> absl::StatusOr<size_t> {
size_t batch_element_position = element_position / batch_size;
size_t input_element_offset = element_position % batch_size;
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(batch_element_position));
return shuffled_element_position * batch_size + input_element_offset;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args), dataset()->batch_size_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (ctx->restored_element_count().has_value()) {
IteratorContext::Params params(ctx);
params.restored_element_count =
*ctx->restored_element_count() * dataset()->batch_size_;
IteratorContext ctx_copy(params);
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(&ctx_copy, reader, input_impl_));
ctx->MergeCheckpoint(ctx_copy.checkpoint());
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const int64_t batch_size_;
const int64_t reserve_size_;
const bool drop_remainder_;
const bool parallel_copy_;
const DatasetBase* const input_;
const int op_version_;
std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_;
const TraceMeMetadata traceme_metadata_;
};
BatchDatasetOp::BatchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx),
op_version_(ctx->def().op() == kBatchDataset ? 1 : 2) {
if (ctx->HasAttr(kParallelCopy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kParallelCopy, ¶llel_copy_));
}
}
void BatchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t batch_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBatchSize, &batch_size));
OP_REQUIRES(ctx, batch_size > 0,
errors::InvalidArgument("Batch size must be greater than zero."));
bool drop_remainder = false;
if (op_version_ > 1) {
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
}
*output = new Dataset(ctx, batch_size, drop_remainder, parallel_copy_, input,
op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("BatchDataset").Device(DEVICE_CPU),
BatchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("BatchDatasetV2").Device(DEVICE_CPU),
BatchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/batch_dataset_op.h"
#include <string>
#include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "batch_dataset";
class BatchDatasetOpTest : public DatasetOpsTestBase {};
BatchDatasetParams BatchDatasetParams1() {
return BatchDatasetParams(RangeDatasetParams(0, 12, 1),
4,
false,
true,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams2() {
return BatchDatasetParams(RangeDatasetParams(0, 12, 1),
4,
true,
false,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams3() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
3,
false,
false,
{DT_INT64},
{PartialTensorShape({-1})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams4() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
3,
true,
true,
{DT_INT64},
{PartialTensorShape({3})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams5() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
12,
true,
true,
{DT_INT64},
{PartialTensorShape({12})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams6() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
12,
false,
true,
{DT_INT64},
{PartialTensorShape({-1})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams7() {
return BatchDatasetParams(RangeDatasetParams(0, 0, 1),
4,
false,
false,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams InvalidBatchSizeBatchDatasetParams() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
-1,
false,
false,
{DT_INT64},
{PartialTensorShape({3})},
kNodeName);
}
std::vector<GetNextTestCase<BatchDatasetParams>> GetNextTestCases() {
return {{BatchDatasetParams1(),
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams2(),
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{BatchDatasetParams4(),
CreateTensors<int64_t>(TensorShape({3}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{BatchDatasetParams5(),
{}},
{BatchDatasetParams6(),
CreateTensors<int64_t>(TensorShape({10}),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}})},
{BatchDatasetParams7(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
GetNextTestCases())
TEST_F(BatchDatasetOpTest, DatasetNodeName) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(batch_dataset_params.node_name()));
}
TEST_F(BatchDatasetOpTest, DatasetTypeString) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
name_utils::OpNameParams params;
params.op_version = batch_dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(BatchDatasetOp::kDatasetType, params)));
}
TEST_F(BatchDatasetOpTest, DatasetOutputDtypes) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<BatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{BatchDatasetParams1(),
{PartialTensorShape({4})}},
{BatchDatasetParams2(),
{PartialTensorShape({4})}},
{BatchDatasetParams3(),
{PartialTensorShape({-1})}},
{BatchDatasetParams4(),
{PartialTensorShape({3})}},
{BatchDatasetParams5(),
{PartialTensorShape({12})}},
{BatchDatasetParams6(),
{PartialTensorShape({-1})}},
{BatchDatasetParams7(),
{PartialTensorShape({4})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<BatchDatasetParams>> CardinalityTestCases() {
return {
{BatchDatasetParams1(), 3},
{BatchDatasetParams2(), 3},
{BatchDatasetParams3(), 4},
{BatchDatasetParams4(), 3},
{BatchDatasetParams5(), 0},
{BatchDatasetParams6(), 1},
{BatchDatasetParams7(), 0}};
}
DATASET_CARDINALITY_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
CardinalityTestCases())
TEST_F(BatchDatasetOpTest, IteratorOutputDtypes) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<BatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{BatchDatasetParams1(),
{PartialTensorShape({4})}},
{BatchDatasetParams2(),
{PartialTensorShape({4})}},
{BatchDatasetParams3(),
{PartialTensorShape({-1})}},
{BatchDatasetParams4(),
{PartialTensorShape({3})}},
{BatchDatasetParams5(),
{PartialTensorShape({12})}},
{BatchDatasetParams6(),
{PartialTensorShape({-1})}},
{BatchDatasetParams7(),
{PartialTensorShape({4})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(BatchDatasetOpTest, IteratorOutputPrefix) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = batch_dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
BatchDatasetOp::kDatasetType, batch_dataset_params.iterator_prefix(),
params)));
}
std::vector<IteratorSaveAndRestoreTestCase<BatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{BatchDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams3(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{BatchDatasetParams4(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8})}},
{BatchDatasetParams5(),
{0, 1, 5},
{}},
{BatchDatasetParams6(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({10}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})}},
{BatchDatasetParams7(),
{0, 1, 5},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(BatchDatasetOpTest, InvalidBatchSize) {
auto batch_dataset_params = InvalidBatchSizeBatchDatasetParams();
EXPECT_EQ(Initialize(batch_dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
REGISTER_OP("BatchDatasetOpTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"));
static void add_identity_nodes(Node* node, Graph& graph,
std::vector<Node*>& identity_nodes) {
for (int i = 0; i < node->num_outputs(); i++) {
Node* new_node;
std::string name = absl::StrCat("Identity", i);
TF_EXPECT_OK(NodeBuilder(name, "Identity")
.Attr("T", node->output_type(i))
.Input(node, i)
.Finalize(&graph, &new_node));
identity_nodes.push_back(new_node);
}
}
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST(BatchDatsetOpTest, TypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* batch_size;
Node* drop_remainder;
Node* batch_dataset_v2;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(NodeBuilder("batch_size", "BatchDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &batch_size));
TF_EXPECT_OK(NodeBuilder("drop_remainder", "BatchDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_BOOL)
.Finalize(&graph, &drop_remainder));
TF_EXPECT_OK(NodeBuilder("BatchDatasetV2", "BatchDatasetV2")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(batch_size)
.Input(drop_remainder)
.Finalize(&graph, &batch_dataset_v2));
std::vector<Node*> identity_nodes;
add_identity_nodes(batch_dataset_v2, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/batch_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/batch_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4f270fd-bf69-449f-b2f8-d9f027296279 | cpp | tensorflow/tensorflow | value_inference | third_party/xla/xla/hlo/builder/value_inference.cc | third_party/xla/xla/tests/value_inference_test.cc | #include "xla/hlo/builder/value_inference.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
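// Builds a PRED literal shaped like `reference_shape` and filled with `pred`,
// recursing through tuples; TOKEN elements are mapped to a scalar pred.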
Literal CreatePredLiteral(bool pred, const Shape& reference_shape) {
if (reference_shape.IsTuple()) {
std::vector<Literal> sub_literals;
const auto& reference_shape_tuple_shapes = reference_shape.tuple_shapes();
sub_literals.reserve(reference_shape_tuple_shapes.size());
for (const Shape& shape : reference_shape_tuple_shapes) {
sub_literals.emplace_back(CreatePredLiteral(pred, shape));
}
return Literal::MoveIntoTuple(absl::MakeSpan(sub_literals));
}
PrimitiveType element_type = reference_shape.element_type();
if (element_type == TOKEN) {
return LiteralUtil::CreateR0(pred);
}
Literal literal = LiteralUtil::CreateR0(pred);
Literal literal_broadcast =
literal.Broadcast(ShapeUtil::ChangeElementType(reference_shape, PRED), {})
.value();
return literal_broadcast;
}
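// As above, but an S64 literal filled with `value`; TOKEN subshapes become
// token literals.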
Literal CreateS64Literal(int64_t value, const Shape& reference_shape) {
if (reference_shape.IsTuple()) {
std::vector<Literal> sub_literals;
const auto& reference_shape_tuple_shapes = reference_shape.tuple_shapes();
sub_literals.reserve(reference_shape_tuple_shapes.size());
for (const Shape& shape : reference_shape_tuple_shapes) {
sub_literals.emplace_back(CreateS64Literal(value, shape));
}
return Literal::MoveIntoTuple(absl::MakeSpan(sub_literals));
}
PrimitiveType element_type = reference_shape.element_type();
if (element_type == TOKEN) {
return LiteralUtil::CreateToken();
}
Literal literal = LiteralUtil::CreateR0<int64_t>(value);
return literal
.Broadcast(ShapeUtil::ChangeElementType(reference_shape, S64), {})
.value();
}
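// Builds an arbitrary placeholder literal (all ones) for positions whose
// value is dynamic and therefore has no meaningful constant.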
Literal CreateGarbageLiteral(const Shape& reference_shape) {
if (reference_shape.IsTuple()) {
std::vector<Literal> sub_literals;
for (const Shape& shape : reference_shape.tuple_shapes()) {
sub_literals.emplace_back(CreateGarbageLiteral(shape));
}
return Literal::MoveIntoTuple(absl::MakeSpan(sub_literals));
}
PrimitiveType element_type = reference_shape.element_type();
if (element_type == TOKEN) {
return LiteralUtil::CreateToken();
}
Literal literal = LiteralUtil::One(element_type);
return literal.Broadcast(reference_shape, {}).value();
}
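// Evaluates a single HloInstructionProto by wrapping it in a throwaway
// module, materializing its operands as constants, and invoking HloEvaluator.
// The With* setters let callers substitute operands, the opcode, the element
// type, or the called computation, and select a tuple subshape of the result.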
struct HloProtoEvaluator {
explicit HloProtoEvaluator(HloEvaluator& evaluator, HloInstructionProto inst)
: evaluator(evaluator),
inst(std::move(inst)),
module("EmptyModuleForEvaluation", HloModuleConfig()) {}
HloProtoEvaluator& WithComputation(
std::unique_ptr<HloComputation> new_computation) {
computation = new_computation.get();
computation->ClearUniqueIdInternal();
for (HloInstruction* inst : computation->instructions()) {
inst->ClearUniqueIdInternal();
}
module.AddEmbeddedComputation(std::move(new_computation));
return *this;
}
HloProtoEvaluator& WithPrimitiveType(PrimitiveType new_primitive_type) {
primitive_type = new_primitive_type;
return *this;
}
HloProtoEvaluator& WithOpCode(HloOpcode new_opcode) {
opcode = new_opcode;
return *this;
}
HloProtoEvaluator& WithOperands(absl::Span<Literal> operands) {
this->operands = operands;
return *this;
}
HloProtoEvaluator& WithSubshape(ShapeIndex shape_index) {
this->shape_index = std::move(shape_index);
return *this;
}
absl::StatusOr<Literal> Evaluate() {
HloComputation::Builder builder("EmptyComputation");
absl::flat_hash_map<int64_t, HloInstruction*> operand_map;
for (int64_t i = 0; i < inst.operand_ids_size(); ++i) {
int64_t operand_handle = inst.operand_ids(i);
std::unique_ptr<HloInstruction> operand =
HloInstruction::CreateConstant(operands[i].Clone());
operand_map[operand_handle] = operand.get();
builder.AddInstruction(std::move(operand));
}
if (primitive_type.has_value()) {
*inst.mutable_shape() = ShapeUtil::ChangeElementType(
Shape(inst.shape()), primitive_type.value())
.ToProto();
}
if (opcode.has_value()) {
*inst.mutable_opcode() = std::string(HloOpcodeString(opcode.value()));
}
absl::flat_hash_map<int64_t, HloComputation*> computation_map;
if (inst.called_computation_ids_size() != 0) {
TF_RET_CHECK(inst.called_computation_ids_size() == 1 &&
computation != nullptr)
<< inst.DebugString();
computation_map[inst.called_computation_ids(0)] = computation;
}
TF_ASSIGN_OR_RETURN(
auto new_instruction,
HloInstruction::CreateFromProto(inst, operand_map, computation_map));
new_instruction->ClearUniqueIdInternal();
builder.AddInstruction(std::move(new_instruction));
auto computation = builder.Build();
module.AddEntryComputation(std::move(computation));
if (shape_index.empty()) {
return evaluator.Evaluate(module.entry_computation()->root_instruction());
} else {
TF_ASSIGN_OR_RETURN(
auto result,
evaluator.Evaluate(module.entry_computation()->root_instruction()));
return result.SubLiteral(this->shape_index);
}
}
HloEvaluator& evaluator;
HloInstructionProto inst;
HloModule module;
absl::Span<Literal> operands;
ShapeIndex shape_index = {};
HloComputation* computation = nullptr;
std::optional<PrimitiveType> primitive_type = std::nullopt;
std::optional<HloOpcode> opcode = std::nullopt;
};
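// The analyses the postorder DFS can run on an operation handle: its exact
// constant value, an upper or lower bound on it, or whether the value (or
// the bound) is dynamic.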
enum PostorderDFSNodeType {
kConstantValue = 0,
kConstantUpperBound,
kConstantLowerBound,
kValueIsDynamic,
kBoundIsDynamic,
};
std::string PostorderDFSNodeTypeToString(PostorderDFSNodeType type) {
switch (type) {
case kConstantValue:
return "kConstantValue";
case kConstantUpperBound:
return "kConstantUpperBound";
case kConstantLowerBound:
return "kConstantLowerBound";
case kValueIsDynamic:
return "kValueIsDynamic";
case kBoundIsDynamic:
return "kBoundIsDynamic";
}
}
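// Context threaded through the DFS: `shape_index` selects a tuple subshape of
// the current value, and `caller_operand_handles` acts as a call stack so
// that a kParameter can be resolved to the matching operand of its caller.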
struct InferenceContext {
explicit InferenceContext(ShapeIndex shape_index,
std::vector<int64_t> caller_operand_handles)
: shape_index(std::move(shape_index)),
caller_operand_handles(std::move(caller_operand_handles)) {}
ShapeIndex shape_index;
std::vector<int64_t> caller_operand_handles;
};
struct PostorderDFSDep {
explicit PostorderDFSDep(int64_t handle, PostorderDFSNodeType type,
InferenceContext context, std::string annotation)
: handle(handle),
type(type),
context(std::move(context)),
annotation(std::move(annotation)) {}
int64_t handle;
PostorderDFSNodeType type;
InferenceContext context;
std::string annotation;
};
using Visit = std::function<absl::StatusOr<Literal>(absl::Span<Literal>)>;
using Visit0D = std::function<absl::StatusOr<Literal>()>;
using Visit1D = std::function<absl::StatusOr<Literal>(Literal)>;
using Visit2D = std::function<absl::StatusOr<Literal>(Literal, Literal)>;
struct [[nodiscard]] PostorderDFSNode {
PostorderDFSNode& AddDependency(int64_t handle, PostorderDFSNodeType type,
InferenceContext context,
std::string annotation = "") {
dependencies.emplace_back(handle, type, std::move(context),
std::move(annotation));
return *this;
}
PostorderDFSNode& AddVisit(const Visit& visit) {
this->visit = visit;
return *this;
}
PostorderDFSNode& AddVisit(const Visit0D& visit) {
this->visit = [visit](absl::Span<Literal> literals) { return visit(); };
return *this;
}
PostorderDFSNode& AddVisit(const Visit1D& visit) {
this->visit = [visit](absl::Span<Literal> literals) {
return visit(std::move(literals[0]));
};
return *this;
}
PostorderDFSNode& AddVisit(const Visit2D& visit) {
this->visit = [visit](absl::Span<Literal> literals) {
return visit(std::move(literals[0]), std::move(literals[1]));
};
return *this;
}
std::vector<PostorderDFSDep> dependencies;
Visit visit;
};
using HandleToInstruction =
std::function<absl::StatusOr<const HloInstructionProto*>(int64_t)>;
using HandleToComputation = std::function<const HloComputationProto*(int64_t)>;
struct PostorderDFSVisitor {
PostorderDFSVisitor(HloEvaluator& evaluator,
HandleToInstruction handle_to_instruction,
HandleToComputation handle_to_computation)
: evaluator(evaluator),
handle_to_instruction(handle_to_instruction),
handle_to_computation(handle_to_computation) {}
absl::StatusOr<PostorderDFSNode> AnalyzeUpperBound(int64_t handle,
InferenceContext context);
absl::StatusOr<PostorderDFSNode> AnalyzeLowerBound(int64_t handle,
InferenceContext context);
absl::StatusOr<PostorderDFSNode> AnalyzeIsDynamic(int64_t handle,
PostorderDFSNodeType type,
InferenceContext context);
absl::StatusOr<PostorderDFSNode> AnalyzeConstant(int64_t handle,
InferenceContext context);
absl::StatusOr<PostorderDFSNode> AnalyzeConstantValueFallback(
int64_t handle, PostorderDFSNodeType type, InferenceContext context);
absl::StatusOr<Literal> PostOrderDFSVisit(int64_t handle,
PostorderDFSNodeType type);
bool IsValueEffectiveInteger(int64_t handle) {
const HloInstructionProto* instr = handle_to_instruction(handle).value();
if (primitive_util::IsIntegralType(instr->shape().element_type())) {
return true;
}
HloOpcode opcode = StringToHloOpcode(instr->opcode()).value();
if (opcode != HloOpcode::kConvert) {
return false;
}
const HloInstructionProto* parent =
handle_to_instruction(instr->operand_ids(0)).value();
if (primitive_util::IsIntegralType(parent->shape().element_type())) {
return true;
}
return false;
}
bool IsInstructionOverLimit(const HloInstructionProto* proto,
const InferenceContext& context) {
auto subshape = std::make_unique<Shape>(
ShapeUtil::GetSubshape(Shape(proto->shape()), context.shape_index));
if (subshape->IsArray() &&
ShapeUtil::ElementsIn(*subshape) > kLargeShapeElementLimit) {
return true;
}
HloOpcode opcode = StringToHloOpcode(proto->opcode()).value();
for (int64_t operand_id : proto->operand_ids()) {
const HloInstructionProto* operand =
handle_to_instruction(operand_id).value();
auto operand_shape = std::make_unique<Shape>(operand->shape());
if (operand_shape->IsArray() &&
ShapeUtil::ElementsIn(*operand_shape) > kLargeShapeElementLimit &&
opcode != HloOpcode::kGetDimensionSize &&
opcode != HloOpcode::kSetDimensionSize) {
return true;
}
}
return false;
}
struct CacheKey {
CacheKey(int64_t handle, InferenceContext context,
PostorderDFSNodeType type)
: handle(handle), context(context), type(type) {}
int64_t handle;
InferenceContext context;
PostorderDFSNodeType type;
template <typename H>
friend H AbslHashValue(H h, const CacheKey& key) {
h = H::combine(std::move(h), key.handle);
h = H::combine(std::move(h), key.context.shape_index.ToString());
h = H::combine(std::move(h),
VectorString(key.context.caller_operand_handles));
h = H::combine(std::move(h), key.type);
return h;
}
friend bool operator==(const CacheKey& lhs, const CacheKey& rhs) {
return lhs.handle == rhs.handle &&
lhs.context.shape_index == rhs.context.shape_index &&
lhs.context.caller_operand_handles ==
rhs.context.caller_operand_handles &&
lhs.type == rhs.type;
}
};
HloEvaluator& evaluator;
absl::flat_hash_map<CacheKey, Literal> evaluated;
HandleToInstruction handle_to_instruction;
HandleToComputation handle_to_computation;
static constexpr int64_t kLargeShapeElementLimit = 1000 * 1000;
};
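// Fallback result when analysis gives up: garbage literals for constant
// queries, all-true literals for dynamism queries.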
PostorderDFSNode CreateAllDynamicResult(const Shape& shape,
const PostorderDFSNodeType& type) {
return PostorderDFSNode().AddVisit(
[shape, type](absl::Span<Literal>) -> Literal {
if (type == PostorderDFSNodeType::kConstantValue ||
type == PostorderDFSNodeType::kConstantUpperBound ||
type == PostorderDFSNodeType::kConstantLowerBound) {
return CreateGarbageLiteral(shape);
} else {
return CreatePredLiteral(true, shape);
}
});
}
}
absl::StatusOr<PostorderDFSNode>
PostorderDFSVisitor::AnalyzeConstantValueFallback(int64_t handle,
PostorderDFSNodeType type,
InferenceContext context) {
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
handle_to_instruction(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(root->opcode()));
Shape subshape =
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index);
PostorderDFSNode result;
for (auto operand_id : root->operand_ids()) {
InferenceContext dep_context = context;
dep_context.shape_index = {};
result.AddDependency(operand_id, type, dep_context);
}
switch (opcode) {
case HloOpcode::kRng:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kCustomCall:
case HloOpcode::kWhile:
case HloOpcode::kSend:
case HloOpcode::kRecv:
case HloOpcode::kSendDone:
case HloOpcode::kRecvDone:
case HloOpcode::kParameter: {
if (opcode == HloOpcode::kParameter &&
!context.caller_operand_handles.empty()) {
int64_t caller_operand = context.caller_operand_handles.back();
context.caller_operand_handles.pop_back();
return result.AddDependency(caller_operand, type, context)
.AddVisit([](Literal literal) { return literal; });
}
return CreateAllDynamicResult(subshape, type);
}
case HloOpcode::kSubtract:
case HloOpcode::kCos:
case HloOpcode::kSin:
case HloOpcode::kTan:
case HloOpcode::kNegate:
case HloOpcode::kAbs:
case HloOpcode::kDivide:
case HloOpcode::kGetDimensionSize: {
return InvalidArgument(
"AnalyzeConstantValueFallback can't handle opcode: %s",
root->opcode());
}
case HloOpcode::kCall: {
auto node = PostorderDFSNode();
auto* call_proto = root;
if (call_proto->operand_ids_size() != 1) {
return CreateAllDynamicResult(subshape, type);
}
int64_t called_root =
handle_to_computation(call_proto->called_computation_ids(0))
->root_id();
InferenceContext call_context = context;
call_context.caller_operand_handles.push_back(call_proto->operand_ids(0));
node.AddDependency(called_root, PostorderDFSNodeType::kConstantValue,
call_context, "callee's root instruction");
return node.AddVisit([](Literal operand) -> absl::StatusOr<Literal> {
return std::move(operand);
});
}
case HloOpcode::kConditional: {
auto node = PostorderDFSNode();
auto* conditional_proto = root;
InferenceContext predicate_context = context;
predicate_context.shape_index = {};
node.AddDependency(conditional_proto->operand_ids(0),
PostorderDFSNodeType::kConstantValue,
predicate_context)
.AddDependency(conditional_proto->operand_ids(0),
PostorderDFSNodeType::kValueIsDynamic,
predicate_context);
const int64_t branch_size =
conditional_proto->called_computation_ids_size();
for (int64_t i = 0; i < branch_size; ++i) {
int64_t branch_root =
handle_to_computation(conditional_proto->called_computation_ids(i))
->root_id();
InferenceContext branch_context = context;
branch_context.caller_operand_handles.push_back(
conditional_proto->operand_ids(i + 1));
node.AddDependency(branch_root, PostorderDFSNodeType::kConstantValue,
branch_context);
}
return node.AddVisit(
[](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
int64_t pred_is_dynamic = operands[1].Get<bool>({});
if (pred_is_dynamic) {
return std::move(operands[2]);
} else {
int64_t branch_index = 0;
if (operands[0].shape().element_type() == PRED) {
if (operands[0].Get<bool>({})) {
branch_index = 0;
} else {
branch_index = 1;
}
} else {
branch_index = operands[0].GetIntegralAsS64({}).value();
}
                const int64_t branch_value_index = 2 + branch_index;
                return std::move(operands[branch_value_index]);
}
});
}
case HloOpcode::kGetTupleElement: {
int64_t operand_handle = root->operand_ids(0);
context.shape_index.push_front(root->tuple_index());
return PostorderDFSNode()
.AddDependency(operand_handle, type, context)
.AddVisit([](Literal operand) { return operand; });
}
case HloOpcode::kReduce:
case HloOpcode::kSort:
case HloOpcode::kScatter:
case HloOpcode::kReduceWindow: {
const HloComputationProto* computation_proto =
handle_to_computation(root->called_computation_ids(0));
return result.AddVisit(
[root, computation_proto, context,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
TF_ASSIGN_OR_RETURN(
auto computation,
HloComputation::CreateFromProto(*computation_proto, {}));
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithComputation(std::move(computation))
.WithSubshape(context.shape_index)
.Evaluate();
});
}
default: {
if (opcode == HloOpcode::kTuple && !context.shape_index.empty()) {
int64_t tuple_operand_index = context.shape_index.front();
InferenceContext tuple_operand_context = context;
tuple_operand_context.shape_index.pop_front();
return PostorderDFSNode()
.AddDependency(root->operand_ids(tuple_operand_index), type,
tuple_operand_context)
.AddVisit([](Literal operand) { return operand; });
}
return result.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
}
}
absl::StatusOr<PostorderDFSNode> PostorderDFSVisitor::AnalyzeUpperBound(
int64_t handle, InferenceContext context) {
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
handle_to_instruction(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(root->opcode()));
Shape subshape =
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index);
if (IsInstructionOverLimit(root, context)) {
return CreateAllDynamicResult(subshape,
PostorderDFSNodeType::kConstantUpperBound);
}
switch (opcode) {
case HloOpcode::kGetDimensionSize: {
int64_t dimension = root->dimensions(0);
int64_t operand_handle = root->operand_ids(0);
const HloInstructionProto* operand_proto =
handle_to_instruction(operand_handle).value();
return PostorderDFSNode().AddVisit(
[operand_proto, dimension]() -> absl::StatusOr<Literal> {
return LiteralUtil::CreateR0<int32_t>(
operand_proto->shape().dimensions(dimension));
});
}
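    // upper(|x|) = max(|lower(x)|, |upper(x)|).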
case HloOpcode::kAbs: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit([this](Literal lower_bound,
Literal upper_bound) -> absl::StatusOr<Literal> {
TF_ASSIGN_OR_RETURN(auto lower_bound_abs,
evaluator.EvaluateElementwiseUnaryOp(
HloOpcode::kAbs, lower_bound));
TF_ASSIGN_OR_RETURN(auto upper_bound_abs,
evaluator.EvaluateElementwiseUnaryOp(
HloOpcode::kAbs, upper_bound));
return evaluator.EvaluateElementwiseBinaryOp(
HloOpcode::kMaximum, lower_bound_abs, upper_bound_abs);
});
}
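    // An upper bound for a sorted operand is its upper bound's max element,
    // broadcast to the operand shape.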
case HloOpcode::kSort: {
auto dfs = PostorderDFSNode();
InferenceContext dep_context = context;
dep_context.shape_index = {};
if (!context.shape_index.empty()) {
dfs.AddDependency(root->operand_ids(context.shape_index[0]),
PostorderDFSNodeType::kConstantUpperBound,
dep_context);
} else {
for (int64_t i = 0; i < root->operand_ids_size(); ++i) {
dfs.AddDependency(root->operand_ids(i),
PostorderDFSNodeType::kConstantUpperBound,
dep_context);
}
}
return dfs.AddVisit(
[root,
context](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
std::vector<Literal> results;
results.reserve(operands.size());
for (int64_t i = 0; i < operands.size(); ++i) {
auto max = LiteralUtil::MaxElement(operands[i]);
results.emplace_back(
max.Broadcast(operands[i].shape(), {}).value());
}
if (ShapeUtil::GetSubshape(Shape(root->shape()),
context.shape_index)
.IsTuple()) {
return LiteralUtil::MakeTupleOwned(std::move(results));
} else {
return std::move(results[0]);
}
});
}
case HloOpcode::kNegate: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddVisit([this](Literal lower_bound) -> absl::StatusOr<Literal> {
return evaluator.EvaluateElementwiseUnaryOp(HloOpcode::kNegate,
lower_bound);
});
}
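    // upper(a - b) = upper(a) - lower(b); likewise upper(a / b) pairs
    // upper(a) with lower(b), clamping an effectively-integral divisor's
    // zero lower bound to one so the division stays defined.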
case HloOpcode::kSubtract:
case HloOpcode::kDivide: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddVisit([root, opcode, this](
Literal upper_bound,
Literal lower_bound) -> absl::StatusOr<Literal> {
if (opcode == HloOpcode::kDivide &&
this->IsValueEffectiveInteger(root->operand_ids(1))) {
auto zero = LiteralUtil::Zero(lower_bound.shape().element_type());
zero = zero.Broadcast(lower_bound.shape(), {}).value();
TF_ASSIGN_OR_RETURN(
auto lower_bound_is_zero,
evaluator.EvaluateElementwiseCompareOp(
ComparisonDirection::kEq, lower_bound, zero));
auto one = LiteralUtil::One(lower_bound.shape().element_type());
one = one.Broadcast(lower_bound.shape(), {}).value();
TF_ASSIGN_OR_RETURN(
lower_bound, evaluator.EvaluateElementwiseTernaryOp(
HloOpcode::kSelect, lower_bound_is_zero, one,
lower_bound));
}
std::vector<Literal> new_operands;
new_operands.emplace_back(std::move(upper_bound));
new_operands.emplace_back(std::move(lower_bound));
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(absl::MakeSpan(new_operands))
.Evaluate();
});
}
case HloOpcode::kCustomCall: {
if (root->custom_call_target() == "SetBound") {
return PostorderDFSNode().AddVisit([root]() -> absl::StatusOr<Literal> {
if (root->literal().shape().element_type() == TUPLE) {
return Literal::CreateFromProto(root->literal().tuple_literals(0));
} else {
return Literal::CreateFromProto(root->literal());
}
});
} else if (root->custom_call_target() == "Sharding") {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit([](Literal operand) { return operand; });
}
return InvalidArgument(
"Upper-bound inferencing on custom call %s is not supported",
root->DebugString());
}
case HloOpcode::kGather: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantValue, context)
.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
default:
return AnalyzeConstantValueFallback(
handle, PostorderDFSNodeType::kConstantUpperBound, context);
}
}
absl::StatusOr<PostorderDFSNode> PostorderDFSVisitor::AnalyzeLowerBound(
int64_t handle, InferenceContext context) {
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
handle_to_instruction(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(root->opcode()));
Shape subshape =
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index);
if (IsInstructionOverLimit(root, context)) {
return CreateAllDynamicResult(subshape,
PostorderDFSNodeType::kConstantLowerBound);
}
switch (opcode) {
case HloOpcode::kGetDimensionSize: {
int64_t dimension = root->dimensions(0);
int64_t operand_handle = root->operand_ids(0);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
handle_to_instruction(operand_handle));
return PostorderDFSNode().AddVisit(
[dimension, operand_proto]() -> absl::StatusOr<Literal> {
if (operand_proto->shape().is_dynamic_dimension(dimension)) {
return LiteralUtil::CreateR0<int32_t>(0);
} else {
return LiteralUtil::CreateR0<int32_t>(
operand_proto->shape().dimensions(dimension));
}
});
}
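    // lower(|x|) is approximated as min(|lower(x)|, |upper(x)|).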
case HloOpcode::kAbs: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit([this](Literal lower_bound,
Literal upper_bound) -> absl::StatusOr<Literal> {
TF_ASSIGN_OR_RETURN(auto lower_bound_abs,
evaluator.EvaluateElementwiseUnaryOp(
HloOpcode::kAbs, lower_bound));
TF_ASSIGN_OR_RETURN(auto upper_bound_abs,
evaluator.EvaluateElementwiseUnaryOp(
HloOpcode::kAbs, upper_bound));
return evaluator.EvaluateElementwiseBinaryOp(
HloOpcode::kMinimum, lower_bound_abs, upper_bound_abs);
});
}
case HloOpcode::kNegate: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit([this](Literal upper_bound) -> absl::StatusOr<Literal> {
return evaluator.EvaluateElementwiseUnaryOp(HloOpcode::kNegate,
upper_bound);
});
}
case HloOpcode::kSubtract:
case HloOpcode::kDivide: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit(
[root,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
case HloOpcode::kGather: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantValue, context)
.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
default:
return AnalyzeConstantValueFallback(
handle, PostorderDFSNodeType::kConstantLowerBound, context);
}
}
absl::StatusOr<PostorderDFSNode> PostorderDFSVisitor::AnalyzeConstant(
int64_t handle, InferenceContext context) {
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
handle_to_instruction(handle));
HloOpcode opcode = StringToHloOpcode(root->opcode()).value();
Shape subshape =
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index);
if (IsInstructionOverLimit(root, context)) {
return CreateAllDynamicResult(subshape,
PostorderDFSNodeType::kConstantValue);
}
switch (opcode) {
case HloOpcode::kGetDimensionSize: {
int64_t dimension = root->dimensions(0);
int64_t operand_handle = root->operand_ids(0);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
handle_to_instruction(operand_handle));
return PostorderDFSNode().AddVisit(
[operand_proto, dimension, root]() -> absl::StatusOr<Literal> {
if (operand_proto->shape().is_dynamic_dimension(dimension)) {
return CreateGarbageLiteral(Shape(root->shape()));
} else {
return LiteralUtil::CreateR0<int32_t>(
operand_proto->shape().dimensions(dimension));
}
});
}
case HloOpcode::kSubtract:
case HloOpcode::kCos:
case HloOpcode::kSin:
case HloOpcode::kNegate:
case HloOpcode::kAbs:
case HloOpcode::kDivide: {
PostorderDFSNode result;
for (auto operand_id : root->operand_ids()) {
result.AddDependency(operand_id, PostorderDFSNodeType::kConstantValue,
context);
}
return result.AddVisit(
[root,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
case HloOpcode::kCustomCall: {
if (root->custom_call_target() == "SetBound") {
return PostorderDFSNode().AddVisit([root]() -> absl::StatusOr<Literal> {
if (root->literal().shape().element_type() == TUPLE) {
return Literal::CreateFromProto(root->literal().tuple_literals(0));
} else {
return Literal::CreateFromProto(root->literal());
}
});
} else if (root->custom_call_target() == "Sharding") {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantValue, context)
.AddVisit([](Literal operand) { return operand; });
} else {
return PostorderDFSNode().AddVisit(
[root, context](absl::Span<Literal>) {
return CreateGarbageLiteral(ShapeUtil::GetSubshape(
Shape(root->shape()), context.shape_index));
});
}
}
case HloOpcode::kSort: {
PostorderDFSNode result;
InferenceContext dep_context = context;
dep_context.shape_index = {};
for (auto operand_id : root->operand_ids()) {
result.AddDependency(operand_id, PostorderDFSNodeType::kConstantValue,
dep_context);
}
const HloComputationProto* computation_proto =
handle_to_computation(root->called_computation_ids(0));
return result.AddVisit(
[root, context, computation_proto,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
TF_ASSIGN_OR_RETURN(
auto computation,
HloComputation::CreateFromProto(*computation_proto, {}));
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithComputation(std::move(computation))
.WithSubshape(context.shape_index)
.Evaluate();
});
}
default:
return AnalyzeConstantValueFallback(
handle, PostorderDFSNodeType::kConstantValue, context);
}
}
absl::StatusOr<PostorderDFSNode> PostorderDFSVisitor::AnalyzeIsDynamic(
int64_t handle, PostorderDFSNodeType type, InferenceContext context) {
  TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
                      handle_to_instruction(handle));
  TF_RET_CHECK(root != nullptr);
  VLOG(1) << "Analyzing IsDynamic on " << root->DebugString();
  if (IsInstructionOverLimit(root, context)) {
    return CreateAllDynamicResult(
        ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index),
        type);
  }
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(root->opcode()));
PostorderDFSNode result;
for (auto operand_id : root->operand_ids()) {
InferenceContext dep_context = context;
dep_context.shape_index = {};
result.AddDependency(operand_id, type, dep_context);
}
switch (opcode) {
case HloOpcode::kGetDimensionSize: {
int64_t dimension = root->dimensions(0);
int64_t operand_handle = root->operand_ids(0);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
handle_to_instruction(operand_handle));
return PostorderDFSNode().AddVisit(
[operand_proto, dimension, type]() -> absl::StatusOr<Literal> {
if (type == PostorderDFSNodeType::kBoundIsDynamic) {
return LiteralUtil::CreateR0<bool>(false);
}
return LiteralUtil::CreateR0<bool>(
operand_proto->shape().is_dynamic_dimension(dimension));
});
}
case HloOpcode::kSort: {
auto dfs = PostorderDFSNode();
InferenceContext dep_context = context;
dep_context.shape_index = {};
for (int64_t i = 0; i < root->operand_ids_size(); ++i) {
dfs.AddDependency(root->operand_ids(i), type, dep_context);
}
return dfs.AddVisit([root, context, type](absl::Span<Literal> operands)
-> absl::StatusOr<Literal> {
bool all_operands_values_static = true;
for (int64_t i = 0; i < operands.size(); ++i) {
all_operands_values_static &= operands[i].IsAll(0);
}
if (type == PostorderDFSNodeType::kValueIsDynamic) {
return CreatePredLiteral(!all_operands_values_static,
ShapeUtil::GetSubshape(Shape(root->shape()),
context.shape_index));
}
CHECK(type == PostorderDFSNodeType::kBoundIsDynamic);
if (!context.shape_index.empty()) {
int64_t index = context.shape_index[0];
bool all_values_static = operands[index].IsAll(0);
return CreatePredLiteral(!all_values_static, operands[index].shape());
}
std::vector<Literal> results;
results.reserve(operands.size());
for (int64_t i = 0; i < operands.size(); ++i) {
bool all_values_static = operands[i].IsAll(0);
results.emplace_back(
CreatePredLiteral(!all_values_static, operands[i].shape()));
}
if (!ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index)
.IsTuple()) {
return std::move(results[0]);
}
return LiteralUtil::MakeTupleOwned(std::move(results));
});
}
case HloOpcode::kSetDimensionSize:
return result.AddVisit([root, type](absl::Span<Literal> operands) {
bool any_dynamic_operand = absl::c_any_of(
operands, [](Literal& operand) { return !operand.IsAll(0); });
return CreatePredLiteral(
type == PostorderDFSNodeType::kValueIsDynamic &&
any_dynamic_operand,
ShapeUtil::MakeStaticShape(Shape(root->shape())));
});
case HloOpcode::kDynamicSlice: {
return result.AddVisit([root](absl::Span<Literal> operands) {
bool any_dynamic_operand = absl::c_any_of(
operands, [](Literal& operand) { return !operand.IsAll(0); });
return CreatePredLiteral(any_dynamic_operand, Shape(root->shape()));
});
}
case HloOpcode::kAbs:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kBitcast:
case HloOpcode::kCeil:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCos:
case HloOpcode::kClz:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kRsqrt:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kConvert:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh: {
return result.AddVisit([](Literal operand) { return operand; });
}
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kDivide:
case HloOpcode::kComplex:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kSubtract:
case HloOpcode::kCompare:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical: {
return result.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithPrimitiveType(PRED)
.WithOpCode(HloOpcode::kOr)
.Evaluate();
});
}
case HloOpcode::kTuple:
case HloOpcode::kTranspose:
case HloOpcode::kSlice:
case HloOpcode::kBroadcast:
case HloOpcode::kReverse:
case HloOpcode::kConcatenate:
case HloOpcode::kReshape:
case HloOpcode::kPad: {
if (opcode == HloOpcode::kTuple && !context.shape_index.empty()) {
int64_t tuple_operand_index = context.shape_index.front();
InferenceContext tuple_operand_context = context;
tuple_operand_context.shape_index.pop_front();
return PostorderDFSNode()
.AddDependency(root->operand_ids(tuple_operand_index), type,
tuple_operand_context)
.AddVisit([](Literal operand) { return operand; });
}
return result.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithPrimitiveType(PRED)
.Evaluate();
});
}
case HloOpcode::kCall: {
auto node = PostorderDFSNode();
auto* call_proto = root;
if (call_proto->operand_ids_size() != 1) {
        return CreateAllDynamicResult(Shape(root->shape()), type);
}
int64_t call_root =
handle_to_computation(call_proto->called_computation_ids(0))
->root_id();
InferenceContext branch_context = context;
branch_context.caller_operand_handles.push_back(
call_proto->operand_ids(0));
node.AddDependency(call_root, PostorderDFSNodeType::kValueIsDynamic,
branch_context, "callee's root instruction");
return node.AddVisit(
[context](Literal operand) -> absl::StatusOr<Literal> {
return operand;
});
}
case HloOpcode::kConditional: {
auto node = PostorderDFSNode();
auto* conditional_proto = root;
InferenceContext predicate_context = context;
predicate_context.shape_index = {};
node.AddDependency(conditional_proto->operand_ids(0),
PostorderDFSNodeType::kConstantValue,
predicate_context)
.AddDependency(conditional_proto->operand_ids(0),
PostorderDFSNodeType::kValueIsDynamic,
predicate_context);
const int64_t branch_size =
conditional_proto->called_computation_ids_size();
for (int64_t i = 0; i < branch_size; ++i) {
int64_t branch_root =
handle_to_computation(conditional_proto->called_computation_ids(i))
->root_id();
InferenceContext branch_context = context;
branch_context.caller_operand_handles.push_back(
conditional_proto->operand_ids(i + 1));
node.AddDependency(branch_root, PostorderDFSNodeType::kConstantValue,
branch_context,
absl::StrFormat("branch %lld's value", i))
.AddDependency(branch_root, PostorderDFSNodeType::kValueIsDynamic,
branch_context,
absl::StrFormat("branch %lld's dynamism", i));
}
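      // Operand layout below: [0] = predicate value, [1] = predicate
      // dynamism, then per branch i the pair (value, dynamism) at indices
      // 2 + 2*i and 2 + 2*i + 1.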
return node.AddVisit([root, branch_size,
context](absl::Span<Literal> operands)
-> absl::StatusOr<Literal> {
int64_t pred_is_dynamic = operands[1].Get<bool>({});
auto result = CreatePredLiteral(
true,
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index));
if (pred_is_dynamic) {
VLOG(1) << "predict is dynamic value" << result.ToString();
result.MutableEachCell<bool>(
[&](absl::Span<const int64_t> indices, bool value) {
std::string branch_value = operands[2].GetAsString(indices, {});
for (int64_t i = 0; i < branch_size; ++i) {
const int64_t branch_value_index = 2 + 2 * i;
const int64_t branch_dynamism_index = 2 + 2 * i + 1;
auto branch_is_dynamic =
operands[branch_dynamism_index].Get<bool>(indices);
if (branch_is_dynamic) {
return true;
}
if (branch_value !=
operands[branch_value_index].GetAsString(indices, {})) {
return true;
}
}
return false;
});
return result;
} else {
VLOG(1) << "predict is constant value";
int64_t branch_index = 0;
if (operands[0].shape().element_type() == PRED) {
if (operands[0].Get<bool>({})) {
branch_index = 0;
} else {
branch_index = 1;
}
} else {
branch_index = operands[0].GetIntegralAsS64({}).value();
}
const int64_t branch_dynamism_index = 2 + 2 * branch_index + 1;
return std::move(operands[branch_dynamism_index]);
}
});
}
case HloOpcode::kGetTupleElement: {
int64_t operand_handle = root->operand_ids(0);
context.shape_index.push_front(root->tuple_index());
return PostorderDFSNode()
.AddDependency(operand_handle, type, context)
.AddVisit([](Literal operand) { return operand; });
}
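    // Dynamism of a reduce: re-evaluate it with PRED element type and an
    // OR-combining computation, so an output element is dynamic iff any
    // contributing input element is.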
case HloOpcode::kReduce: {
return result.AddVisit(
[root, context, this](absl::Span<Literal> operands) {
Shape root_shape = Shape(root->shape());
Shape scalar_shape = ShapeUtil::MakeScalarShape(xla::PRED);
std::unique_ptr<HloComputation> reduce_or;
if (root_shape.IsTuple()) {
HloComputation::Builder b("reduce_or");
auto accum = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<bool>(false)));
for (int i = 0; i < root_shape.tuple_shapes_size(); ++i) {
auto lhs = b.AddInstruction(
HloInstruction::CreateParameter(i, scalar_shape, "lhs"));
auto rhs = b.AddInstruction(HloInstruction::CreateParameter(
i + root_shape.tuple_shapes_size(), scalar_shape, "rhs"));
accum = b.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kOr, accum, lhs));
accum = b.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kOr, accum, rhs));
}
std::vector<HloInstruction*> results(
root_shape.tuple_shapes_size(), accum);
b.AddInstruction(HloInstruction::CreateTuple(results));
reduce_or = b.Build();
} else {
HloComputation::Builder b("reduce_or");
auto lhs = b.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto rhs = b.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
b.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kOr, lhs, rhs));
reduce_or = b.Build();
}
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithPrimitiveType(PRED)
.WithComputation(std::move(reduce_or))
.WithSubshape(context.shape_index)
.Evaluate();
});
}
case HloOpcode::kConstant:
case HloOpcode::kIota: {
return result.AddVisit(
[root]() { return CreatePredLiteral(false, Shape(root->shape())); });
}
case HloOpcode::kParameter: {
if (opcode == HloOpcode::kParameter &&
!context.caller_operand_handles.empty()) {
int64_t caller_operand = context.caller_operand_handles.back();
context.caller_operand_handles.pop_back();
return result.AddDependency(caller_operand, type, context)
.AddVisit([](Literal literal) { return literal; });
}
return result.AddVisit([root, context]() {
return CreatePredLiteral(
true,
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index));
});
}
case HloOpcode::kSelect: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantValue, context)
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kValueIsDynamic, context)
.AddDependency(root->operand_ids(1), type, context)
.AddDependency(root->operand_ids(2), type, context)
.AddVisit([root](absl::Span<Literal> operands)
-> absl::StatusOr<Literal> {
OptionalLiteral optional_selector_literal(std::move(operands[0]),
std::move(operands[1]));
Literal lhs = std::move(operands[2]);
Literal rhs = std::move(operands[3]);
auto result = CreatePredLiteral(true, Shape(root->shape()));
result.MutableEachCell<bool>(
[&](absl::Span<const int64_t> indices, bool value) {
std::optional<bool> optional_selector =
optional_selector_literal.Get<bool>(indices);
bool lhs_value = lhs.Get<bool>(indices);
bool rhs_value = rhs.Get<bool>(indices);
if (optional_selector.has_value()) {
if (*optional_selector) {
return lhs_value;
} else {
return rhs_value;
}
} else {
return true;
}
});
return result;
});
}
case HloOpcode::kGather: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0), type, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantValue, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kValueIsDynamic, context)
.AddVisit(
[root,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
OptionalLiteral optional_selector_literal(
std::move(operands[1]), std::move(operands[2]));
if (!optional_selector_literal.AllValid()) {
return CreatePredLiteral(true, Shape(root->shape()));
}
std::vector<Literal> new_operands;
new_operands.emplace_back(std::move(operands[0]));
new_operands.emplace_back(
optional_selector_literal.GetValue()->Clone());
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(absl::MakeSpan(new_operands))
.WithPrimitiveType(PRED)
.Evaluate();
});
}
case HloOpcode::kCustomCall: {
if (root->custom_call_target() == "SetBound") {
return PostorderDFSNode().AddVisit([type,
root]() -> absl::StatusOr<Literal> {
if (type == PostorderDFSNodeType::kBoundIsDynamic) {
return CreatePredLiteral(false, Shape(root->shape()));
} else {
if (root->literal().shape().element_type() == TUPLE) {
return Literal::CreateFromProto(
root->literal().tuple_literals(1));
} else if (type == PostorderDFSNodeType::kValueIsDynamic) {
return CreatePredLiteral(true, Shape(root->shape()));
} else {
return Literal::CreateFromProto(root->literal());
}
}
});
} else if (root->custom_call_target() == "Sharding") {
return result.AddVisit([](Literal operand) { return operand; });
} else {
return InvalidArgument(
"Dynamic inferencing on custom call %s is not supported",
root->DebugString());
}
break;
}
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kWhile: {
return PostorderDFSNode().AddVisit([root, context]()
-> absl::StatusOr<Literal> {
return CreatePredLiteral(
true,
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index));
});
break;
}
default:
return PostorderDFSNode().AddVisit([root, context]()
-> absl::StatusOr<Literal> {
return CreatePredLiteral(
true,
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index));
});
}
}
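// Iterative postorder DFS over an explicit work stack. Each item is pushed
// kUnvisited; its first visit enqueues its dependencies and flips it to
// kVisiting, and its second visit combines the memoized dependency literals
// via visit(). Results are cached in `evaluated`, keyed by
// (handle, context, type).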
absl::StatusOr<Literal> PostorderDFSVisitor::PostOrderDFSVisit(
int64_t handle, PostorderDFSNodeType type) {
enum VisitState {
kUnvisited = 0,
kVisiting,
kVisited,
};
int64_t unique_id = 0;
struct WorkItem {
explicit WorkItem(int64_t handle, InferenceContext context,
PostorderDFSNodeType type, VisitState state, int64_t id)
: handle(handle),
context(std::move(context)),
type(type),
state(state),
id(id) {}
int64_t handle;
InferenceContext context;
PostorderDFSNodeType type;
VisitState state;
Visit visit;
int64_t id;
std::vector<CacheKey> dependencies;
CacheKey GetCacheKey() { return CacheKey(handle, context, type); }
};
std::vector<WorkItem> stack;
WorkItem root(handle, InferenceContext({}, {}), type, kUnvisited,
unique_id++);
stack.push_back(root);
while (!stack.empty()) {
WorkItem& item = stack.back();
VLOG(1) << "stack top shape index: " << item.context.shape_index.ToString();
if (VLOG_IS_ON(1)) {
TF_RETURN_IF_ERROR(handle_to_instruction(item.handle).status());
VLOG(1) << "stack top "
<< handle_to_instruction(item.handle).value()->DebugString();
}
if (item.state == kVisiting) {
VLOG(1) << "visiting";
std::vector<Literal> literals;
literals.reserve(item.dependencies.size());
for (CacheKey& dep_key : item.dependencies) {
TF_RET_CHECK(evaluated.contains(dep_key));
literals.emplace_back(evaluated.at(dep_key).Clone());
}
VLOG(1) << "Start visiting with dependency type: "
<< PostorderDFSNodeTypeToString(item.type);
TF_ASSIGN_OR_RETURN(auto literal, item.visit(absl::MakeSpan(literals)));
VLOG(1) << "End visiting: " << literal.ToString();
evaluated[item.GetCacheKey()] = std::move(literal);
stack.pop_back();
continue;
}
VLOG(1) << "unvisited";
if (evaluated.contains(item.GetCacheKey())) {
stack.pop_back();
continue;
}
item.state = kVisiting;
PostorderDFSNode node;
switch (item.type) {
case PostorderDFSNodeType::kConstantValue: {
VLOG(1) << "constant value";
TF_ASSIGN_OR_RETURN(node, AnalyzeConstant(item.handle, item.context));
break;
}
case PostorderDFSNodeType::kConstantLowerBound: {
VLOG(1) << "constant lower bound";
TF_ASSIGN_OR_RETURN(node, AnalyzeLowerBound(item.handle, item.context));
break;
}
case PostorderDFSNodeType::kConstantUpperBound: {
VLOG(1) << "constant upper bound";
TF_ASSIGN_OR_RETURN(node, AnalyzeUpperBound(item.handle, item.context));
break;
}
case PostorderDFSNodeType::kBoundIsDynamic:
case PostorderDFSNodeType::kValueIsDynamic: {
VLOG(1) << "value is dynamic";
TF_ASSIGN_OR_RETURN(
node, AnalyzeIsDynamic(item.handle, item.type, item.context));
break;
}
}
item.visit = node.visit;
const int64_t current_item_id = stack.size() - 1;
for (const PostorderDFSDep& dep : node.dependencies) {
TF_ASSIGN_OR_RETURN(auto dependency_inst,
handle_to_instruction(dep.handle));
VLOG(1) << "dependency " << dep.annotation
<< "::" << dependency_inst->DebugString() << "index"
<< dep.context.shape_index << " stack size:" << stack.size();
stack.emplace_back(dep.handle, dep.context, dep.type, kUnvisited,
unique_id++);
stack[current_item_id].dependencies.push_back(stack.back().GetCacheKey());
}
}
VLOG(1) << "done" << evaluated[root.GetCacheKey()].ToString();
return evaluated[root.GetCacheKey()].Clone();
}
absl::StatusOr<Literal> ValueInference::AnalyzeIsDynamic(XlaOp op) {
PostorderDFSVisitor visitor(
evaluator_,
[&](int64_t handle) {
return builder_->LookUpInstructionByHandle(handle);
},
[&](int64_t handle) { return &(builder_->embedded_[handle]); });
auto result = visitor.PostOrderDFSVisit(
op.handle(), PostorderDFSNodeType::kValueIsDynamic);
return result;
}
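// Maps duplicate GetDimensionSize ops (same operand and dimension) onto one
// representative handle so later analyses treat them as equal.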
absl::StatusOr<std::optional<int64_t>> ValueInference::CseOpHandle(
int64_t handle) {
TF_ASSIGN_OR_RETURN(auto inst, builder_->LookUpInstructionByHandle(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(inst->opcode()));
if (opcode != HloOpcode::kGetDimensionSize) {
return {std::nullopt};
}
int64_t hash = absl::HashOf(inst->operand_ids(0), inst->dimensions(0));
auto lookup = cse_map_.find(hash);
if (lookup == cse_map_.end()) {
cse_map_[hash] = handle;
return {std::nullopt};
}
TF_ASSIGN_OR_RETURN(auto equivalent_op,
builder_->LookUpInstructionByHandle(lookup->second));
if (equivalent_op->opcode() != inst->opcode() ||
equivalent_op->operand_ids(0) != inst->operand_ids(0) ||
equivalent_op->dimensions(0) != inst->dimensions(0)) {
return {std::nullopt};
}
int64_t cse = lookup->second;
if (handle != cse) {
return {cse};
}
return {std::nullopt};
}
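// Symbolic simplification over instruction handles encoded as S64 literals;
// -1 marks an opaque (non-simplifiable) value. The kAdd arm cancels patterns
// such as a + (b - a) => b.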
absl::StatusOr<Literal> ValueInference::SimplifyOp(int64_t handle) {
TF_ASSIGN_OR_RETURN(auto cse_handle, CseOpHandle(handle));
if (cse_handle) {
return SimplifyOp(*cse_handle);
}
TF_ASSIGN_OR_RETURN(auto* inst, builder_->LookUpInstructionByHandle(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(inst->opcode()));
std::vector<Literal> operands;
auto output_shape = std::make_unique<const Shape>(inst->shape());
switch (opcode) {
case HloOpcode::kSlice:
case HloOpcode::kConcatenate:
case HloOpcode::kReshape:
case HloOpcode::kBroadcast: {
for (auto operand_id : inst->operand_ids()) {
TF_ASSIGN_OR_RETURN(auto literal, SimplifyOp(operand_id));
operands.emplace_back(std::move(literal));
}
return std::make_unique<HloProtoEvaluator>(evaluator_, *inst)
->WithOperands(absl::MakeSpan(operands))
.WithPrimitiveType(S64)
.Evaluate();
}
case HloOpcode::kConvert: {
auto operand =
builder_->LookUpInstructionByHandle(inst->operand_ids(0)).value();
if (Shape::Equal()(*output_shape, Shape(operand->shape()))) {
return SimplifyOp(inst->operand_ids(0));
} else {
return CreateS64Literal(-1, *output_shape);
}
}
case HloOpcode::kAdd: {
if (output_shape->rank() == 0) {
TF_ASSIGN_OR_RETURN(auto lhs, SimplifyOp(inst->operand_ids(0)));
TF_ASSIGN_OR_RETURN(auto rhs, SimplifyOp(inst->operand_ids(1)));
int64_t lhs_handle = lhs.Get<int64_t>({});
int64_t rhs_handle = rhs.Get<int64_t>({});
if (lhs_handle == -1 || rhs_handle == -1) {
return CreateS64Literal(-1, *output_shape);
}
std::function<std::optional<int64_t>(int64_t, int64_t)>
can_be_optimized;
can_be_optimized = [this, &can_be_optimized](
int64_t lhs,
int64_t rhs) -> std::optional<int64_t> {
auto rhs_inst = builder_->LookUpInstructionByHandle(rhs).value();
HloOpcode rhs_opcode = StringToHloOpcode(rhs_inst->opcode()).value();
if (rhs_opcode == HloOpcode::kSubtract) {
auto sub_lhs_handle =
SimplifyOp(rhs_inst->operand_ids(0)).value().Get<int64_t>({});
auto sub_rhs_handle =
SimplifyOp(rhs_inst->operand_ids(1)).value().Get<int64_t>({});
if (sub_rhs_handle == lhs) {
return sub_lhs_handle;
}
}
auto lhs_inst = builder_->LookUpInstructionByHandle(lhs).value();
HloOpcode lhs_opcode = StringToHloOpcode(lhs_inst->opcode()).value();
if (lhs_opcode == HloOpcode::kAdd) {
auto add_lhs_handle =
SimplifyOp(lhs_inst->operand_ids(0)).value().Get<int64_t>({});
auto add_rhs_handle =
SimplifyOp(lhs_inst->operand_ids(1)).value().Get<int64_t>({});
if (auto optimized = can_be_optimized(add_lhs_handle, rhs)) {
return Add(XlaOp(add_rhs_handle, builder_),
XlaOp(optimized.value(), builder_))
.handle();
}
if (auto optimized = can_be_optimized(add_rhs_handle, rhs)) {
return Add(XlaOp(add_lhs_handle, builder_),
XlaOp(optimized.value(), builder_))
.handle();
}
}
return std::nullopt;
};
if (auto optimized = can_be_optimized(lhs_handle, rhs_handle)) {
return LiteralUtil::CreateR0<int64_t>(optimized.value());
}
if (auto optimized = can_be_optimized(rhs_handle, lhs_handle)) {
return LiteralUtil::CreateR0<int64_t>(optimized.value());
}
XlaOp new_sum =
Add(XlaOp(lhs_handle, builder_), XlaOp(rhs_handle, builder_));
return LiteralUtil::CreateR0<int64_t>(new_sum.handle());
} else {
return CreateS64Literal(-1, *output_shape);
}
}
default: {
if (ShapeUtil::IsScalar(*output_shape)) {
return LiteralUtil::CreateR0<int64_t>(handle);
} else {
return CreateS64Literal(-1, *output_shape);
}
}
}
}
absl::StatusOr<OptionalLiteral> ValueInference::AnalyzeConstant(
XlaOp op, ValueInferenceMode mode) {
TF_RETURN_IF_ERROR(builder_->LookUpInstructionByHandle(op.handle()).status());
PostorderDFSVisitor visitor(
evaluator_,
[&](int64_t handle) {
return builder_->LookUpInstructionByHandle(handle);
},
[&](int64_t handle) { return &(builder_->embedded_[handle]); });
TF_ASSIGN_OR_RETURN(Shape op_shape, builder_->GetShape(op));
int64_t handle = op.handle();
  if (ShapeUtil::IsScalar(op_shape)) {
TF_ASSIGN_OR_RETURN(auto result, SimplifyOp(handle));
auto optimized_handle = result.Get<int64_t>({});
if (optimized_handle != -1) {
handle = optimized_handle;
}
}
switch (mode) {
case ValueInferenceMode::kLowerBound: {
TF_ASSIGN_OR_RETURN(Literal mask,
visitor.PostOrderDFSVisit(
handle, PostorderDFSNodeType::kBoundIsDynamic));
if (mask.IsAll(1)) {
return OptionalLiteral(CreateGarbageLiteral(op_shape), std::move(mask));
}
TF_ASSIGN_OR_RETURN(
Literal value,
visitor.PostOrderDFSVisit(handle,
PostorderDFSNodeType::kConstantLowerBound));
return OptionalLiteral(std::move(value), std::move(mask));
}
case ValueInferenceMode::kUpperBound: {
TF_ASSIGN_OR_RETURN(Literal mask,
visitor.PostOrderDFSVisit(
handle, PostorderDFSNodeType::kBoundIsDynamic));
if (mask.IsAll(1)) {
return OptionalLiteral(CreateGarbageLiteral(op_shape), std::move(mask));
}
TF_ASSIGN_OR_RETURN(
Literal value,
visitor.PostOrderDFSVisit(handle,
PostorderDFSNodeType::kConstantUpperBound));
return OptionalLiteral(std::move(value), std::move(mask));
}
case ValueInferenceMode::kValue: {
TF_ASSIGN_OR_RETURN(Literal mask,
visitor.PostOrderDFSVisit(
handle, PostorderDFSNodeType::kValueIsDynamic));
if (mask.IsAll(1)) {
return OptionalLiteral(CreateGarbageLiteral(op_shape), std::move(mask));
}
TF_ASSIGN_OR_RETURN(Literal value,
visitor.PostOrderDFSVisit(
handle, PostorderDFSNodeType::kConstantValue));
return OptionalLiteral(std::move(value), std::move(mask));
}
}
}
} | #include "xla/hlo/builder/value_inference.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/types/span.h"
#include "xla/client/client_library.h"
#include "xla/client/global_data.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/prng.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class ValueInferenceTest : public ::testing::Test {
public:
std::string TestName() const {
return ::testing::UnitTest::GetInstance()->current_test_info()->name();
}
};
class DynamismInferenceTest : public ValueInferenceTest {
public:
explicit DynamismInferenceTest(se::Platform* platform = nullptr)
: platform_(platform) {}
absl::StatusOr<Literal> ComputeDynamismLiteral(
XlaOp operand, XlaBuilder* builder, Layout* output_layout = nullptr) {
TF_RETURN_IF_ERROR(builder->first_error());
ValueInference value_inference(builder);
TF_ASSIGN_OR_RETURN(auto literal_slice,
value_inference.AnalyzeIsDynamic(operand));
return literal_slice.Clone();
}
absl::StatusOr<bool> ComputeDynamismScalar(XlaOp operand, XlaBuilder* builder,
ShapeIndex index = {}) {
TF_ASSIGN_OR_RETURN(auto literal,
ComputeDynamismLiteral(operand, builder, nullptr));
return literal.Get<bool>({}, index);
}
se::Platform* platform_;
};
TEST_F(DynamismInferenceTest, ScalarInt32Literal) {
XlaBuilder b(TestName());
auto computation = ConstantR0<int32_t>(&b, 42);
auto value = ComputeDynamismScalar(computation, &b);
ASSERT_TRUE(value.ok()) << value.status();
EXPECT_EQ(value.value(), false);
}
TEST_F(DynamismInferenceTest, Iota) {
XlaBuilder b(TestName());
auto computation = Iota(&b, S32, 2);
EXPECT_FALSE(ComputeDynamismLiteral(computation, &b).value().Get<bool>({0}));
}
TEST_F(DynamismInferenceTest, TupleSimple) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto tuple = Tuple(&b, {c, p});
EXPECT_EQ(ComputeDynamismScalar(tuple, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, TupleGteKeepsDynamism) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto tuple = Tuple(&b, {c, p});
auto gte0 = GetTupleElement(tuple, 0);
auto gte1 = GetTupleElement(tuple, 1);
auto tuple_2 = Tuple(&b, {gte0, gte1});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, PredValueUsedTwice) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto pred = Eq(c, p);
auto result = Select(pred, p, c);
EXPECT_EQ(ComputeDynamismScalar(result, &b, {}).value(), true);
}
TEST_F(DynamismInferenceTest, ReduceUsedTwice) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2}), "p0");
auto zero = ConstantR0<int32_t>(&b, 0);
XlaComputation add_s32 = CreateScalarAddComputation(S32, &b);
auto reduce = Reduce(p, zero, add_s32, {0});
auto pred = Eq(c, reduce);
auto result = Select(pred, reduce, c);
EXPECT_EQ(ComputeDynamismScalar(result, &b, {}).value(), true);
}
TEST_F(DynamismInferenceTest, VariadicReduce) {
XlaBuilder b(TestName());
auto c = ConstantR2<int32_t>(&b, {{0, 0}});
auto p = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {1, 2}), "p0");
auto half_dynamic = ConcatInDim(&b, {c, p}, 0);
XlaBuilder reduce_add("reduce_add");
auto p0 = Parameter(&reduce_add, 0, ShapeUtil::MakeScalarShape(S32), "p");
auto p1 = Parameter(&reduce_add, 1, ShapeUtil::MakeScalarShape(S32), "p");
auto p2 = Parameter(&reduce_add, 2, ShapeUtil::MakeScalarShape(S32), "p");
auto p3 = Parameter(&reduce_add, 3, ShapeUtil::MakeScalarShape(S32), "p");
auto reduce_result = p0;
reduce_result = Add(reduce_result, p1);
reduce_result = Add(reduce_result, p2);
reduce_result = Add(reduce_result, p3);
Tuple(&reduce_add, {reduce_result, reduce_result});
auto init = ConstantR0<int32_t>(&b, 0);
auto variadic_reduce = Reduce(&b, {half_dynamic, half_dynamic}, {init, init},
reduce_add.Build().value(), {1});
auto result = GetTupleElement(variadic_reduce, 0);
EXPECT_FALSE(ComputeDynamismLiteral(result, &b).value().Get<bool>({0}));
EXPECT_TRUE(ComputeDynamismLiteral(result, &b).value().Get<bool>({1}));
}
TEST_F(DynamismInferenceTest, DynamicSelectorWithMixedValues) {
XlaBuilder b(TestName());
auto constant_pred = ConstantR1<bool>(&b, {true});
auto dynamic_pred = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {1}), "p0");
auto concat = ConcatInDim(&b, {constant_pred, dynamic_pred}, 0);
auto constant_values = ConstantR1<bool>(&b, {true, true});
auto result = Select(concat, constant_values, constant_values);
EXPECT_FALSE(ComputeDynamismLiteral(result, &b).value().Get<bool>({0}));
EXPECT_TRUE(ComputeDynamismLiteral(result, &b).value().Get<bool>({1}));
}
TEST_F(DynamismInferenceTest, ConcatSliceReshapeKeepsDynamism) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto concat = ConcatScalars(&b, {c, p});
auto slice0 = SliceInDim(concat, 0, 1, 1, 0);
auto reshape0 = Reshape(slice0, {});
auto slice1 = SliceInDim(concat, 1, 2, 1, 0);
auto reshape1 = Reshape(slice1, {});
auto tuple_2 = Tuple(&b, {reshape0, reshape1});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, ParameterIsDynamic) {
XlaBuilder b(TestName());
auto computation = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto value = ComputeDynamismScalar(computation, &b);
ASSERT_TRUE(value.ok()) << value.status();
EXPECT_EQ(value.value(), true);
}
TEST_F(DynamismInferenceTest, UnaryOpKeepsDynamism) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto neg0 = Neg(c);
auto neg1 = Neg(p);
auto tuple_2 = Tuple(&b, {neg0, neg1});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, ParameterWithToken) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0,
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeScalarShape(S32)}),
"p0");
EXPECT_EQ(ComputeDynamismScalar(p, &b, {0}).value(), true);
EXPECT_EQ(ComputeDynamismScalar(p, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, BinaryOpsOrsDynamism) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto add1 = Add(c, c);
auto add2 = Add(p, c);
auto tuple_2 = Tuple(&b, {add1, add2});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, GetDimensionSize) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, false}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto tuple_2 = Tuple(&b, {gds0, gds1});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), true);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), false);
}
TEST_F(DynamismInferenceTest, DynamicSliceWithConstantOperands) {
XlaBuilder b(TestName());
auto constant = ConstantR1<int32_t>(&b, {0, 1, 2, 3});
auto slice_start = ConstantR0(&b, 1);
auto dynamic_slice = DynamicSlice(constant, {slice_start}, {1});
EXPECT_FALSE(
ComputeDynamismLiteral(dynamic_slice, &b).value().Get<bool>({0}));
}
TEST_F(DynamismInferenceTest, GatherWithCommonParent) {
XlaBuilder b(TestName());
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
auto operand1 = Parameter(&b, 0, indices_shape, "p1");
auto operand2 = Parameter(&b, 1, indices_shape, "p2");
auto indices = Sub(operand1, operand2);
GatherDimensionNumbers dim_numbers;
dim_numbers.add_offset_dims(1);
dim_numbers.add_start_index_map(0);
dim_numbers.set_index_vector_dim(1);
auto gather = Gather(operand1, indices, dim_numbers, {1});
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_TRUE(ComputeDynamismLiteral(gather, &b).value().Get<bool>({0, 0}));
}
TEST_F(DynamismInferenceTest, GatherWithConstantParent) {
XlaBuilder b(TestName());
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
auto data_operand = ConstantR1<int32_t>(&b, {1, 2});
auto indices = ConstantR1<int32_t>(&b, {1, 2});
GatherDimensionNumbers dim_numbers;
dim_numbers.add_offset_dims(1);
dim_numbers.add_start_index_map(0);
dim_numbers.set_index_vector_dim(1);
auto gather = Gather(data_operand, indices, dim_numbers, {1});
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gather, &b).value().Get<bool>({0, 0}));
}
TEST_F(DynamismInferenceTest, GatherWithSharedConstantParent) {
XlaBuilder b(TestName());
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
auto operand1 = ConstantR1<int32_t>(&b, {1, 2});
auto operand2 = ConstantR1<int32_t>(&b, {1, 2});
auto indices = Sub(operand1, operand2);
GatherDimensionNumbers dim_numbers;
dim_numbers.add_offset_dims(1);
dim_numbers.add_start_index_map(0);
dim_numbers.set_index_vector_dim(1);
auto gather = Gather(operand1, indices, dim_numbers, {1});
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gather, &b).value().Get<bool>({0, 0}));
}
TEST_F(DynamismInferenceTest, InferThroughPad) {
XlaBuilder b(TestName());
auto operand1 = ConstantR1<int32_t>(&b, {1, 2});
auto parameter = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {}), "p0");
PaddingConfig padding_config;
padding_config.add_dimensions()->set_edge_padding_high(1);
auto pad = Pad(operand1, parameter, padding_config);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(pad, &b).value().Get<bool>({0}));
EXPECT_FALSE(ComputeDynamismLiteral(pad, &b).value().Get<bool>({1}));
EXPECT_TRUE(ComputeDynamismLiteral(pad, &b).value().Get<bool>({2}));
}
TEST_F(DynamismInferenceTest, InferThroughConditionalBranchesAreSame) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder true_builder("true");
Parameter(&true_builder, 0, s32_shape, "cond_param");
Tuple(&true_builder, {ConstantR0<int32_t>(&true_builder, 1)});
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
Parameter(&false_builder, 0, s32_shape, "cond_param");
Tuple(&false_builder, {ConstantR0<int32_t>(&false_builder, 1)});
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto parameter = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "p0");
auto constant = ConstantR0<int32_t>(&b, 0);
auto cond = Conditional(parameter, constant, true_computation, constant,
false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
TEST_F(DynamismInferenceTest, InferThroughCall) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
XlaBuilder call_builder("call");
Parameter(&call_builder, 0, s32_shape, "call_param");
auto call_computation = call_builder.Build().value();
XlaBuilder b(TestName());
auto constant = ConstantR0<int32_t>(&b, 3);
auto call = Call(&b, call_computation, {constant});
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_EQ(ComputeDynamismScalar(call, &b, {}).value(), false);
}
TEST_F(DynamismInferenceTest, InferThroughConditionalBranchesAreNotSame) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder true_builder("true");
Parameter(&true_builder, 0, s32_shape, "cond_param");
Tuple(&true_builder, {ConstantR0<int32_t>(&true_builder, 1)});
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
Parameter(&false_builder, 0, s32_shape, "cond_param");
Tuple(&false_builder, {ConstantR0<int32_t>(&false_builder, 2)});
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto parameter = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "p0");
auto constant = ConstantR0<int32_t>(&b, 0);
auto cond = Conditional(parameter, constant, true_computation, constant,
false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_TRUE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
TEST_F(DynamismInferenceTest, InferThroughConditionalPredIsConstantTrueBranch) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder true_builder("true");
Parameter(&true_builder, 0, s32_shape, "cond_param");
Tuple(&true_builder, {ConstantR0<int32_t>(&true_builder, 0)});
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
Tuple(&false_builder,
{Parameter(&false_builder, 0, s32_shape, "cond_param")});
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto pred = ConstantR0<bool>(&b, true);
auto constant = ConstantR0<int32_t>(&b, 0);
auto cond = Conditional(pred, constant, true_computation, constant,
false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
TEST_F(DynamismInferenceTest,
InferThroughConditionalPredIsConstantFalseBranch) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder true_builder("true");
Parameter(&true_builder, 0, s32_shape, "cond_param");
Tuple(&true_builder, {ConstantR0<int32_t>(&true_builder, 0)});
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
Tuple(&false_builder,
{Parameter(&false_builder, 0, s32_shape, "cond_param")});
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto param = Parameter(&b, 0, s32_shape, "param");
auto pred = ConstantR0<bool>(&b, false);
auto constant = ConstantR0<int32_t>(&b, 0);
auto cond =
Conditional(pred, constant, true_computation, param, false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_TRUE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
TEST_F(DynamismInferenceTest, ArgumentForwardingNestedTuple) {
auto pred_shape = ShapeUtil::MakeShape(PRED, {});
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto tuple_shape = ShapeUtil::MakeTupleShape({pred_shape, s32_shape});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder inner_true_builder("inner_true");
Parameter(&inner_true_builder, 0, s32_shape, "cond_param");
Tuple(&inner_true_builder, {ConstantR0<int32_t>(&inner_true_builder, 0)});
auto inner_true_computation = inner_true_builder.Build().value();
XlaBuilder inner_false_builder("inner_false");
Tuple(&inner_false_builder,
{Parameter(&inner_false_builder, 0, s32_shape, "cond_param")});
auto inner_false_computation = inner_false_builder.Build().value();
XlaBuilder true_builder("true");
{
auto param = Parameter(&true_builder, 0, tuple_shape, "param");
auto op = GetTupleElement(param, 1);
auto pred = GetTupleElement(param, 0);
Conditional(pred, op, inner_true_computation, op, inner_false_computation);
}
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
{
auto param = Parameter(&false_builder, 0, tuple_shape, "param");
auto op = GetTupleElement(param, 1);
auto pred = GetTupleElement(param, 0);
Conditional(pred, op, inner_true_computation, op, inner_false_computation);
}
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto constant = ConstantR0<int32_t>(&b, 0);
auto pred = Parameter(&b, 0, pred_shape, "param");
auto param = Tuple(&b, {pred, constant});
auto cond =
Conditional(pred, param, true_computation, param, false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
class UpperBoundInferenceTest : public ValueInferenceTest {
public:
explicit UpperBoundInferenceTest(se::Platform* platform = nullptr)
: platform_(platform) {}
absl::StatusOr<OptionalLiteral> ComputeUpperBoundLiteral(
XlaOp operand, XlaBuilder* builder, Layout* output_layout = nullptr) {
ValueInference value_inference(builder);
TF_ASSIGN_OR_RETURN(auto literal,
value_inference.AnalyzeConstant(
operand, ValueInferenceMode::kUpperBound));
return literal;
}
se::Platform* platform_;
};
TEST_F(UpperBoundInferenceTest, GetDimensionSize) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, false}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto tuple_2 = Tuple(&b, {gds0, gds1});
EXPECT_EQ(ComputeUpperBoundLiteral(tuple_2, &b).value().Get<int32_t>({}, {0}),
2);
EXPECT_EQ(ComputeUpperBoundLiteral(tuple_2, &b).value().Get<int32_t>({}, {1}),
3);
}
TEST_F(UpperBoundInferenceTest, GetDimensionSizeSub) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, false}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto sub = Sub(gds1, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(sub, &b).value().Get<int32_t>({}), 3);
}
TEST_F(UpperBoundInferenceTest, GetDimensionSizeDiv) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, false}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto div = Div(gds1, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(div, &b).value().Get<int32_t>({}), 3);
}
TEST_F(UpperBoundInferenceTest, SumSubtract) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, true}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto sub = Sub(gds1, gds0);
auto add = Add(sub, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(add, &b).value().Get<int32_t>({}), 3);
auto add2 = Add(gds1, gds0);
auto add3 = Add(sub, add2);
EXPECT_EQ(ComputeUpperBoundLiteral(add3, &b).value().Get<int32_t>({}), 6);
}
TEST_F(UpperBoundInferenceTest, SumSubtractWithDataShuffling) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, true}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto broadcast = Broadcast(gds0, {1, 10});
auto convert = ConvertElementType(broadcast, S32);
  auto slice = SliceInDim(convert, /*start_index=*/0, /*limit_index=*/1,
                          /*stride=*/1, /*dimno=*/1);
gds0 = Reshape(slice, {});
auto sub = Sub(gds1, gds0);
auto add = Add(sub, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(add, &b).value().Get<int32_t>({}), 3);
auto add2 = Add(gds1, gds0);
auto add3 = Add(sub, add2);
EXPECT_EQ(ComputeUpperBoundLiteral(add3, &b).value().Get<int32_t>({}), 6);
}
TEST_F(UpperBoundInferenceTest, SumSubtractEquivalentGetDimensionSize) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, true}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto gds2 = GetDimensionSize(p, 0);
auto sub = Sub(gds1, gds2);
auto add = Add(sub, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(add, &b).value().Get<int32_t>({}), 3);
}
TEST_F(UpperBoundInferenceTest, ParamCantInferBound) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2}, {true}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}, {}), "p1");
auto gds = GetDimensionSize(p0, 0);
auto sub = Div(gds, p1);
EXPECT_FALSE(
ComputeUpperBoundLiteral(sub, &b).value().Get<int32_t>({}).has_value());
}
TEST_F(UpperBoundInferenceTest, KeyValueSort) {
XlaBuilder comparator_b("comparator");
auto p0 = Parameter(&comparator_b, 0, ShapeUtil::MakeShape(S32, {}), "p0");
auto p1 = Parameter(&comparator_b, 1, ShapeUtil::MakeShape(S32, {}), "p1");
Parameter(&comparator_b, 2, ShapeUtil::MakeShape(S32, {}), "p2");
Parameter(&comparator_b, 3, ShapeUtil::MakeShape(S32, {}), "p3");
Compare(p0, p1, ComparisonDirection::kGe);
TF_ASSERT_OK_AND_ASSIGN(auto comparator, comparator_b.Build());
int64_t elem_count = 17;
XlaBuilder b(TestName());
auto param = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {elem_count}), "p0");
auto iota = Iota(&b, S32, elem_count);
auto sort = Sort({param, iota}, comparator);
auto gte = GetTupleElement(sort, 1);
for (int64_t i = 0; i < elem_count; ++i) {
auto result_first_elem =
ComputeUpperBoundLiteral(gte, &b).value().Get<int32_t>({i});
EXPECT_TRUE(result_first_elem.has_value());
EXPECT_EQ(result_first_elem.value(), elem_count - 1);
}
}
class ConstValueInferenceTest : public ValueInferenceTest {
public:
explicit ConstValueInferenceTest(se::Platform* platform = nullptr)
: platform_(platform) {}
absl::StatusOr<OptionalLiteral> ComputeConstantValueLiteral(
XlaOp operand, XlaBuilder* builder, Layout* output_layout = nullptr) {
ValueInference value_inference(builder);
TF_ASSIGN_OR_RETURN(auto literal, value_inference.AnalyzeConstant(
operand, ValueInferenceMode::kValue));
return literal;
}
se::Platform* platform_;
};
TEST_F(ConstValueInferenceTest, ConstValuePassThroughSetBound) {
XlaBuilder b(TestName());
auto p0 = ConstantR0<int32_t>(&b, 32);
Shape shape = ShapeUtil::MakeShape(S32, {});
xla::Literal dynamism = xla::LiteralUtil::CreateR0<bool>(false);
xla::Literal bound = xla::LiteralUtil::CreateR0<int32_t>(32);
xla::Literal tuple =
xla::LiteralUtil::MakeTupleOwned(std::move(bound), std::move(dynamism));
auto set_bound =
CustomCall(&b, "SetBound", {p0}, shape, "", false, {}, &tuple);
auto result =
ComputeConstantValueLiteral(set_bound, &b).value().Get<int32_t>({});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), 32);
}
TEST_F(ConstValueInferenceTest, ParameterValuePassThroughSetBound) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {}), "p0");
Shape shape = ShapeUtil::MakeShape(S32, {});
xla::Literal dynamism = xla::LiteralUtil::CreateR0<bool>(false);
xla::Literal bound = xla::LiteralUtil::CreateR0<int32_t>(32);
xla::Literal tuple =
xla::LiteralUtil::MakeTupleOwned(std::move(bound), std::move(dynamism));
auto set_bound =
CustomCall(&b, "SetBound", {p0}, shape, "", false, {}, &tuple);
auto result =
ComputeConstantValueLiteral(set_bound, &b).value().Get<int32_t>({});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), 32);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/value_inference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/value_inference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
14261d17-de12-429d-884e-9463414a1834 | cpp | tensorflow/tensorflow | resize_bilinear_op | tensorflow/core/kernels/image/resize_bilinear_op.cc | tensorflow/core/kernels/image/resize_bilinear_op_test.cc | #define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/image/resize_bilinear_op.h"
#ifdef __SSE4_1__
#include <xmmintrin.h>
#endif
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/cast_op.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class ResizeBilinearOp : public OpKernel {
public:
explicit ResizeBilinearOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
if (st.output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor image_data(
context->input(0).tensor<T, 4>());
TTypes<float, 4>::Tensor output_data = st.output->tensor<float, 4>();
functor::ResizeBilinear<Device, T>()(
context->eigen_device<Device>(), image_data, st.height_scale,
st.width_scale, half_pixel_centers_, output_data);
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
namespace {
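// Interpolation state cached per output coordinate: the two input indices
// that bracket the sample point and the fractional lerp weight between them.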
struct CachedInterpolation {
int64_t lower;
int64_t upper;
float lerp;
};
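// Fills `interpolation` with bracketing indices and lerp weights for each of
// the `out_size` output coordinates; the extra entry at index `out_size` is
// zero-initialized as a sentinel.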
template <typename Scaler>
inline void compute_interpolation_weights(const Scaler scaler,
const int64_t out_size,
const int64_t in_size,
const float scale,
CachedInterpolation* interpolation) {
interpolation[out_size].lower = 0;
interpolation[out_size].upper = 0;
for (int64_t i = out_size - 1; i >= 0; --i) {
const float in = scaler(i, scale);
const float in_f = std::floor(in);
interpolation[i].lower =
std::max(static_cast<int64_t>(in_f), static_cast<int64_t>(0));
interpolation[i].upper =
std::min(static_cast<int64_t>(std::ceil(in)), in_size - 1);
interpolation[i].lerp = in - in_f;
}
}
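// Standard bilinear interpolation: lerp horizontally along the top and bottom
// rows, then lerp vertically between the two intermediate results.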
inline float compute_lerp(const float top_left, const float top_right,
const float bottom_left, const float bottom_right,
const float x_lerp, const float y_lerp) {
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
return top + (bottom - top) * y_lerp;
}
#ifdef __SSE4_1__
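// SSE variant of compute_lerp that interpolates four float lanes at once.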
inline __m128 compute_lerp_v(const __m128 top_left, const __m128 top_right,
const __m128 bottom_left,
const __m128 bottom_right, const __m128 x_lerp,
const __m128 y_lerp) {
const __m128 top =
_mm_add_ps(top_left, _mm_mul_ps(_mm_sub_ps(top_right, top_left), x_lerp));
const __m128 bottom = _mm_add_ps(
bottom_left, _mm_mul_ps(_mm_sub_ps(bottom_right, bottom_left), x_lerp));
return _mm_add_ps(top, _mm_mul_ps(_mm_sub_ps(bottom, top), y_lerp));
}
#endif
template <typename T>
void ResizeLineChannels(const T* const ys_input_lower_ptr,
const T* const ys_input_upper_ptr,
const CachedInterpolation* const xs,
const float ys_lerp, const int64_t out_width,
float* out_y, const int channels) {
for (int64_t x = 0; x < out_width; ++x) {
const int64_t xs_lower = xs[x].lower;
const int64_t xs_upper = xs[x].upper;
const float xs_lerp = xs[x].lerp;
for (int c = 0; c < channels; ++c) {
const float top_left(ys_input_lower_ptr[xs_lower + c]);
const float top_right(ys_input_lower_ptr[xs_upper + c]);
const float bottom_left(ys_input_upper_ptr[xs_lower + c]);
const float bottom_right(ys_input_upper_ptr[xs_upper + c]);
out_y[x * channels + c] = compute_lerp(top_left, top_right, bottom_left,
bottom_right, xs_lerp, ys_lerp);
}
}
}
#ifdef __SSE4_1__
template <typename T>
inline __m128 load_3xfloat_v(T* values) {
return _mm_set_ps(0.0f, static_cast<float>(values[2]),
static_cast<float>(values[1]),
static_cast<float>(values[0]));
}
template <>
inline __m128 load_3xfloat_v(float* values) {
return _mm_loadu_ps(values);
}
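// Resizes one output row of a 3-channel image with SSE. Each _mm_storeu_ps
// writes four floats (one lane past the three channels), so the last output
// pixel is handled by the scalar ResizeLineChannels to avoid writing past the
// end of the row.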
template <typename T>
void ResizeLine3ChannelsVector(const T* const ys_input_lower_ptr,
const T* const ys_input_upper_ptr,
const CachedInterpolation* const xs,
const float ys_lerp, const int64_t out_width,
float* out_y) {
const __m128 ys_lerp_v = _mm_set1_ps(ys_lerp);
int64_t x = 0;
for (x = 0; x < out_width - 1; ++x) {
const int64_t xs_lower = xs[x].lower;
const int64_t xs_upper = xs[x].upper;
const __m128 xs_lerp_v = _mm_set1_ps(xs[x].lerp);
const __m128 top_left_v = load_3xfloat_v(ys_input_lower_ptr + xs_lower);
const __m128 top_right_v = load_3xfloat_v(ys_input_lower_ptr + xs_upper);
const __m128 bottom_left_v = load_3xfloat_v(ys_input_upper_ptr + xs_lower);
const __m128 bottom_right_v = load_3xfloat_v(ys_input_upper_ptr + xs_upper);
_mm_storeu_ps(out_y + x * 3,
compute_lerp_v(top_left_v, top_right_v, bottom_left_v,
bottom_right_v, xs_lerp_v, ys_lerp_v));
}
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs + out_width - 1,
ys_lerp, 1, out_y + (out_width - 1) * 3, 3);
}
#endif
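// Resizes a batch of images row by row; 3-channel images take the vectorized
// row kernel when SSE4.1 is available.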
template <typename T>
void resize_image(
typename TTypes<T, 4>::ConstTensor images, const int batch_size,
const int64_t in_height, const int64_t in_width, const int64_t out_height,
const int64_t out_width, const int channels,
const std::vector<CachedInterpolation>& xs,
const std::vector<CachedInterpolation>& ys,
typename TTypes<float, 4>::Tensor output) TF_ATTRIBUTE_NOINLINE;
template <typename T>
void resize_image(typename TTypes<T, 4>::ConstTensor images,
const int batch_size, const int64_t in_height,
const int64_t in_width, const int64_t out_height,
const int64_t out_width, const int channels,
const std::vector<CachedInterpolation>& xs_vec,
const std::vector<CachedInterpolation>& ys,
typename TTypes<float, 4>::Tensor output) {
const int64_t in_row_size = in_width * channels;
const int64_t in_batch_num_values = in_height * in_row_size;
const int64_t out_row_size = out_width * channels;
const T* input_b_ptr = images.data();
const CachedInterpolation* xs = xs_vec.data();
if (channels == 3) {
float* output_y_ptr = output.data();
for (int b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const T* ys_input_lower_ptr = input_b_ptr + ys[y].lower * in_row_size;
const T* ys_input_upper_ptr = input_b_ptr + ys[y].upper * in_row_size;
#ifdef __SSE4_1__
ResizeLine3ChannelsVector(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr);
#else
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr, 3);
#endif
output_y_ptr += out_row_size;
}
input_b_ptr += in_batch_num_values;
}
} else {
float* output_y_ptr = output.data();
for (int b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const T* ys_input_lower_ptr = input_b_ptr + ys[y].lower * in_row_size;
const T* ys_input_upper_ptr = input_b_ptr + ys[y].upper * in_row_size;
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr, channels);
output_y_ptr += out_row_size;
}
input_b_ptr += in_batch_num_values;
}
}
}
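// Casts a float tensor to T; the GPU specialization below goes through the
// CastFunctor kernel instead of an Eigen expression.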
template <typename Device, typename T>
struct CastFloatTo {
void operator()(const Device& d, typename TTypes<float>::ConstFlat input,
typename TTypes<T>::Flat output) {
output.device(d) = input.template cast<T>();
}
};
template <typename T>
struct CastFloatTo<GPUDevice, T> {
void operator()(const GPUDevice& d, typename TTypes<float>::ConstFlat input,
typename TTypes<T>::Flat output) {
functor::CastFunctor<GPUDevice, T, float> cast;
cast(d, output, input);
}
};
}  // namespace
namespace functor {
template <typename T>
struct ResizeBilinear<CPUDevice, T> {
void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor images,
const float height_scale, const float width_scale,
bool half_pixel_centers,
typename TTypes<float, 4>::Tensor output) {
const int batch_size = images.dimension(0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
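    // Fast path: the output matches the input's spatial size, so resizing
    // reduces to a cast to float.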
if (out_height == in_height && out_width == in_width) {
output = images.template cast<float>();
return;
}
std::vector<CachedInterpolation> ys(out_height + 1);
std::vector<CachedInterpolation> xs(out_width + 1);
if (half_pixel_centers) {
compute_interpolation_weights(HalfPixelScaler(), out_height, in_height,
height_scale, ys.data());
compute_interpolation_weights(HalfPixelScaler(), out_width, in_width,
width_scale, xs.data());
} else {
compute_interpolation_weights(LegacyScaler(), out_height, in_height,
height_scale, ys.data());
compute_interpolation_weights(LegacyScaler(), out_width, in_width,
width_scale, xs.data());
}
for (int i = 0; i < xs.size(); ++i) {
xs[i].lower *= channels;
xs[i].upper *= channels;
}
resize_image<T>(images, batch_size, in_height, in_width, out_height,
out_width, channels, xs, ys, output);
}
};
}  // namespace functor
template <typename Device, typename T>
class ResizeBilinearOpGrad : public OpKernel {
public:
explicit ResizeBilinearOpGrad(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerGradientState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
TTypes<float, 4>::ConstTensor input_grad =
context->input(0).tensor<float, 4>();
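    // For half and bfloat16 the gradient is accumulated into a temporary
    // float tensor and cast to T at the end (float accumulation avoids
    // losing precision in the repeated additions).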
if (!std::is_same<T, Eigen::half>::value &&
!std::is_same<T, Eigen::bfloat16>::value) {
typename TTypes<T, 4>::Tensor output_grad(st.output->tensor<T, 4>());
functor::ResizeBilinearGrad<Device, T>()(
context->eigen_device<Device>(), input_grad, st.height_scale,
st.width_scale, half_pixel_centers_, output_grad);
} else {
Tensor output_grad;
OP_REQUIRES_OK(context, context->allocate_temp(
DT_FLOAT, st.output->shape(), &output_grad));
functor::ResizeBilinearGrad<Device, float>()(
context->eigen_device<Device>(), input_grad, st.height_scale,
st.width_scale, half_pixel_centers_, output_grad.tensor<float, 4>());
const Tensor& output_grad_const = output_grad;
CastFloatTo<Device, T>{}(context->template eigen_device<Device>(),
output_grad_const.template flat<float>(),
st.output->template flat<T>());
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
namespace functor {
template <typename T>
struct ResizeBilinearGrad<CPUDevice, T> {
template <typename Scaler>
void ResizeGradCore(const Scaler& scaler,
typename TTypes<float, 4>::ConstTensor input_grad,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output_grad) {
const Eigen::Index batch = output_grad.dimension(0);
const Eigen::Index original_height = output_grad.dimension(1);
const Eigen::Index original_width = output_grad.dimension(2);
const Eigen::Index channels = output_grad.dimension(3);
const Eigen::Index resized_height = input_grad.dimension(1);
const Eigen::Index resized_width = input_grad.dimension(2);
output_grad.setZero();
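    // Scatter pass: each element of the incoming gradient is distributed to
    // the four input pixels that produced it, weighted by the same lerp
    // coefficients as the forward pass.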
for (Eigen::Index b = 0; b < batch; ++b) {
for (Eigen::Index y = 0; y < resized_height; ++y) {
const float in_y = scaler(y, height_scale);
const Eigen::Index top_y_index =
std::max(static_cast<Eigen::Index>(floorf(in_y)),
static_cast<Eigen::Index>(0));
const Eigen::Index bottom_y_index = std::min(
static_cast<Eigen::Index>(ceilf(in_y)), original_height - 1);
const float y_lerp = in_y - floorf(in_y);
const float inverse_y_lerp = (1.0f - y_lerp);
for (Eigen::Index x = 0; x < resized_width; ++x) {
const float in_x = scaler(x, width_scale);
const Eigen::Index left_x_index =
std::max(static_cast<Eigen::Index>(floorf(in_x)),
static_cast<Eigen::Index>(0));
const Eigen::Index right_x_index = std::min(
static_cast<Eigen::Index>(ceilf(in_x)), original_width - 1);
const float x_lerp = in_x - floorf(in_x);
const float inverse_x_lerp = (1.0f - x_lerp);
for (Eigen::Index c = 0; c < channels; ++c) {
output_grad(b, top_y_index, left_x_index, c) +=
T(input_grad(b, y, x, c) * inverse_y_lerp * inverse_x_lerp);
output_grad(b, top_y_index, right_x_index, c) +=
T(input_grad(b, y, x, c) * inverse_y_lerp * x_lerp);
output_grad(b, bottom_y_index, left_x_index, c) +=
T(input_grad(b, y, x, c) * y_lerp * inverse_x_lerp);
output_grad(b, bottom_y_index, right_x_index, c) +=
T(input_grad(b, y, x, c) * y_lerp * x_lerp);
}
}
}
}
}
void operator()(const CPUDevice& d,
typename TTypes<float, 4>::ConstTensor input_grad,
const float height_scale, const float width_scale,
const bool half_pixel_centers,
typename TTypes<T, 4>::Tensor output_grad) {
if (half_pixel_centers) {
return ResizeGradCore(HalfPixelScaler(), input_grad, height_scale,
width_scale, output_grad);
} else {
return ResizeGradCore(LegacyScaler(), input_grad, height_scale,
width_scale, output_grad);
}
}
};
}  // namespace functor
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeBilinear") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeBilinearOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("ResizeBilinearGrad").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
ResizeBilinearOpGrad<CPUDevice, T>);
TF_CALL_half(REGISTER_GRAD_KERNEL);
TF_CALL_float(REGISTER_GRAD_KERNEL);
TF_CALL_double(REGISTER_GRAD_KERNEL);
TF_CALL_bfloat16(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeBilinear") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeBilinearOp<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("ResizeBilinearGrad").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
ResizeBilinearOpGrad<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
#endif
} | #include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
enum class TestDevice { CPU, GPU };
class ResizeBilinearOpTestBase
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
explicit ResizeBilinearOpTestBase()
: align_corners_(false), half_pixel_centers_(false) {}
void SetUp() override {
if (GetParam() == TestDevice::GPU) {
std::unique_ptr<Device> device_gpu(
DeviceFactory::NewDevice("GPU", {}, "/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
const Tensor* SetRandomImageInput(const TensorShape& shape) {
inputs_.clear();
CHECK_EQ(shape.dims(), 4) << "All images must have 4 dimensions.";
bool is_ref = IsRefType(input_types_[inputs_.size()]);
Tensor* input = new Tensor(allocator(), DataTypeToEnum<float>::v(), shape);
input->flat<float>().setRandom();
tensors_.push_back(input);
if (is_ref) {
CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]),
DataTypeToEnum<float>::v());
inputs_.push_back({&lock_for_refs_, input});
} else {
CHECK_EQ(input_types_[inputs_.size()], DataTypeToEnum<float>::v());
inputs_.push_back({nullptr, input});
}
return input;
}
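  // Scalar reference implementation of bilinear resize (honoring
  // half_pixel_centers_) that kernel outputs are compared against.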
void ResizeBilinearBaseline(TTypes<float, 4>::ConstTensor images,
TTypes<float, 4>::Tensor output) {
const int batch = images.dimension(0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
ASSERT_EQ(batch, output.dimension(0));
ASSERT_EQ(channels, output.dimension(3));
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
const float height_scale = in_height / static_cast<float>(out_height);
const float width_scale = in_width / static_cast<float>(out_width);
for (int b = 0; b < batch; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const float in_y =
half_pixel_centers_
? (static_cast<float>(y) + 0.5f) * height_scale - 0.5f
: y * height_scale;
const int64_t top_y_index = std::max(static_cast<int64_t>(floorf(in_y)),
static_cast<int64_t>(0));
const int64_t bottom_y_index =
std::min(static_cast<int64_t>(ceilf(in_y)), in_height - 1);
const float y_lerp = in_y - std::floor(in_y);
for (int64_t x = 0; x < out_width; ++x) {
const float in_x =
half_pixel_centers_
? (static_cast<float>(x) + 0.5f) * width_scale - 0.5f
: x * width_scale;
const int64_t left_x_index = std::max(
static_cast<int64_t>(floorf(in_x)), static_cast<int64_t>(0));
const int64_t right_x_index =
std::min(static_cast<int64_t>(ceilf(in_x)), in_width - 1);
const float x_lerp = in_x - std::floor(in_x);
for (int c = 0; c < channels; ++c) {
const float top_left = images(b, top_y_index, left_x_index, c);
const float top_right = images(b, top_y_index, right_x_index, c);
const float bottom_left =
images(b, bottom_y_index, left_x_index, c);
const float bottom_right =
images(b, bottom_y_index, right_x_index, c);
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom =
bottom_left + (bottom_right - bottom_left) * x_lerp;
output(b, y, x, c) = top + (bottom - top) * y_lerp;
}
}
}
}
}
void TestResize(int batch_size, int input_width, int input_height,
int channels, int output_width, int output_height) {
const TensorShape shape({batch_size, input_width, input_height, channels});
const Tensor* input = SetRandomImageInput(shape);
AddInputFromArray<int32>(TensorShape({2}), {output_width, output_height});
TF_ASSERT_OK(RunOpKernel());
std::unique_ptr<Tensor> expected(new Tensor(
allocator(), DataTypeToEnum<float>::v(),
TensorShape({batch_size, output_width, output_height, channels})));
ResizeBilinearBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
test::ExpectClose(*expected, *GetOutput(0), 4e-5);
}
void RunManyRandomTests(int channels) {
for (int batch_size : {1, 2, 5}) {
for (int in_w : {2, 4, 7, 20, 165}) {
for (int in_h : {1, 3, 5, 8, 100, 233}) {
for (int target_height : {1, 2, 3, 50, 113}) {
for (int target_width : {target_height, target_height / 2 + 1}) {
TestResize(batch_size, in_w, in_h, channels, target_width,
target_height);
}
}
}
}
}
}
bool align_corners_;
bool half_pixel_centers_;
};
class ResizeBilinearOpTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearOpTest() {}
};
class ResizeBilinearHalfPixelCentersOpTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearHalfPixelCentersOpTest() { half_pixel_centers_ = true; }
};
class ResizeBilinearOpAlignCornersTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearOpAlignCornersTest() { align_corners_ = true; }
};
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes1Channel) {
RunManyRandomTests(1);
}
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes3Channels) {
RunManyRandomTests(3);
}
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes4Channels) {
RunManyRandomTests(4);
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1.0});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinearRandom2x2To1x1) {
const Tensor* input = SetRandomImageInput(TensorShape({1, 2, 2, 1}));
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
std::unique_ptr<Tensor> expected(new Tensor(
allocator(), DataTypeToEnum<float>::v(), TensorShape({1, 1, 1, 1})));
ResizeBilinearBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
EXPECT_EQ(input->flat<float>()(0), output->flat<float>()(0));
test::ExpectClose(*expected, *output);
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1.0});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
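  // With align_corners=false the scale is 2/3, so output coordinate 1 samples
  // 2/3 of the way between input pixels, e.g. 1 + (2 - 1) * 2/3 = 5/3.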
test::FillValues<float>(&expected,
{1, 5.0f / 3, 2,
7.0f / 3, 3, 10.0f / 3,
3, 11.0f / 3, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 1.5, 2,
2, 2.5, 3,
3, 3.5, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 2.5,
5.5, 7});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear3x3To4x4) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1.75, 2.5, 3,
3.25, 4, 4.75, 5.25,
5.5, 6.25, 7, 7.5,
7, 7.75, 8.5, 9});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 7.0f/3, 11.0f/3,
19.0f/3, 23.0f/3, 27.0f/3,
35.0f/3, 39.0f/3, 43.0f/3});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearHalfPixelCentersOpTest, TestDownsamples) {
TestResize(4, 298, 297, 3, 61, 71);
}
TEST_P(ResizeBilinearHalfPixelCentersOpTest, TestUpsamples) {
TestResize(4, 61, 71, 3, 298, 297);
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{ 1, 2.5, 4,
7, 8.5, 10,
13, 14.5, 16});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To3x3Batch2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 1}), {1, 2, 3, 4, 1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 5.0f/3, 2, 7.0f/3, 3, 10.0f/3, 3, 11.0f/3, 4,
1, 5.0f/3, 2, 7.0f/3, 3, 10.0f/3, 3, 11.0f/3, 4
});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2x2To3x3x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 2}),
{1, -1, 2, -2, 3, -3, 4, -4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 2}));
test::FillValues<float>(&expected,
{
1, -1,
5.0f/3, -5.0f/3,
2, -2,
7.0f/3, -7.0f/3,
3, -3,
10.0f/3, -10.0f/3,
3, -3,
11.0f/3, -11.0f/3,
4, -4
});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1.5, 2, 2,
2, 2.5, 3, 3,
3, 3.5, 4, 4,
3, 3.5, 4, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, Test1_1c) { TestResize(1, 183, 299, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test1_3c) { TestResize(1, 183, 299, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test2_1c) { TestResize(1, 141, 186, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test2_3c) { TestResize(1, 141, 186, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test3_1c) { TestResize(1, 749, 603, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test3_3c) { TestResize(1, 749, 603, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test4_1c) { TestResize(1, 299, 299, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test4_3c) { TestResize(1, 299, 299, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test5_1c) { TestResize(1, 298, 297, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test5_3c) { TestResize(1, 298, 297, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test6_1c) { TestResize(1, 304, 303, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test6_3c) { TestResize(1, 304, 303, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, TestInvalidOutputSize) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "output dimensions must be positive"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidInputShape) {
AddInputFromArray<float>(TensorShape({2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "input must be 4-dimensional"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidSizeDim) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {4, 4});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "shape_t must be 1-dimensional"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidSizeElements) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({3}), {4, 4, 1});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "shape_t must have two elements"))
<< s;
}
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTestCpu, ResizeBilinearOpTest,
::testing::Values(TestDevice::CPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestCpu,
ResizeBilinearHalfPixelCentersOpTest,
::testing::Values(TestDevice::CPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestCpu,
ResizeBilinearOpAlignCornersTest,
::testing::Values(TestDevice::CPU));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTestGpu, ResizeBilinearOpTest,
::testing::Values(TestDevice::GPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestGpu,
ResizeBilinearHalfPixelCentersOpTest,
::testing::Values(TestDevice::GPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestGpu,
ResizeBilinearOpAlignCornersTest,
::testing::Values(TestDevice::GPU));
#endif
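// Benchmark harness that reuses the op-test fixture; BM_Resize below drives
// it when built with benchmark support.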
class ResizeBM : public ResizeBilinearOpTest {
public:
void TestBody() override {}
void SetUpBenchmark(int input_width, int input_height, int num_channels,
int output_width, int output_height) {
TF_EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const TensorShape shape(
{ 1, input_width, input_height, num_channels});
SetRandomImageInput(shape);
AddInputFromArray<int32>(TensorShape({2}), {output_width, output_height});
}
using ResizeBilinearOpTest::RunOpKernel;
};
#ifdef PLATFORM_GOOGLE
void BM_Resize(benchmark::State& state) {
ResizeBM bench;
bench.SetUpBenchmark(640, 480, 3, 1024, 768);
for (const auto _ : state) {
CHECK(bench.RunOpKernel().ok());
}
}
BENCHMARK(BM_Resize);
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_bilinear_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_bilinear_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3467b85c-9553-47ea-8c9c-3ebd61a23de5 | cpp | tensorflow/tensorflow | stablehlo_gather | tensorflow/lite/kernels/stablehlo_gather.cc | tensorflow/lite/kernels/stablehlo_gather_test.cc | #include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/tensor_slice_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_gather {
namespace {
constexpr int kOperandTensor = 0;
constexpr int kStartIndicesTensor = 1;
constexpr int kOutputTensor = 0;
using TfLiteIntArrayUniquePtr =
std::unique_ptr<TfLiteIntArray, decltype(&TfLiteIntArrayFree)>;
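// Clamps a slice starting index so that a slice of `slice_sizes` extent stays
// inside the operand; errors out if the ranks are inconsistent.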
template <typename IndexType>
TfLiteStatus ClipStartingIndex(const RuntimeShape& operand_shape,
const int64_t* slice_sizes, int num_slice_sizes,
Index<IndexType>& starting_index) {
if (operand_shape.DimensionsCount() != starting_index.size() ||
operand_shape.DimensionsCount() != num_slice_sizes) {
return kTfLiteError;
}
for (int dim = 0; dim < starting_index.size(); ++dim) {
    starting_index[dim] = std::min(static_cast<int64_t>(starting_index[dim]),
                                   operand_shape.Dims(dim) - slice_sizes[dim]);
}
return kTfLiteOk;
}
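// Returns the slice shape with the collapsed slice dimensions removed.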
static std::vector<int64_t> GetCollapsedSliceShape(
const int64_t* slice_sizes, int num_slice_sizes,
const int64_t* collapsed_slice_dims, int num_collapsed_slice_dims) {
std::vector<int64_t> result(num_slice_sizes - num_collapsed_slice_dims);
int result_ctr = 0;
for (int dim = 0; dim < num_slice_sizes; dim++) {
if (!ArrayContains(collapsed_slice_dims, num_collapsed_slice_dims, dim)) {
result[result_ctr] = slice_sizes[dim];
result_ctr++;
}
}
return result;
}
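// Computes the gather result shape: offset dimensions take their sizes from
// the collapsed slice shape, batch dimensions from the start_indices shape
// with index_vector_dim skipped.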
static TfLiteIntArrayUniquePtr GetResultShape(
int64_t result_rank, const TfLiteStablehloGatherParams* data,
const RuntimeShape& start_indices_shape) {
TfLiteIntArrayUniquePtr result = TfLiteIntArrayUniquePtr(
TfLiteIntArrayCreate(result_rank), &TfLiteIntArrayFree);
int result_ctr = 0;
std::vector<int64_t> collapsed_slice_shape = GetCollapsedSliceShape(
data->slice_sizes, data->num_slice_sizes, data->collapsed_slice_dims,
data->num_collapsed_slice_dims);
int64_t slice_shape_ctr = 0;
int64_t start_indices_shape_ctr = 0;
for (int64_t dim = 0; dim < result_rank; dim++) {
if (ArrayContains(data->offset_dims, data->num_offset_dims, dim)) {
result->data[result_ctr] = collapsed_slice_shape[slice_shape_ctr];
slice_shape_ctr++;
} else {
if (start_indices_shape_ctr == data->index_vector_dim) {
start_indices_shape_ctr++;
}
result->data[result_ctr] =
start_indices_shape.Dims(start_indices_shape_ctr);
start_indices_shape_ctr++;
}
result_ctr++;
}
return result;
}
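// Splits a result index into its batch and offset components according to
// `offset_dims`.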
template <typename IndexType>
TfLiteStatus SetBatchAndOffsetIndices(const Index<IndexType>& result_index,
const int64_t* offset_dims,
int num_offset_dims,
Index<IndexType>& batch_index,
Index<IndexType>& offset_index) {
int offset_index_ctr = 0;
int batch_index_ctr = 0;
for (int result_dim = 0; result_dim < result_index.size(); ++result_dim) {
if (ArrayContains(offset_dims, num_offset_dims, result_dim)) {
if (offset_index_ctr >= num_offset_dims) {
return kTfLiteError;
}
offset_index[offset_index_ctr] = result_index[result_dim];
offset_index_ctr++;
} else {
if (batch_index_ctr >= result_index.size() - num_offset_dims) {
return kTfLiteError;
}
batch_index[batch_index_ctr] = result_index[result_dim];
batch_index_ctr++;
}
}
return kTfLiteOk;
}
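// Core gather loop for concrete index/data types. For every result index:
// split it into batch and offset parts, read the start-index vector for the
// batch part, scatter it through start_index_map, clip it to a legal slice
// origin, expand the offset over the collapsed dims, and copy the operand
// element at the combined index into the result.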
template <typename IndexType, typename DataType>
TfLiteStatus EvalWithTypes(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
int operand_rank = operand->dims->size;
RuntimeShape operand_shape = GetTensorShape(operand);
const TfLiteTensor* start_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
&start_indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteStablehloGatherParams* data =
reinterpret_cast<TfLiteStablehloGatherParams*>(node->builtin_data);
RuntimeShape start_indices_shape = GetTensorShape(start_indices);
int result_rank = output->dims->size;
RuntimeShape result_runtime_shape(result_rank, output->dims->data);
Index<IndexType> result_index = Index<IndexType>(result_rank, 0);
int64_t num_batch_dims = result_rank - data->num_offset_dims;
Index<IndexType> batch_index(num_batch_dims);
Index<IndexType> offset_index(data->num_offset_dims);
do {
TF_LITE_ENSURE_OK(
context, SetBatchAndOffsetIndices(result_index, data->offset_dims,
data->num_offset_dims, batch_index,
offset_index));
Index<IndexType> starting_index_vector =
ReadIndexVector(start_indices, start_indices_shape, batch_index,
data->index_vector_dim);
Index<IndexType> final_starting_index;
ScatterIndex(starting_index_vector, data->start_index_map,
data->num_start_index_map, operand_rank,
&final_starting_index);
TF_LITE_ENSURE_OK(
context,
ClipStartingIndex(operand_shape, data->slice_sizes,
data->num_slice_sizes, final_starting_index));
Index<IndexType> full_offset_index;
ExpandDims(offset_index, data->collapsed_slice_dims,
data->num_collapsed_slice_dims, &full_offset_index);
Index<IndexType> operand_lookup_index =
AddIndices(final_starting_index, full_offset_index);
const DataType* operand_data = GetTensorData<DataType>(operand);
IndexType flat_operand_index =
TensorIndexToFlat(operand_lookup_index.data(),
operand_lookup_index.size(), GetTensorShape(operand));
DataType looked_up_value = operand_data[flat_operand_index];
DataType* result_data = GetTensorData<DataType>(output);
IndexType flat_result_index = TensorIndexToFlat(
result_index.data(), result_index.size(), GetTensorShape(output));
result_data[flat_result_index] = looked_up_value;
} while (NextIndex(result_rank, result_runtime_shape.DimsData(),
result_index.data()));
return TfLiteStatus::kTfLiteOk;
}
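// Dispatches on the operand's element type; unsupported combinations are
// reported through the kernel log.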
template <typename IndexType>
TfLiteStatus EvalWithIndexType(TfLiteContext* context, TfLiteNode* node,
TfLiteType index_type, TfLiteType data_type) {
switch (data_type) {
case kTfLiteFloat16:
return EvalWithTypes<IndexType, Eigen::half>(context, node);
case kTfLiteFloat32:
return EvalWithTypes<IndexType, float>(context, node);
case kTfLiteFloat64:
return EvalWithTypes<IndexType, double>(context, node);
case kTfLiteInt8:
return EvalWithTypes<IndexType, int8_t>(context, node);
case kTfLiteInt16:
return EvalWithTypes<IndexType, int16_t>(context, node);
case kTfLiteInt32:
return EvalWithTypes<IndexType, int32_t>(context, node);
case kTfLiteInt64:
return EvalWithTypes<IndexType, int64_t>(context, node);
case kTfLiteUInt8:
return EvalWithTypes<IndexType, uint8_t>(context, node);
case kTfLiteUInt16:
return EvalWithTypes<IndexType, uint16_t>(context, node);
case kTfLiteUInt32:
return EvalWithTypes<IndexType, uint32_t>(context, node);
case kTfLiteUInt64:
return EvalWithTypes<IndexType, uint64_t>(context, node);
default:
TF_LITE_KERNEL_LOG(
context, "(Index Type: %s, Data Type: %s) currently not supported.\n",
TfLiteTypeGetName(index_type), TfLiteTypeGetName(data_type));
return TfLiteStatus::kTfLiteError;
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
const TfLiteTensor* start_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
&start_indices));
TfLiteType index_type = start_indices->type;
TfLiteType data_type = operand->type;
if (index_type == kTfLiteInt32) {
return EvalWithIndexType<int32_t>(context, node, index_type, data_type);
} else if (index_type == kTfLiteInt64) {
return EvalWithIndexType<int64_t>(context, node, index_type, data_type);
} else {
TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
TfLiteTypeGetName(index_type));
return TfLiteStatus::kTfLiteError;
}
}
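// Validates that start_indices uses a supported index type and resizes the
// output tensor to the shape implied by the gather attributes and the
// start_indices shape.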
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
const TfLiteTensor* start_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
&start_indices));
TfLiteType index_type = start_indices->type;
if (index_type != kTfLiteInt32 && index_type != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
TfLiteTypeGetName(index_type));
return TfLiteStatus::kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteStablehloGatherParams* data =
reinterpret_cast<TfLiteStablehloGatherParams*>(node->builtin_data);
RuntimeShape start_indices_shape = GetTensorShape(start_indices);
TfLiteIntArrayUniquePtr result_shape =
GetResultShape(output->dims->size, data, start_indices_shape);
TF_LITE_ENSURE_STATUS(
context->ResizeTensor(context, output, result_shape.release()));
return TfLiteStatus::kTfLiteOk;
}
}
TfLiteRegistration* Register_STABLEHLO_GATHER() {
static TfLiteRegistration r = {nullptr, nullptr, stablehlo_gather::Prepare,
stablehlo_gather::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class StablehloGatherOpModel : public SingleOpModel {
public:
StablehloGatherOpModel(const TensorData& input, const TensorData& indices,
const TfLiteStablehloGatherParams& params) {
input_ = AddInput(input);
indices_ = AddInput(indices);
output_ = AddOutput(TensorData(input.type, {2, 3, 2, 2}));
SetBuiltinOp(
BuiltinOperator_STABLEHLO_GATHER,
BuiltinOptions2_StablehloGatherOptions,
CreateStablehloGatherOptions(
builder_,
builder_.CreateVector(
std::vector(params.offset_dims,
params.offset_dims + params.num_offset_dims)),
builder_.CreateVector(std::vector(
params.collapsed_slice_dims,
params.collapsed_slice_dims + params.num_collapsed_slice_dims)),
builder_.CreateVector(std::vector(
params.start_index_map,
params.start_index_map + params.num_start_index_map)),
params.index_vector_dim,
builder_.CreateVector(
std::vector(params.slice_sizes,
params.slice_sizes + params.num_slice_sizes)),
params.indices_are_sorted)
.Union());
BuildInterpreter({GetShape(input_), GetShape(indices_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
template <typename T>
void SetIndices(std::initializer_list<T> data) {
PopulateTensor<T>(indices_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int input_;
int indices_;
int output_;
};
TEST(StablehloGatherOpTest, GathersSlices) {
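  // Field order of the TfLiteStablehloGatherParams initializer below:
  // offset_dims, num_offset_dims, collapsed_slice_dims,
  // num_collapsed_slice_dims, start_index_map, num_start_index_map,
  // index_vector_dim, slice_sizes, num_slice_sizes, indices_are_sorted.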
TfLiteStablehloGatherParams params = {
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
{1, 2, 2},
3,
false
};
StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}},
{TensorType_INT64, {2, 3, 2}}, params);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
13, 14, 15, 16, 9, 10, 11, 12,
11, 12, 13, 14, 17, 18, 19, 20};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloGatherOpTest, ClipsStartingIndices) {
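  // Identical to GathersSlices except that the final start index (9) is out
  // of range and must be clamped, so the expected output is unchanged.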
TfLiteStablehloGatherParams params = {
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
{1, 2, 2},
3,
false
};
StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}},
{TensorType_INT64, {2, 3, 2}}, params);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 9});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
13, 14, 15, 16, 9, 10, 11, 12,
11, 12, 13, 14, 17, 18, 19, 20};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloGatherOpTest, WorksWithDynamicShapes) {
TfLiteStablehloGatherParams params = {
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
{1, 2, 2},
3,
false
};
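  // The trailing {{-1, -1, 2}} initializes the tensor's shape signature;
  // -1 marks dimensions whose size is only known at runtime.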
TensorData indices_tensor = {TensorType_INT64,
{2, 3, 2},
0.0f,
0.0f,
0.0f,
0,
false,
{},
{},
0,
{},
{},
{},
{},
{{-1, -1, 2}}};
StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}}, indices_tensor,
params);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 9});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
13, 14, 15, 16, 9, 10, 11, 12,
11, 12, 13, 14, 17, 18, 19, 20};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_gather.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_gather_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1fc6fda9-8140-4f7a-9e71-44fa235ca800 | cpp | abseil/abseil-cpp | commandlineflag | absl/flags/internal/commandlineflag.cc | absl/flags/commandlineflag_test.cc | #include "absl/flags/internal/commandlineflag.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
FlagStateInterface::~FlagStateInterface() = default;
}
ABSL_NAMESPACE_END
} | #include "absl/flags/commandlineflag.h"
#include <memory>
#include <string>
#include "gtest/gtest.h"
#include "absl/flags/config.h"
#include "absl/flags/flag.h"
#include "absl/flags/internal/private_handle_accessor.h"
#include "absl/flags/reflection.h"
#include "absl/flags/usage_config.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
ABSL_FLAG(int, int_flag, 201, "int_flag help");
ABSL_FLAG(std::string, string_flag, "dflt",
absl::StrCat("string_flag", " help"));
ABSL_RETIRED_FLAG(bool, bool_retired_flag, false, "bool_retired_flag help");
ABSL_FLAG(int, int_flag2, 201, "");
ABSL_FLAG(std::string, string_flag2, "dflt", "");
namespace {
namespace flags = absl::flags_internal;
class CommandLineFlagTest : public testing::Test {
protected:
static void SetUpTestSuite() {
absl::FlagsUsageConfig default_config;
default_config.normalize_filename = &CommandLineFlagTest::NormalizeFileName;
absl::SetFlagsUsageConfig(default_config);
}
void SetUp() override {
#if ABSL_FLAGS_STRIP_NAMES
GTEST_SKIP() << "This test requires flag names to be present";
#endif
flag_saver_ = absl::make_unique<absl::FlagSaver>();
}
void TearDown() override { flag_saver_.reset(); }
private:
static std::string NormalizeFileName(absl::string_view fname) {
#ifdef _WIN32
std::string normalized(fname);
std::replace(normalized.begin(), normalized.end(), '\\', '/');
fname = normalized;
#endif
return std::string(fname);
}
std::unique_ptr<absl::FlagSaver> flag_saver_;
};
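// Every test runs under a fresh FlagSaver so flag mutations cannot leak
// between tests, and filenames are normalized so the path checks also pass
// on Windows.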
TEST_F(CommandLineFlagTest, TestAttributesAccessMethods) {
auto* flag_01 = absl::FindCommandLineFlag("int_flag");
ASSERT_TRUE(flag_01);
EXPECT_EQ(flag_01->Name(), "int_flag");
EXPECT_EQ(flag_01->Help(), "int_flag help");
EXPECT_TRUE(!flag_01->IsRetired());
EXPECT_TRUE(flag_01->IsOfType<int>());
EXPECT_TRUE(!flag_01->IsOfType<bool>());
EXPECT_TRUE(!flag_01->IsOfType<std::string>());
EXPECT_TRUE(absl::EndsWith(flag_01->Filename(),
"absl/flags/commandlineflag_test.cc"))
<< flag_01->Filename();
auto* flag_02 = absl::FindCommandLineFlag("string_flag");
ASSERT_TRUE(flag_02);
EXPECT_EQ(flag_02->Name(), "string_flag");
EXPECT_EQ(flag_02->Help(), "string_flag help");
EXPECT_TRUE(!flag_02->IsRetired());
EXPECT_TRUE(flag_02->IsOfType<std::string>());
EXPECT_TRUE(!flag_02->IsOfType<bool>());
EXPECT_TRUE(!flag_02->IsOfType<int>());
EXPECT_TRUE(absl::EndsWith(flag_02->Filename(),
"absl/flags/commandlineflag_test.cc"))
<< flag_02->Filename();
}
TEST_F(CommandLineFlagTest, TestValueAccessMethods) {
absl::SetFlag(&FLAGS_int_flag2, 301);
auto* flag_01 = absl::FindCommandLineFlag("int_flag2");
ASSERT_TRUE(flag_01);
EXPECT_EQ(flag_01->CurrentValue(), "301");
EXPECT_EQ(flag_01->DefaultValue(), "201");
absl::SetFlag(&FLAGS_string_flag2, "new_str_value");
auto* flag_02 = absl::FindCommandLineFlag("string_flag2");
ASSERT_TRUE(flag_02);
EXPECT_EQ(flag_02->CurrentValue(), "new_str_value");
EXPECT_EQ(flag_02->DefaultValue(), "dflt");
}
TEST_F(CommandLineFlagTest, TestParseFromCurrentValue) {
std::string err;
auto* flag_01 = absl::FindCommandLineFlag("int_flag");
EXPECT_FALSE(
flags::PrivateHandleAccessor::IsSpecifiedOnCommandLine(*flag_01));
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "11", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange, err));
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 11);
EXPECT_FALSE(
flags::PrivateHandleAccessor::IsSpecifiedOnCommandLine(*flag_01));
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "-123", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange,
err));
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), -123);
EXPECT_FALSE(
flags::PrivateHandleAccessor::IsSpecifiedOnCommandLine(*flag_01));
EXPECT_TRUE(!flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "xyz", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange,
err));
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), -123);
EXPECT_EQ(err, "Illegal value 'xyz' specified for flag 'int_flag'");
EXPECT_FALSE(
flags::PrivateHandleAccessor::IsSpecifiedOnCommandLine(*flag_01));
EXPECT_TRUE(!flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "A1", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange, err));
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), -123);
EXPECT_EQ(err, "Illegal value 'A1' specified for flag 'int_flag'");
EXPECT_FALSE(
flags::PrivateHandleAccessor::IsSpecifiedOnCommandLine(*flag_01));
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "0x10", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange,
err));
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 16);
EXPECT_FALSE(
flags::PrivateHandleAccessor::IsSpecifiedOnCommandLine(*flag_01));
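  // "011" parses as decimal 11 (leading zeros do not trigger octal), and
  // kCommandLine marks the flag as specified on the command line.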
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "011", flags::SET_FLAGS_VALUE, flags::kCommandLine, err));
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 11);
EXPECT_TRUE(flags::PrivateHandleAccessor::IsSpecifiedOnCommandLine(*flag_01));
EXPECT_TRUE(!flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange, err));
EXPECT_EQ(err, "Illegal value '' specified for flag 'int_flag'");
auto* flag_02 = absl::FindCommandLineFlag("string_flag");
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_02, "xyz", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange,
err));
EXPECT_EQ(absl::GetFlag(FLAGS_string_flag), "xyz");
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_02, "", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange, err));
EXPECT_EQ(absl::GetFlag(FLAGS_string_flag), "");
}
TEST_F(CommandLineFlagTest, TestParseFromDefaultValue) {
std::string err;
auto* flag_01 = absl::FindCommandLineFlag("int_flag");
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "111", flags::SET_FLAGS_DEFAULT, flags::kProgrammaticChange,
err));
EXPECT_EQ(flag_01->DefaultValue(), "111");
auto* flag_02 = absl::FindCommandLineFlag("string_flag");
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_02, "abc", flags::SET_FLAGS_DEFAULT, flags::kProgrammaticChange,
err));
EXPECT_EQ(flag_02->DefaultValue(), "abc");
}
TEST_F(CommandLineFlagTest, TestParseFromIfDefault) {
std::string err;
auto* flag_01 = absl::FindCommandLineFlag("int_flag");
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "22", flags::SET_FLAG_IF_DEFAULT, flags::kProgrammaticChange,
err))
<< err;
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 22);
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "33", flags::SET_FLAG_IF_DEFAULT, flags::kProgrammaticChange,
err));
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 22);
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "201", flags::SET_FLAGS_VALUE, flags::kProgrammaticChange,
err));
EXPECT_TRUE(flags::PrivateHandleAccessor::ParseFrom(
*flag_01, "33", flags::SET_FLAG_IF_DEFAULT, flags::kProgrammaticChange,
err));
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 201);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/internal/commandlineflag.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/commandlineflag_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
60745d33-3a8b-4264-9983-da7112c95926 | cpp | tensorflow/tensorflow | signature_runner | tensorflow/lite/core/signature_runner.cc | tensorflow/lite/core/signature_runner_test.cc | #include "tensorflow/lite/core/signature_runner.h"
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/internal/signature_def.h"
namespace tflite {
namespace impl {
SignatureRunner::SignatureRunner(const internal::SignatureDef* signature_def,
Subgraph* subgraph)
: signature_def_(signature_def), subgraph_(subgraph) {
for (const auto& it : signature_def_->inputs) {
input_names_.push_back(it.first.c_str());
}
for (const auto& it : signature_def_->outputs) {
output_names_.push_back(it.first.c_str());
}
}
TfLiteTensor* SignatureRunner::input_tensor(const char* input_name) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return nullptr;
}
return subgraph_->tensor(it->second);
}
const TfLiteTensor* SignatureRunner::output_tensor(
const char* output_name) const {
const auto& it = signature_def_->outputs.find(output_name);
if (it == signature_def_->outputs.end()) {
subgraph_->ReportError("Output name %s was not found", output_name);
return nullptr;
}
return subgraph_->tensor(it->second);
}
TfLiteStatus SignatureRunner::SetInputBufferHandle(
const char* input_name, TfLiteBufferHandle buffer_handle,
TfLiteDelegate* delegate, bool release_existing_buffer_handle) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->SetBufferHandle(it->second, buffer_handle, delegate,
release_existing_buffer_handle);
}
TfLiteStatus SignatureRunner::SetOutputBufferHandle(
const char* output_name, TfLiteBufferHandle buffer_handle,
TfLiteDelegate* delegate, bool release_existing_buffer_handle) {
const auto& it = signature_def_->outputs.find(output_name);
if (it == signature_def_->outputs.end()) {
subgraph_->ReportError("Output name %s was not found", output_name);
return kTfLiteError;
}
return subgraph_->SetBufferHandle(it->second, buffer_handle, delegate,
release_existing_buffer_handle);
}
TfLiteStatus SignatureRunner::ResizeInputTensor(
const char* input_name, const std::vector<int>& new_size) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->ResizeInputTensor(it->second, new_size);
}
TfLiteStatus SignatureRunner::ResizeInputTensorStrict(
const char* input_name, const std::vector<int>& new_size) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->ResizeInputTensorStrict(it->second, new_size);
}
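// Sets the continue-invocation flag so a previously requested cancellation
// does not abort this run, invokes the subgraph, and, unless buffer handle
// outputs are allowed, ensures every output tensor's data is readable from
// CPU memory.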
TfLiteStatus SignatureRunner::Invoke() {
if (subgraph_->continue_invocation_)
(void)subgraph_->continue_invocation_->test_and_set();
TF_LITE_ENSURE_STATUS(subgraph_->Invoke());
if (!allow_buffer_handle_output_) {
for (int tensor_index : subgraph_->outputs()) {
TF_LITE_ENSURE_STATUS(
subgraph_->EnsureTensorDataIsReadable(tensor_index));
}
}
return kTfLiteOk;
}
TfLiteStatus SignatureRunner::SetCustomAllocationForInputTensor(
const char* input_name, const TfLiteCustomAllocation& allocation,
int64_t flags) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->SetCustomAllocationForTensor(it->second, allocation, flags);
}
TfLiteStatus SignatureRunner::SetCustomAllocationForOutputTensor(
const char* output_name, const TfLiteCustomAllocation& allocation,
int64_t flags) {
const auto& it = signature_def_->outputs.find(output_name);
if (it == signature_def_->outputs.end()) {
subgraph_->ReportError("Output name %s was not found", output_name);
return kTfLiteError;
}
return subgraph_->SetCustomAllocationForTensor(it->second, allocation, flags);
}
}
} | #include "tensorflow/lite/core/signature_runner.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace impl {
namespace {
TEST(SignatureRunnerTest, TestMultiSignatures) {
TestErrorReporter reporter;
auto model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/multi_signatures.bin", &reporter);
ASSERT_TRUE(model);
ops::builtin::BuiltinOpResolver resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
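  // The test model exposes two signatures, "add" and "sub"; each runner is
  // exercised end to end, including input resizing and tensor lookup by name.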
std::vector<const std::string*> signature_defs =
interpreter->signature_keys();
ASSERT_EQ(signature_defs.size(), 2);
ASSERT_EQ(*(signature_defs[0]), "add");
ASSERT_EQ(*(signature_defs[1]), "sub");
ASSERT_EQ(interpreter->GetSignatureRunner("dummy"), nullptr);
SignatureRunner* add_runner =
interpreter->GetSignatureRunner(signature_defs[0]->c_str());
ASSERT_NE(add_runner, nullptr);
ASSERT_EQ(add_runner->signature_key(), "add");
const std::vector<const char*>& input_names = add_runner->input_names();
const std::vector<const char*>& output_names = add_runner->output_names();
ASSERT_EQ(input_names.size(), 1);
ASSERT_EQ(std::string(input_names[0]), "x");
ASSERT_EQ(output_names.size(), 1);
ASSERT_EQ(std::string(output_names[0]), "output_0");
ASSERT_EQ(add_runner->ResizeInputTensor("x", {2}), kTfLiteOk);
ASSERT_EQ(add_runner->AllocateTensors(), kTfLiteOk);
TfLiteTensor* add_input = add_runner->input_tensor("x");
ASSERT_EQ(add_runner->input_tensor("dummy"), nullptr);
const TfLiteTensor* add_output = add_runner->output_tensor("output_0");
ASSERT_EQ(add_runner->output_tensor("dummy"), nullptr);
ASSERT_NE(add_input, nullptr);
ASSERT_NE(add_output, nullptr);
add_input->data.f[0] = 2;
add_input->data.f[1] = 4;
ASSERT_EQ(add_runner->Invoke(), kTfLiteOk);
ASSERT_EQ(add_output->data.f[0], 4);
ASSERT_EQ(add_output->data.f[1], 6);
SignatureRunner* sub_runner = interpreter->GetSignatureRunner("sub");
ASSERT_NE(sub_runner, nullptr);
ASSERT_EQ(sub_runner->signature_key(), "sub");
const std::vector<const char*>& input_names2 = sub_runner->input_names();
const std::vector<const char*>& output_names2 = sub_runner->output_names();
ASSERT_EQ(input_names2.size(), 1);
ASSERT_EQ(std::string(input_names2[0]), "x");
ASSERT_EQ(output_names2.size(), 1);
ASSERT_EQ(std::string(output_names2[0]), "output_0");
ASSERT_EQ(sub_runner->ResizeInputTensor("x", {3}), kTfLiteOk);
ASSERT_EQ(sub_runner->AllocateTensors(), kTfLiteOk);
TfLiteTensor* sub_input = sub_runner->input_tensor("x");
const TfLiteTensor* sub_output = sub_runner->output_tensor("output_0");
ASSERT_NE(sub_input, nullptr);
ASSERT_NE(sub_output, nullptr);
sub_input->data.f[0] = 2;
sub_input->data.f[1] = 4;
sub_input->data.f[2] = 6;
ASSERT_EQ(sub_runner->Invoke(), kTfLiteOk);
ASSERT_EQ(sub_output->data.f[0], -1);
ASSERT_EQ(sub_output->data.f[1], 1);
ASSERT_EQ(sub_output->data.f[2], 3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/signature_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/signature_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e05e458f-6e23-4833-bf54-9723da543bec | cpp | google/quiche | moq_chat | quiche/quic/moqt/tools/moq_chat.h | quiche/quic/moqt/tools/moq_chat_test.cc | #ifndef QUICHE_QUIC_MOQT_TOOLS_MOQ_CHAT_H
#define QUICHE_QUIC_MOQT_TOOLS_MOQ_CHAT_H
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/moqt/moqt_messages.h"
namespace moqt {
class MoqChatStrings {
public:
explicit MoqChatStrings(absl::string_view chat_id) : chat_id_(chat_id) {}
static constexpr absl::string_view kBasePath = "moq-chat";
static constexpr absl::string_view kParticipantPath = "participant";
static constexpr absl::string_view kCatalogPath = "catalog";
static constexpr absl::string_view kCatalogHeader = "version=1\n";
bool IsValidPath(absl::string_view path) const {
return path == absl::StrCat("/", kBasePath);
}
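  // Expects a two-element track name whose first element is
  // "moq-chat/<chat-id>/participant/<username>" and whose second element is
  // empty; returns "" on any mismatch.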
std::string GetUsernameFromFullTrackName(
FullTrackName full_track_name) const {
if (full_track_name.tuple().size() != 2) {
return "";
}
if (!full_track_name.tuple()[1].empty()) {
return "";
}
std::vector<absl::string_view> elements =
absl::StrSplit(full_track_name.tuple()[0], '/');
if (elements.size() != 4 || elements[0] != kBasePath ||
elements[1] != chat_id_ || elements[2] != kParticipantPath) {
return "";
}
return std::string(elements[3]);
}
FullTrackName GetFullTrackNameFromUsername(absl::string_view username) const {
return FullTrackName{absl::StrCat(kBasePath, "/", chat_id_, "/",
kParticipantPath, "/", username),
""};
}
FullTrackName GetCatalogName() const {
return FullTrackName{absl::StrCat(kBasePath, "/", chat_id_),
absl::StrCat("/", kCatalogPath)};
}
private:
const std::string chat_id_;
};
}
#endif | #include "quiche/quic/moqt/tools/moq_chat.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace moqt {
namespace {
class MoqChatStringsTest : public quiche::test::QuicheTest {
public:
MoqChatStrings strings_{"chat-id"};
};
TEST_F(MoqChatStringsTest, IsValidPath) {
EXPECT_TRUE(strings_.IsValidPath("/moq-chat"));
EXPECT_FALSE(strings_.IsValidPath("moq-chat"));
EXPECT_FALSE(strings_.IsValidPath("/moq-cha"));
EXPECT_FALSE(strings_.IsValidPath("/moq-chats"));
EXPECT_FALSE(strings_.IsValidPath("/moq-chat/"));
}
TEST_F(MoqChatStringsTest, GetUsernameFromFullTrackName) {
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-chat/chat-id/participant/user", ""}),
"user");
}
TEST_F(MoqChatStringsTest, GetUsernameFromFullTrackNameInvalidInput) {
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"/moq-chat/chat-id/participant/user", ""}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-chat/chat-id/participant/user/", ""}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-cha/chat-id/participant/user", ""}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-chat/chat-i/participant/user", ""}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-chat/chat-id/participan/user", ""}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-chat/chat-id/user", ""}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-chat/chat-id/participant/foo/user", ""}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-chat/chat-id/participant/user", "foo"}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"moq-chat/chat-id/participant/user"}),
"");
EXPECT_EQ(strings_.GetUsernameFromFullTrackName(
FullTrackName{"foo", "moq-chat/chat-id/participant/user", ""}),
"");
}
TEST_F(MoqChatStringsTest, GetFullTrackNameFromUsername) {
EXPECT_EQ(strings_.GetFullTrackNameFromUsername("user"),
FullTrackName("moq-chat/chat-id/participant/user", ""));
}
TEST_F(MoqChatStringsTest, GetCatalogName) {
EXPECT_EQ(strings_.GetCatalogName(),
FullTrackName("moq-chat/chat-id", "/catalog"));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/tools/moq_chat.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/tools/moq_chat_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
e063e2c6-efc4-4009-a05a-2335b45ae426 | cpp | tensorflow/tensorflow | ragged_tensor_to_tensor_op | tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc | tensorflow/core/kernels/ragged_tensor_to_tensor_op_test.cc | #define EIGEN_USE_THREADS
#include <stddef.h>
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/broadcast_to_op.h"
#include "tensorflow/core/kernels/list_kernels.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/util/ragged_to_dense_util.h"
namespace tensorflow {
namespace {
typedef Eigen::ThreadPoolDevice CPUDevice;
using ::std::vector;
const int kShapeInputIndex = 0;
const int kValueInputIndex = 1;
const int kDefaultValueInputIndex = 2;
const int kFirstPartitionInputIndex = 3;
template <typename INDEX_TYPE>
class RaggedTensorToTensorBaseOp : public OpKernel {
public:
typedef
typename ::tensorflow::TTypes<const INDEX_TYPE>::Flat RowPartitionTensor;
explicit RaggedTensorToTensorBaseOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, GetRowPartitionTypes<OpKernelConstruction>(
context, &row_partition_types_));
ragged_rank_ = GetRaggedRank(row_partition_types_);
}
RowPartitionType GetRowPartitionTypeByDimension(int dimension) {
if (row_partition_types_[0] == RowPartitionType::FIRST_DIM_SIZE) {
return row_partition_types_[dimension + 1];
} else {
return row_partition_types_[dimension];
}
}
RowPartitionTensor GetRowPartitionTensor(OpKernelContext* c, int dimension) {
if (row_partition_types_[0] == RowPartitionType::FIRST_DIM_SIZE) {
return c->input(dimension + 1 + kFirstPartitionInputIndex)
.flat<INDEX_TYPE>();
} else {
return c->input(dimension + kFirstPartitionInputIndex).flat<INDEX_TYPE>();
}
}
Status GetMaxWidth(OpKernelContext* c, int dimension, INDEX_TYPE* result) {
const RowPartitionTensor row_partition_tensor =
GetRowPartitionTensor(c, dimension - 1);
switch (GetRowPartitionTypeByDimension(dimension - 1)) {
case RowPartitionType::VALUE_ROWIDS:
*result = GetMaxWidthValueRowID(row_partition_tensor);
return absl::OkStatus();
case RowPartitionType::ROW_SPLITS:
*result = GetMaxWidthRowSplit(row_partition_tensor);
return absl::OkStatus();
default:
return errors::InvalidArgument(
"Cannot handle partition type ",
RowPartitionTypeToString(
GetRowPartitionTypeByDimension(dimension - 1)));
}
}
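  // The widest row under a ROW_SPLITS partition is the largest difference
  // between consecutive split offsets.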
static INDEX_TYPE GetMaxWidthRowSplit(const RowPartitionTensor& row_split) {
const INDEX_TYPE tensor_length = row_split.size();
if (tensor_length == 0 || tensor_length == 1) {
return 0;
}
INDEX_TYPE max_width = 0;
for (INDEX_TYPE i = 0; i < tensor_length - 1; ++i) {
const INDEX_TYPE current_width = row_split(i + 1) - row_split(i);
if (current_width > max_width) {
max_width = current_width;
}
}
return max_width;
}
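  // Under a VALUE_ROWIDS partition the widest row is the longest run of
  // equal row ids; value_rowids is assumed to be sorted.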
static INDEX_TYPE GetMaxWidthValueRowID(
const RowPartitionTensor& value_rowids) {
const INDEX_TYPE index_length = value_rowids.size();
if (index_length == 0) {
return 0;
}
INDEX_TYPE first_equal_index = 0;
INDEX_TYPE first_equal_index_value = value_rowids(0);
INDEX_TYPE max_width = 0;
for (INDEX_TYPE i = 1; i < index_length; ++i) {
const INDEX_TYPE value = value_rowids(i);
if (value != first_equal_index_value) {
first_equal_index_value = value;
max_width = std::max(i - first_equal_index, max_width);
first_equal_index = i;
}
}
return std::max(index_length - first_equal_index, max_width);
}
Status CalculateOutputSize(INDEX_TYPE first_dim, OpKernelContext* c,
vector<INDEX_TYPE>* result) {
TensorShapeProto value_shape_proto;
c->input(kValueInputIndex).shape().AsProto(&value_shape_proto);
TensorShapeProto default_value_shape_proto;
c->input(kDefaultValueInputIndex)
.shape()
.AsProto(&default_value_shape_proto);
TensorShapeProto output_shape_proto;
TF_RETURN_IF_ERROR(ValidateDefaultValueShape(default_value_shape_proto,
value_shape_proto));
TensorShapeProto shape_proto;
{
PartialTensorShape partial_tensor_shape;
TF_RETURN_IF_ERROR(TensorShapeFromTensor(c->input(kShapeInputIndex),
&partial_tensor_shape));
partial_tensor_shape.AsProto(&shape_proto);
}
TF_RETURN_IF_ERROR(CombineRaggedTensorToTensorShapes(
ragged_rank_, shape_proto, value_shape_proto, &output_shape_proto));
result->reserve(output_shape_proto.dim_size());
for (const TensorShapeProto::Dim& dim : output_shape_proto.dim()) {
result->push_back(dim.size());
}
if ((*result)[0] < 0) {
(*result)[0] = first_dim;
}
for (int i = 1; i <= ragged_rank_; ++i) {
if ((*result)[i] < 0) {
TF_RETURN_IF_ERROR(GetMaxWidth(c, i, &(*result)[i]));
}
}
return absl::OkStatus();
}
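  // Maps each row of the first dimension to its flat output offset
  // (row * output_index_multiplier); rows beyond first_dimension_output get
  // -1, meaning their values are dropped from the output.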
void CalculateFirstParentOutputIndex(INDEX_TYPE first_dimension,
INDEX_TYPE output_index_multiplier,
INDEX_TYPE first_dimension_output,
vector<INDEX_TYPE>* result) {
const INDEX_TYPE min_dimension =
std::min(first_dimension, first_dimension_output);
result->reserve(first_dimension);
int current_output_index = 0;
for (INDEX_TYPE i = 0; i < min_dimension;
++i, current_output_index += output_index_multiplier) {
result->push_back(current_output_index);
}
for (INDEX_TYPE i = min_dimension; i < first_dimension; ++i) {
result->push_back(-1);
}
DCHECK_EQ(result->size(), first_dimension);
}
Status CalculateOutputIndexRowSplit(
const RowPartitionTensor& row_split,
const vector<INDEX_TYPE>& parent_output_index,
INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size,
vector<INDEX_TYPE>* result) {
INDEX_TYPE row_split_size = row_split.size();
if (row_split_size > 0) {
result->reserve(row_split(row_split_size - 1));
}
for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) {
INDEX_TYPE row_length = row_split(i + 1) - row_split(i);
INDEX_TYPE real_length = std::min(output_size, row_length);
INDEX_TYPE parent_output_index_current = parent_output_index[i];
if (parent_output_index_current == -1) {
real_length = 0;
}
for (INDEX_TYPE j = 0; j < real_length; ++j) {
result->push_back(parent_output_index_current);
parent_output_index_current += output_index_multiplier;
}
for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) {
result->push_back(-1);
}
}
if (row_split_size > 0 && result->size() != row_split(row_split_size - 1)) {
return errors::InvalidArgument("Invalid row split size.");
}
return absl::OkStatus();
}
Status CalculateOutputIndexValueRowID(
const RowPartitionTensor& value_rowids,
const vector<INDEX_TYPE>& parent_output_index,
INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size,
vector<INDEX_TYPE>* result) {
const INDEX_TYPE index_size = value_rowids.size();
result->reserve(index_size);
if (index_size == 0) {
return absl::OkStatus();
}
INDEX_TYPE current_output_column = 0;
INDEX_TYPE current_value_rowid = value_rowids(0);
if (current_value_rowid >= parent_output_index.size()) {
return errors::InvalidArgument(
"Got current_value_rowid=", current_value_rowid,
" which is not less than ", parent_output_index.size());
}
INDEX_TYPE current_output_index = parent_output_index[current_value_rowid];
result->push_back(current_output_index);
for (INDEX_TYPE i = 1; i < index_size; ++i) {
INDEX_TYPE next_value_rowid = value_rowids(i);
if (next_value_rowid == current_value_rowid) {
if (current_output_index >= 0) {
++current_output_column;
if (current_output_column < output_size) {
current_output_index += output_index_multiplier;
} else {
current_output_index = -1;
}
}
} else {
current_output_column = 0;
current_value_rowid = next_value_rowid;
if (next_value_rowid >= parent_output_index.size()) {
return errors::InvalidArgument(
"Got next_value_rowid=", next_value_rowid,
" which is not less than ", parent_output_index.size());
}
current_output_index = parent_output_index[next_value_rowid];
}
result->push_back(current_output_index);
}
if (result->size() != value_rowids.size()) {
return errors::InvalidArgument("Invalid row ids.");
}
return absl::OkStatus();
}
Status CalculateOutputIndex(OpKernelContext* context, int dimension,
const vector<INDEX_TYPE>& parent_output_index,
INDEX_TYPE output_index_multiplier,
INDEX_TYPE output_size,
vector<INDEX_TYPE>* result) {
const RowPartitionTensor row_partition_tensor =
GetRowPartitionTensor(context, dimension);
auto partition_type = GetRowPartitionTypeByDimension(dimension);
switch (partition_type) {
case RowPartitionType::VALUE_ROWIDS:
return CalculateOutputIndexValueRowID(
row_partition_tensor, parent_output_index, output_index_multiplier,
output_size, result);
case RowPartitionType::ROW_SPLITS:
if (row_partition_tensor.size() - 1 > parent_output_index.size()) {
return errors::InvalidArgument(
"Row partition size is greater than output size: ",
row_partition_tensor.size() - 1, " > ",
parent_output_index.size());
}
return CalculateOutputIndexRowSplit(
row_partition_tensor, parent_output_index, output_index_multiplier,
output_size, result);
default:
return errors::InvalidArgument(
"Unsupported partition type:",
RowPartitionTypeToString(partition_type));
}
}
Status GetFirstDimensionSize(OpKernelContext* context, INDEX_TYPE* result) {
const Tensor first_partition_tensor =
context->input(kFirstPartitionInputIndex);
if (row_partition_types_.empty()) {
return errors::InvalidArgument("No row_partition_types given.");
}
const RowPartitionType first_partition_type = row_partition_types_[0];
switch (first_partition_type) {
case RowPartitionType::FIRST_DIM_SIZE:
*result = first_partition_tensor.scalar<INDEX_TYPE>()();
return absl::OkStatus();
case RowPartitionType::VALUE_ROWIDS:
return errors::InvalidArgument(
"Cannot handle VALUE_ROWIDS in first dimension.");
case RowPartitionType::ROW_SPLITS:
*result = first_partition_tensor.shape().dim_size(0) - 1;
return absl::OkStatus();
default:
return errors::InvalidArgument(
"Cannot handle type ",
RowPartitionTypeToString(first_partition_type));
}
}
void Compute(OpKernelContext* context) override {
INDEX_TYPE first_dimension;
const Tensor first_partition_tensor =
context->input(kFirstPartitionInputIndex);
OP_REQUIRES(context, first_partition_tensor.NumElements() > 0,
errors::InvalidArgument("Invalid first partition input. Tensor "
"requires at least one element."));
OP_REQUIRES_OK(context, GetFirstDimensionSize(context, &first_dimension));
vector<INDEX_TYPE> output_size;
OP_REQUIRES_OK(context,
CalculateOutputSize(first_dimension, context, &output_size));
vector<INDEX_TYPE> multiplier;
multiplier.resize(ragged_rank_ + 1);
multiplier[multiplier.size() - 1] = 1;
for (int i = multiplier.size() - 2; i >= 0; --i) {
multiplier[i] = multiplier[i + 1] * output_size[i + 1];
}
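    // multiplier[i] now holds the row-major stride of output dimension i.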
TensorShape output_shape;
OP_REQUIRES_OK(context,
TensorShapeUtils::MakeShape(output_size, &output_shape));
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, output_shape, &output_tensor));
const INDEX_TYPE full_size = multiplier[0] * output_size[0];
if (full_size > 0) {
vector<INDEX_TYPE> output_index, new_output_index;
int nvals = context->input(kValueInputIndex).shape().dim_size(0);
output_index.reserve(nvals);
new_output_index.reserve(nvals);
CalculateFirstParentOutputIndex(first_dimension, multiplier[0],
output_size[0], &output_index);
for (int i = 1; i <= ragged_rank_; ++i) {
OP_REQUIRES_OK(context, CalculateOutputIndex(
context, i - 1, output_index, multiplier[i],
output_size[i], &new_output_index));
output_index.swap(new_output_index);
new_output_index.clear();
}
SetOutput(context, ragged_rank_, output_index, output_tensor);
}
}
virtual void SetOutput(OpKernelContext* context, int ragged_rank,
const vector<INDEX_TYPE>& output_index,
Tensor* output_tensor) = 0;
private:
vector<RowPartitionType> row_partition_types_;
int ragged_rank_;
};
template <typename VALUE_TYPE, typename INDEX_TYPE>
void slow_copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) {
for (INDEX_TYPE index = 0; index < size; ++index) {
dst[index] = src[index];
}
}
template <typename VALUE_TYPE, typename INDEX_TYPE>
void copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) {
memcpy(dst, src, size * sizeof(VALUE_TYPE));
}
template <>
void copy_array<tstring, int64_t>(tstring* dst, const tstring* src,
int64_t size) {
slow_copy_array(dst, src, size);
}
template <>
void copy_array<tstring, int32>(tstring* dst, const tstring* src,
int32_t size) {
slow_copy_array(dst, src, size);
}
template <>
void copy_array<Eigen::half, int64_t>(Eigen::half* dst, const Eigen::half* src,
int64_t size) {
slow_copy_array(dst, src, size);
}
template <>
void copy_array<Eigen::half, int32>(Eigen::half* dst, const Eigen::half* src,
int32_t size) {
slow_copy_array(dst, src, size);
}
template <typename VALUE_TYPE, typename INDEX_TYPE>
class RaggedTensorToTensorOp : public RaggedTensorToTensorBaseOp<INDEX_TYPE> {
public:
explicit RaggedTensorToTensorOp(OpKernelConstruction* context)
: RaggedTensorToTensorBaseOp<INDEX_TYPE>(context) {}
void SetOutput(OpKernelContext* context, int ragged_rank,
const vector<INDEX_TYPE>& output_index,
Tensor* output_tensor) override {
if (output_tensor->NumElements() == 0) return;
const auto& values_tensor = context->input(kValueInputIndex);
const VALUE_TYPE* values_base = values_tensor.flat<VALUE_TYPE>().data();
const auto& default_value_tensor = context->input(kDefaultValueInputIndex);
VALUE_TYPE* output_base = output_tensor->flat<VALUE_TYPE>().data();
TensorShape element_shape = output_tensor->shape();
element_shape.RemoveDimRange(0, ragged_rank + 1);
int value_element_size = element_shape.num_elements();
size_t output_index_size = output_index.size();
const VALUE_TYPE* default_value =
default_value_tensor.flat<VALUE_TYPE>().data();
Tensor bcast_default;
if (default_value_tensor.NumElements() != value_element_size &&
default_value_tensor.NumElements() != 1) {
const auto& src_shape = default_value_tensor.shape();
BCast bcast(BCast::FromShape(src_shape), BCast::FromShape(element_shape),
true);
OP_REQUIRES(context, bcast.IsValid(),
errors::InvalidArgument("Error broadcasting default_value"));
OP_REQUIRES_OK(context,
context->allocate_temp(default_value_tensor.dtype(),
element_shape, &bcast_default));
const CPUDevice& device = context->eigen_device<CPUDevice>();
functor::BroadcastTo<CPUDevice, VALUE_TYPE>()(
device, context, bcast_default, element_shape, default_value_tensor,
src_shape, bcast);
default_value = bcast_default.flat<VALUE_TYPE>().data();
}
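    // Walk output_index and coalesce runs of consecutive destination slots
    // into single bulk copies; gaps are filled with the (possibly broadcast)
    // default value, and entries mapped to -1 are skipped.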
INDEX_TYPE src_start = 0;
INDEX_TYPE dst_start = 0;
INDEX_TYPE dst_end = 0;
for (int src_i = 0; src_i <= output_index_size; ++src_i) {
INDEX_TYPE dst_i = src_i < output_index_size ? output_index[src_i] : -1;
if (dst_i == dst_end) {
++dst_end;
continue;
}
if (dst_start < dst_end) {
const VALUE_TYPE* src = values_base + src_start * value_element_size;
VALUE_TYPE* dst = output_base + dst_start * value_element_size;
INDEX_TYPE nvals = (dst_end - dst_start) * value_element_size;
copy_array<VALUE_TYPE, INDEX_TYPE>(dst, src, nvals);
}
if (src_i >= output_index_size) {
size_t output_size = output_tensor->NumElements();
dst_i = output_size / value_element_size;
}
if (dst_i > dst_end) {
if (default_value_tensor.NumElements() == 1) {
std::fill(output_base + dst_end * value_element_size,
output_base + dst_i * value_element_size, *default_value);
dst_end = dst_i;
} else {
while (dst_i > dst_end) {
VALUE_TYPE* dst = output_base + dst_end * value_element_size;
copy_array<VALUE_TYPE, INDEX_TYPE>(dst, default_value,
value_element_size);
++dst_end;
}
}
}
if (dst_i < 0) {
src_start = src_i + 1;
dst_start = dst_end;
} else {
src_start = src_i;
dst_start = dst_end;
dst_end = dst_start + 1;
}
}
}
};
#define REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, index_type) \
REGISTER_KERNEL_BUILDER(Name("RaggedTensorToTensor") \
.Device(DEVICE_CPU) \
.TypeConstraint<value_type>("T") \
.TypeConstraint<index_type>("Tindex"), \
RaggedTensorToTensorOp<value_type, index_type>);
#define REGISTER_CPU_KERNEL(value_type) \
REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, int64_t); \
REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, tensorflow::int32);
TF_CALL_POD_TYPES(REGISTER_CPU_KERNEL);
TF_CALL_string(REGISTER_CPU_KERNEL);
TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_KERNEL);
TF_CALL_quint16(REGISTER_CPU_KERNEL);
TF_CALL_qint16(REGISTER_CPU_KERNEL);
#undef REGISTER_CPU_KERNEL
}
} | #include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
template <typename VALUE_TYPE>
struct ShapeAndValues {
TensorShape shape;
std::vector<VALUE_TYPE> values;
};
template <typename VALUE_TYPE>
ShapeAndValues<VALUE_TYPE> createVector(const std::vector<VALUE_TYPE>& values) {
TensorShape shape({static_cast<int64_t>(values.size())});
return {shape, values};
}
template <typename VALUE_TYPE>
ShapeAndValues<VALUE_TYPE> createScalar(const VALUE_TYPE& values) {
TensorShape shape({});
return {shape, {values}};
}
class RaggedTensorToTensorOpTest : public ::tensorflow::OpsTestBase {
protected:
template <typename VALUE_TYPE, typename INDEX_TYPE>
void BuildRaggedTensorToTensorGraph(
const TensorShape& shape, const std::vector<string>& row_partition_types,
const ShapeAndValues<VALUE_TYPE>& values,
const ShapeAndValues<VALUE_TYPE>& default_value,
const std::vector<ShapeAndValues<INDEX_TYPE>>& row_partition_tensors) {
const auto& value_dtype = DataTypeToEnum<VALUE_TYPE>::v();
const auto& index_dtype = DataTypeToEnum<INDEX_TYPE>::v();
int num_row_partition_tensors = row_partition_tensors.size();
TF_ASSERT_OK(
NodeDefBuilder("tested_op", "RaggedTensorToTensor")
.Attr("T", value_dtype)
.Attr("Tindex", index_dtype)
.Attr("num_row_partition_tensors", num_row_partition_tensors)
.Attr("row_partition_types", row_partition_types)
.Input(FakeInput(index_dtype))
.Input(FakeInput(value_dtype))
.Input(FakeInput(value_dtype))
.Input(FakeInput(num_row_partition_tensors,
index_dtype))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
{
std::vector<INDEX_TYPE> shape_as_vector;
for (const auto& dim : shape.dim_sizes()) {
shape_as_vector.push_back(dim);
}
ShapeAndValues<INDEX_TYPE> shape_as_tensor =
createVector(shape_as_vector);
AddInputFromArray<INDEX_TYPE>(shape_as_tensor.shape,
shape_as_tensor.values);
}
AddInputFromArray<VALUE_TYPE>(values.shape, values.values);
AddInputFromArray<VALUE_TYPE>(default_value.shape, default_value.values);
for (const auto& row_partition_tensor : row_partition_tensors) {
AddInputFromArray<INDEX_TYPE>(row_partition_tensor.shape,
row_partition_tensor.values);
}
}
};
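// Each test builds a RaggedTensorToTensor node from a dense output shape,
// flat values, a default value, and a list of row-partition tensors, then
// checks the densified result.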
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor) {
BuildRaggedTensorToTensorGraph<float, int32>(
TensorShape({4, 4}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS"},
createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}),
createScalar<float>(1.5),
{createScalar<int32>(4), createVector<int32>({0, 0, 0, 2, 2, 2, 2, 3, 3})}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(
*GetOutput(0),
test::AsTensor<float>({.1, .2, .3, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5, .6,
.7, .8, .9, 1.5, 1.5},
TensorShape({4, 4})),
0.01);
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensorRowSplits) {
BuildRaggedTensorToTensorGraph<float, int32>(
TensorShape({4, 4}),
{"ROW_SPLITS"},
createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}),
createScalar<float>(1.5),
{createVector<int32>({0, 3, 3, 7, 9})}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(
*GetOutput(0),
test::AsTensor<float>({.1, .2, .3, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5, .6,
.7, .8, .9, 1.5, 1.5},
TensorShape({4, 4})),
0.01);
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_3DParams) {
BuildRaggedTensorToTensorGraph<float, int32>(
TensorShape({5, 2, 3}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS",
"VALUE_ROWIDS"},
createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}),
createScalar<float>(1.5),
{
createScalar<int32>(5),
createVector<int32>({0, 1, 1, 3, 3, 4}),
createVector<int32>({1, 1, 2, 3, 3, 4, 4, 4, 5}),
}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(
*GetOutput(0),
test::AsTensor<float>({1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .1, .2, 1.5, .3,
1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5,
1.5, .6, .7, .8, .9, 1.5, 1.5, 1.5, 1.5, 1.5},
TensorShape({5, 2, 3})),
0.1);
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_3DParamsRowSplits) {
BuildRaggedTensorToTensorGraph<float, int32>(
TensorShape({5, 2, 3}),
{"ROW_SPLITS", "ROW_SPLITS"},
createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}),
createScalar<float>(1.5),
{
createVector<int32>({0, 1, 3, 3, 5, 6}),
createVector<int32>({0, 0, 2, 3, 5, 8, 9}),
}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(
*GetOutput(0),
test::AsTensor<float>({1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .1, .2, 1.5, .3,
1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5,
1.5, .6, .7, .8, .9, 1.5, 1.5, 1.5, 1.5, 1.5},
TensorShape({5, 2, 3})),
0.1);
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_3DParamsRowSplits2) {
BuildRaggedTensorToTensorGraph<int64_t, int64_t>(
TensorShape({3, 2, 3}),
{"ROW_SPLITS", "ROW_SPLITS"},
createVector<int64_t>({0, 1, 2, 3}),
createScalar<int64_t>(5),
{
createVector<int64_t>({0, 2, 2, 3}),
createVector<int64_t>({0, 3, 3, 4}),
}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*GetOutput(0), test::AsTensor<int64_t>(
{0, 1, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5, 5, 5},
TensorShape({3, 2, 3})));
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_4DParams) {
BuildRaggedTensorToTensorGraph<int32, int32>(
TensorShape({4, 2, 3, 2}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS", "VALUE_ROWIDS",
"VALUE_ROWIDS"},
createVector<int32>({1, 2, 3, 4, 5, 6, 7, 8}),
createScalar<int32>(15),
{createScalar<int32>(5), createVector<int32>({0, 1, 1}),
createVector<int32>({1, 1, 1, 2}),
createVector<int32>({0, 0, 1, 1, 2, 2, 3, 3})}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int32>(
*GetOutput(0),
test::AsTensor<int32>(
{15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 1, 2, 3, 4,
5, 6, 7, 8, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
TensorShape({4, 2, 3, 2})));
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_4DParamsRowSplit) {
BuildRaggedTensorToTensorGraph<int32, int32>(
TensorShape({4, 2, 3, 2}),
{"ROW_SPLITS", "ROW_SPLITS", "ROW_SPLITS"},
createVector<int32>({1, 2, 3, 4, 5, 6, 7, 8}),
createScalar<int32>(15),
{createVector<int32>({0, 1, 3}), createVector<int32>({0, 0, 3, 4}),
createVector<int32>({0, 2, 4, 6, 8})}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int32>(
*GetOutput(0),
test::AsTensor<int32>(
{15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 1, 2, 3, 4,
5, 6, 7, 8, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
TensorShape({4, 2, 3, 2})));
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensorContractExpanded) {
BuildRaggedTensorToTensorGraph<float, int32>(
TensorShape({3, 5}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS"},
createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}),
createScalar<float>(1.5),
{createScalar<int32>(4), createVector<int32>({0, 0, 0, 2, 2, 2, 2, 3, 3})}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(
*GetOutput(0),
test::AsTensor<float>({.1, .2, .3, 1.5, 1.5,
1.5, 1.5, 1.5, 1.5, 1.5,
.4, .5, .6, .7, 1.5},
TensorShape({3, 5})),
0.01);
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensorContractExpandedDense) {
BuildRaggedTensorToTensorGraph<float, int32>(
TensorShape({3, 5, 2}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS"},
ShapeAndValues<float>{TensorShape({9, 2}),
{.1, 1.1, .2, 1.2, .3, 1.3, .4, 1.4, .5, 1.5, .6,
1.6, .7, 1.7, .8, 1.8, .9, 1.9}},
createScalar<float>(1.5),
{createScalar<int32>(4), createVector<int32>({0, 0, 0, 2, 2, 2, 2, 3, 3})}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(
*GetOutput(0),
test::AsTensor<float>(
{.1, 1.1, .2, 1.2, .3, 1.3, 1.5, 1.5, 1.5, 1.5,
1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5,
.4, 1.4, .5, 1.5, .6, 1.6, .7, 1.7, 1.5, 1.5},
TensorShape({3, 5, 2})),
0.01);
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensorConstrained) {
BuildRaggedTensorToTensorGraph<float, int32>(
TensorShape({3, 3}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS"},
createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}),
createScalar<float>(1.5),
{createScalar<int32>(4), createVector<int32>({0, 0, 0, 2, 2, 2, 2, 3, 3})}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(*GetOutput(0),
test::AsTensor<float>(
{
.1, .2, .3,
1.5, 1.5, 1.5,
.4, .5, .6
},
TensorShape({3, 3})),
0.01);
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_3DParamsConstrained) {
BuildRaggedTensorToTensorGraph<float, int32>(
TensorShape({4, 1, 2}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS",
"VALUE_ROWIDS"},
createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}),
createScalar<float>(1.5),
{
createScalar<int32>(5),
createVector<int32>({0, 1, 1, 3, 3, 4}),
createVector<int32>({1, 1, 2, 3, 3, 4, 4, 4, 5}),
}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(
*GetOutput(0),
test::AsTensor<float>({1.5, 1.5, .1, .2, 1.5, 1.5, .4, .5},
TensorShape({4, 1, 2})),
0.01);
}
TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_4DParamsConstrained) {
BuildRaggedTensorToTensorGraph<int32, int32>(
TensorShape({2, 2, 2, 2}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS", "VALUE_ROWIDS",
"VALUE_ROWIDS"},
createVector<int32>({1, 2, 3, 4, 5, 6, 7, 8}),
createScalar<int32>(15),
{createScalar<int32>(5), createVector<int32>({0, 1, 1}),
createVector<int32>({1, 1, 1, 2}),
createVector<int32>({0, 0, 1, 1, 2, 2, 3, 3})}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int32>(*GetOutput(0), test::AsTensor<int32>(
{
15, 15, 15, 15,
15, 15, 15, 15,
1, 2, 3, 4,
7, 8, 15, 15,
},
TensorShape({2, 2, 2, 2})));
}
TEST_F(RaggedTensorToTensorOpTest, ShapeWrongDimensions) {
BuildRaggedTensorToTensorGraph<int32, int32>(
TensorShape({10, 7, 10, 20}),
{"FIRST_DIM_SIZE", "VALUE_ROWIDS",
"VALUE_ROWIDS"},
createVector<int32>({1, 2, 3, 4}),
createScalar<int32>(15),
{createScalar<int32>(5), createVector<int32>({0, 1, 1}),
createVector<int32>({1, 1, 1, 2})}
);
EXPECT_EQ(errors::IsInvalidArgument(RunOpKernel()), true);
}
class RaggedTensorToTensorOpUnknownShapeTest
: public ::tensorflow::OpsTestBase {
protected:
std::unique_ptr<ShapeInferenceTestOp> op_;
void SetAttributes(const absl::Span<const string> row_partition_types,
int num_row_partition_tensors) {
op_ = std::make_unique<ShapeInferenceTestOp>("RaggedTensorToTensor");
SetAttrValue(row_partition_types,
&((*op_->node_def.mutable_attr())["row_partition_types"]));
(*op_->node_def.mutable_attr())["num_row_partition_tensors"].set_i(
num_row_partition_tensors);
}
};
TEST_F(RaggedTensorToTensorOpUnknownShapeTest, ValueRowIDs) {
SetAttributes(absl::Span<const string>{"FIRST_DIM_SIZE", "VALUE_ROWIDS"}, 2);
INFER_OK(*op_, "?;?;?;?;?", "?");
INFER_OK(*op_, "?;[6];[];[];[6]", "[?,?]");
INFER_OK(*op_, "?;[6];?;[];[6]", "[?,?]");
INFER_OK(*op_, "?;?;[];[];[6]", "?");
INFER_OK(*op_, "?;[6];?;[];[6]", "[?,?]");
INFER_OK(*op_, "?;[6,2];?;[];[6]", "[?,?,2]");
INFER_OK(*op_, "?;[6,2];[2];[];[6]", "[?,?,2]");
INFER_OK(*op_, "?;[6,2,7];[2,7];[];[6]", "[?,?,2,7]");
INFER_ERROR(
"default_value.shape=[3] and rt_input.flat_values.shape=[6,2] "
"are incompatible",
*op_, "?;[6,2];[3];[];[6]");
INFER_ERROR(
"default_value.shape=[2,2] and rt_input.flat_values.shape="
"[6,2,1,2] are incompatible",
*op_, "?;[6,2,1,2];[2,2];[];[6]");
INFER_ERROR("must be a vector", *op_, "?;[6];[];[];[3,6]");
INFER_ERROR("must be a scalar", *op_, "?;[6];[];[7];[3]");
}
TEST_F(RaggedTensorToTensorOpUnknownShapeTest, RowSplits) {
SetAttributes(absl::Span<const string>{"ROW_SPLITS"}, 1);
INFER_OK(*op_, "?;?;?;?", "?");
INFER_OK(*op_, "?;[3];[];[6]", "[?,?]");
INFER_OK(*op_, "?;?;?;?", "?");
INFER_OK(*op_, "?;[3,2];[2];[6]", "[?,?,2]");
INFER_OK(*op_, "?;[3,2,7];[2,7];[6]", "[?,?,2,7]");
INFER_OK(*op_, "?;[3,2,7];[2,7];[6]", "[?,?,2,7]");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_tensor_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7efae547-41ee-469e-b08b-82b375f6c277 | cpp | tensorflow/tensorflow | label_image | tensorflow/lite/examples/label_image/label_image.cc | tensorflow/lite/examples/label_image/label_image_test.cc | #include "tensorflow/lite/examples/label_image/label_image.h"
#include <fcntl.h>
#include <getopt.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
#include "tensorflow/lite/examples/label_image/get_top_n.h"
#include "tensorflow/lite/examples/label_image/log.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace label_image {
double get_us(struct timeval t) { return (t.tv_sec * 1000000 + t.tv_usec); }
using TfLiteDelegatePtr = tflite::Interpreter::TfLiteDelegatePtr;
using ProvidedDelegateList = tflite::tools::ProvidedDelegateList;
class DelegateProviders {
public:
DelegateProviders() : delegate_list_util_(¶ms_) {
delegate_list_util_.AddAllDelegateParams();
delegate_list_util_.AppendCmdlineFlags(flags_);
params_.RemoveParam("help");
delegate_list_util_.RemoveCmdlineFlag(flags_, "help");
}
bool InitFromCmdlineArgs(int* argc, const char** argv) {
return Flags::Parse(argc, argv, flags_);
}
void MergeSettingsIntoParams(const Settings& s) {
if (s.gl_backend) {
if (!params_.HasParam("use_gpu")) {
LOG(WARN) << "GPU delegate execution provider isn't linked or GPU "
"delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_gpu", true);
if (params_.HasParam("gpu_inference_for_sustained_speed")) {
params_.Set<bool>("gpu_inference_for_sustained_speed", true);
}
params_.Set<bool>("gpu_precision_loss_allowed", s.allow_fp16);
}
}
if (s.accel) {
if (!params_.HasParam("use_nnapi")) {
LOG(WARN) << "NNAPI delegate execution provider isn't linked or NNAPI "
"delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_nnapi", true);
params_.Set<bool>("nnapi_allow_fp16", s.allow_fp16);
}
}
if (s.hexagon_delegate) {
if (!params_.HasParam("use_hexagon")) {
LOG(WARN) << "Hexagon delegate execution provider isn't linked or "
"Hexagon delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_hexagon", true);
params_.Set<bool>("hexagon_profiling", s.profiling);
}
}
if (s.xnnpack_delegate) {
if (!params_.HasParam("use_xnnpack")) {
LOG(WARN) << "XNNPACK delegate execution provider isn't linked or "
"XNNPACK delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_xnnpack", true);
params_.Set<int32_t>("num_threads", s.number_of_threads);
}
}
}
std::vector<ProvidedDelegateList::ProvidedDelegate> CreateAllDelegates()
const {
return delegate_list_util_.CreateAllRankedDelegates();
}
std::string GetHelpMessage(const std::string& cmdline) const {
return Flags::Usage(cmdline, flags_);
}
private:
tflite::tools::ToolParams params_;
ProvidedDelegateList delegate_list_util_;
std::vector<tflite::Flag> flags_;
};
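// Reads one label per line. The result is padded with empty strings to a
// multiple of 16, presumably so that indexing stays valid when the model's
// output tensor is padded.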
TfLiteStatus ReadLabelsFile(const string& file_name,
std::vector<string>* result,
size_t* found_label_count) {
std::ifstream file(file_name);
if (!file) {
LOG(ERROR) << "Labels file " << file_name << " not found";
return kTfLiteError;
}
result->clear();
string line;
while (std::getline(file, line)) {
result->push_back(line);
}
*found_label_count = result->size();
const int padding = 16;
while (result->size() % padding) {
result->emplace_back();
}
return kTfLiteOk;
}
void PrintProfilingInfo(const profiling::ProfileEvent* e,
uint32_t subgraph_index, uint32_t op_index,
TfLiteRegistration registration) {
LOG(INFO) << std::fixed << std::setw(10) << std::setprecision(3)
<< (e->elapsed_time) / 1000.0 << ", Subgraph " << std::setw(3)
<< std::setprecision(3) << subgraph_index << ", Node "
<< std::setw(3) << std::setprecision(3) << op_index << ", OpCode "
<< std::setw(3) << std::setprecision(3) << registration.builtin_code
<< ", "
<< EnumNameBuiltinOperator(
static_cast<BuiltinOperator>(registration.builtin_code));
}
void RunInference(Settings* settings,
const DelegateProviders& delegate_providers) {
  if (settings->model_name.empty()) {
LOG(ERROR) << "no model file name";
exit(-1);
}
std::unique_ptr<tflite::FlatBufferModel> model;
std::unique_ptr<tflite::Interpreter> interpreter;
model = tflite::FlatBufferModel::BuildFromFile(settings->model_name.c_str());
if (!model) {
LOG(ERROR) << "Failed to mmap model " << settings->model_name;
exit(-1);
}
settings->model = model.get();
LOG(INFO) << "Loaded model " << settings->model_name;
model->error_reporter();
LOG(INFO) << "resolved reporter";
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder(*model, resolver)(&interpreter);
if (!interpreter) {
LOG(ERROR) << "Failed to construct interpreter";
exit(-1);
}
interpreter->SetAllowFp16PrecisionForFp32(settings->allow_fp16);
if (settings->verbose) {
LOG(INFO) << "tensors size: " << interpreter->tensors_size();
LOG(INFO) << "nodes size: " << interpreter->nodes_size();
LOG(INFO) << "inputs: " << interpreter->inputs().size();
LOG(INFO) << "input(0) name: " << interpreter->GetInputName(0);
int t_size = interpreter->tensors_size();
for (int i = 0; i < t_size; i++) {
if (interpreter->tensor(i)->name)
LOG(INFO) << i << ": " << interpreter->tensor(i)->name << ", "
<< interpreter->tensor(i)->bytes << ", "
<< interpreter->tensor(i)->type << ", "
<< interpreter->tensor(i)->params.scale << ", "
<< interpreter->tensor(i)->params.zero_point;
}
}
if (settings->number_of_threads != -1) {
interpreter->SetNumThreads(settings->number_of_threads);
}
int image_width = 224;
int image_height = 224;
int image_channels = 3;
std::vector<uint8_t> in = read_bmp(settings->input_bmp_name, &image_width,
&image_height, &image_channels, settings);
int input = interpreter->inputs()[0];
if (settings->verbose) LOG(INFO) << "input: " << input;
const std::vector<int> inputs = interpreter->inputs();
const std::vector<int> outputs = interpreter->outputs();
if (settings->verbose) {
LOG(INFO) << "number of inputs: " << inputs.size();
LOG(INFO) << "number of outputs: " << outputs.size();
}
auto profiler = std::make_unique<profiling::Profiler>(
settings->max_profiling_buffer_entries);
interpreter->SetProfiler(profiler.get());
auto delegates = delegate_providers.CreateAllDelegates();
for (auto& delegate : delegates) {
const auto delegate_name = delegate.provider->GetName();
if (interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate)) !=
kTfLiteOk) {
LOG(ERROR) << "Failed to apply " << delegate_name << " delegate.";
exit(-1);
} else {
LOG(INFO) << "Applied " << delegate_name << " delegate.";
}
}
if (interpreter->AllocateTensors() != kTfLiteOk) {
LOG(ERROR) << "Failed to allocate tensors!";
exit(-1);
}
if (settings->verbose) PrintInterpreterState(interpreter.get());
TfLiteIntArray* dims = interpreter->tensor(input)->dims;
int wanted_height = dims->data[1];
int wanted_width = dims->data[2];
int wanted_channels = dims->data[3];
settings->input_type = interpreter->tensor(input)->type;
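  // Resize the decoded BMP to the network's expected HxWxC input,
  // dispatching on the input tensor's element type.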
switch (settings->input_type) {
case kTfLiteFloat32:
resize<float>(interpreter->typed_tensor<float>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
case kTfLiteInt8:
resize<int8_t>(interpreter->typed_tensor<int8_t>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
case kTfLiteUInt8:
resize<uint8_t>(interpreter->typed_tensor<uint8_t>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
default:
LOG(ERROR) << "cannot handle input type "
<< interpreter->tensor(input)->type << " yet";
exit(-1);
}
if (settings->profiling) profiler->StartProfiling();
for (int i = 0; i < settings->number_of_warmup_runs; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
}
}
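  // Measure loop_count timed invocations with gettimeofday and report the
  // average per-invocation latency in milliseconds.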
struct timeval start_time, stop_time;
gettimeofday(&start_time, nullptr);
for (int i = 0; i < settings->loop_count; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
}
}
gettimeofday(&stop_time, nullptr);
LOG(INFO) << "invoked";
LOG(INFO) << "average time: "
<< (get_us(stop_time) - get_us(start_time)) /
(settings->loop_count * 1000)
<< " ms";
if (settings->profiling) {
profiler->StopProfiling();
auto profile_events = profiler->GetProfileEvents();
for (int i = 0; i < profile_events.size(); i++) {
auto subgraph_index = profile_events[i]->extra_event_metadata;
auto op_index = profile_events[i]->event_metadata;
const auto subgraph = interpreter->subgraph(subgraph_index);
const auto node_and_registration =
subgraph->node_and_registration(op_index);
const TfLiteRegistration registration = node_and_registration->second;
PrintProfilingInfo(profile_events[i], subgraph_index, op_index,
registration);
}
}
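  // Extract the top-N results from the output tensor, ignoring scores below
  // the threshold.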
const float threshold = 0.001f;
std::vector<std::pair<float, int>> top_results;
int output = interpreter->outputs()[0];
TfLiteIntArray* output_dims = interpreter->tensor(output)->dims;
auto output_size = output_dims->data[output_dims->size - 1];
switch (interpreter->tensor(output)->type) {
case kTfLiteFloat32:
get_top_n<float>(interpreter->typed_output_tensor<float>(0), output_size,
settings->number_of_results, threshold, &top_results,
settings->input_type);
break;
case kTfLiteInt8:
get_top_n<int8_t>(interpreter->typed_output_tensor<int8_t>(0),
output_size, settings->number_of_results, threshold,
&top_results, settings->input_type);
break;
case kTfLiteUInt8:
get_top_n<uint8_t>(interpreter->typed_output_tensor<uint8_t>(0),
output_size, settings->number_of_results, threshold,
&top_results, settings->input_type);
break;
default:
LOG(ERROR) << "cannot handle output type "
<< interpreter->tensor(output)->type << " yet";
exit(-1);
}
std::vector<string> labels;
size_t label_count;
if (ReadLabelsFile(settings->labels_file_name, &labels, &label_count) !=
kTfLiteOk)
exit(-1);
for (const auto& result : top_results) {
const float confidence = result.first;
const int index = result.second;
LOG(INFO) << confidence << ": " << index << " " << labels[index];
}
interpreter.reset();
}
void display_usage(const DelegateProviders& delegate_providers) {
LOG(INFO)
<< "\n"
<< delegate_providers.GetHelpMessage("label_image")
<< "\t--accelerated, -a: [0|1] use Android NNAPI or not\n"
<< "\t--allow_fp16, -f: [0|1], allow running fp32 models with fp16 or "
"not\n"
<< "\t--count, -c: loop interpreter->Invoke() for certain times\n"
<< "\t--gl_backend, -g: [0|1]: use GL GPU Delegate on Android\n"
<< "\t--hexagon_delegate, -j: [0|1]: use Hexagon Delegate on Android\n"
<< "\t--input_mean, -b: input mean\n"
<< "\t--input_std, -s: input standard deviation\n"
<< "\t--image, -i: image_name.bmp\n"
<< "\t--labels, -l: labels for the model\n"
<< "\t--tflite_model, -m: model_name.tflite\n"
<< "\t--profiling, -p: [0|1], profiling or not\n"
<< "\t--num_results, -r: number of results to show\n"
<< "\t--threads, -t: number of threads\n"
<< "\t--verbose, -v: [0|1] print more information\n"
<< "\t--warmup_runs, -w: number of warmup runs\n"
<< "\t--xnnpack_delegate, -x [0:1]: xnnpack delegate\n"
<< "\t--help, -h: Print this help message\n";
}
int Main(int argc, char** argv) {
DelegateProviders delegate_providers;
bool parse_result = delegate_providers.InitFromCmdlineArgs(
&argc, const_cast<const char**>(argv));
if (!parse_result) {
display_usage(delegate_providers);
return EXIT_FAILURE;
}
Settings s;
int c;
while (true) {
static struct option long_options[] = {
{"accelerated", required_argument, nullptr, 'a'},
{"allow_fp16", required_argument, nullptr, 'f'},
{"count", required_argument, nullptr, 'c'},
{"verbose", required_argument, nullptr, 'v'},
{"image", required_argument, nullptr, 'i'},
{"labels", required_argument, nullptr, 'l'},
{"tflite_model", required_argument, nullptr, 'm'},
{"profiling", required_argument, nullptr, 'p'},
{"threads", required_argument, nullptr, 't'},
{"input_mean", required_argument, nullptr, 'b'},
{"input_std", required_argument, nullptr, 's'},
{"num_results", required_argument, nullptr, 'r'},
{"max_profiling_buffer_entries", required_argument, nullptr, 'e'},
{"warmup_runs", required_argument, nullptr, 'w'},
{"gl_backend", required_argument, nullptr, 'g'},
{"hexagon_delegate", required_argument, nullptr, 'j'},
{"xnnpack_delegate", required_argument, nullptr, 'x'},
{"help", no_argument, nullptr, 'h'},
{nullptr, 0, nullptr, 0}};
int option_index = 0;
c = getopt_long(argc, argv, "a:b:c:d:e:f:g:i:j:l:m:p:r:s:t:v:w:x:h",
long_options, &option_index);
if (c == -1) break;
switch (c) {
case 'a':
s.accel = strtol(optarg, nullptr, 10);
break;
case 'b':
s.input_mean = strtod(optarg, nullptr);
break;
      case 'c':
        s.loop_count = strtol(optarg, nullptr, 10);
        break;
      case 'e':
        s.max_profiling_buffer_entries = strtol(optarg, nullptr, 10);
        break;
      case 'f':
        s.allow_fp16 = strtol(optarg, nullptr, 10);
        break;
      case 'g':
        s.gl_backend = strtol(optarg, nullptr, 10);
        break;
      case 'i':
        s.input_bmp_name = optarg;
        break;
      case 'j':
        // Parse the documented [0|1] flag; assigning optarg directly would
        // treat any argument, including "0", as true.
        s.hexagon_delegate = strtol(optarg, nullptr, 10);
        break;
      case 'l':
        s.labels_file_name = optarg;
        break;
      case 'm':
        s.model_name = optarg;
        break;
      case 'p':
        s.profiling = strtol(optarg, nullptr, 10);
        break;
      case 'r':
        s.number_of_results = strtol(optarg, nullptr, 10);
        break;
      case 's':
        s.input_std = strtod(optarg, nullptr);
        break;
      case 't':
        s.number_of_threads = strtol(optarg, nullptr, 10);
        break;
      case 'v':
        s.verbose = strtol(optarg, nullptr, 10);
        break;
      case 'w':
        s.number_of_warmup_runs = strtol(optarg, nullptr, 10);
        break;
      case 'x':
        s.xnnpack_delegate = strtol(optarg, nullptr, 10);
        break;
case 'h':
case '?':
display_usage(delegate_providers);
exit(-1);
default:
exit(-1);
}
}
delegate_providers.MergeSettingsIntoParams(s);
RunInference(&s, delegate_providers);
return 0;
}
}
}
int main(int argc, char** argv) {
return tflite::label_image::Main(argc, argv);
} | #include "tensorflow/lite/examples/label_image/label_image.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
#include "tensorflow/lite/examples/label_image/get_top_n.h"
namespace tflite {
namespace label_image {
TEST(LabelImageTest, GraceHopper) {
  std::string grace_hopper_file =
      "tensorflow/lite/examples/label_image/testdata/"
      "grace_hopper.bmp";
int height, width, channels;
Settings s;
s.input_type = kTfLiteUInt8;
std::vector<uint8_t> input =
      read_bmp(grace_hopper_file, &width, &height, &channels, &s);
ASSERT_EQ(height, 606);
ASSERT_EQ(width, 517);
ASSERT_EQ(channels, 3);
std::vector<uint8_t> output(606 * 517 * 3);
resize<uint8_t>(output.data(), input.data(), 606, 517, 3, 214, 214, 3, &s);
ASSERT_EQ(output[0], 0x15);
ASSERT_EQ(output[214 * 214 * 3 - 1], 0x11);
}
TEST(LabelImageTest, GetTopN) {
uint8_t in[] = {1, 1, 2, 2, 4, 4, 16, 32, 128, 64};
std::vector<std::pair<float, int>> top_results;
get_top_n<uint8_t>(in, 10, 5, 0.025, &top_results, kTfLiteUInt8);
ASSERT_EQ(top_results.size(), 4);
ASSERT_EQ(top_results[0].second, 8);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/examples/label_image/label_image.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/examples/label_image/label_image_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
923ae6f7-74bb-4941-b236-d1ca0cc1e804 | cpp | google/tensorstore | metadata | tensorstore/internal/metrics/metadata.cc | tensorstore/internal/metrics/metadata_test.cc | #include "tensorstore/internal/metrics/metadata.h"
#include <cstddef>
#include <string_view>
#include "absl/base/optimization.h"
#include "absl/strings/ascii.h"
namespace tensorstore {
namespace internal_metrics {
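// A valid metric name looks like "/path/to/metric": it starts with '/', the
// first component starts with a letter, no slash-delimited component is
// empty, component length is bounded (i - last_slash must stay <= 63), and
// only [A-Za-z0-9_] may appear between slashes.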
bool IsValidMetricName(std::string_view name) {
if (name.size() < 2) return false;
if (name[0] != '/') return false;
if (name[name.size() - 1] == '/') return false;
if (!absl::ascii_isalpha(name[1])) return false;
size_t last_slash = 0;
for (size_t i = 1; i < name.size(); i++) {
const auto ch = name[i];
if (ch == '/') {
if (i - last_slash == 1) return false;
if (i - last_slash > 63) return false;
last_slash = i;
} else if (ch != '_' && !absl::ascii_isalnum(ch)) {
return false;
}
}
return true;
}
bool IsValidMetricLabel(std::string_view name) {
if (name.empty()) return false;
if (!absl::ascii_isalpha(name[0])) return false;
for (auto ch : name) {
if (ch != '_' && !absl::ascii_isalnum(ch)) {
return false;
}
}
return true;
}
std::string_view UnitsToString(Units units) {
switch (units) {
case Units::kUnknown:
return {};
case Units::kSeconds:
return "seconds";
case Units::kMilliseconds:
return "milliseconds";
case Units::kMicroseconds:
return "microseconds";
case Units::kNanoseconds:
return "nanoseconds";
case Units::kBits:
return "bits";
case Units::kBytes:
return "bytes";
case Units::kKilobytes:
return "kilobytes";
case Units::kMegabytes:
return "megabytes";
}
ABSL_UNREACHABLE();
}
}
} | #include "tensorstore/internal/metrics/metadata.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_metrics::IsValidMetricLabel;
using ::tensorstore::internal_metrics::IsValidMetricName;
using ::tensorstore::internal_metrics::Units;
using ::tensorstore::internal_metrics::UnitsToString;
TEST(MetadataTest, IsValidMetricName) {
EXPECT_FALSE(IsValidMetricName(""));
EXPECT_FALSE(IsValidMetricName("/"));
  EXPECT_FALSE(IsValidMetricName("//"));  // literal assumed; source line truncated at "//"
EXPECT_FALSE(IsValidMetricName("/foo/"));
  EXPECT_FALSE(IsValidMetricName("/foo//bar"));  // literal assumed; source line truncated at "//"
EXPECT_FALSE(IsValidMetricName("/_foo"));
EXPECT_FALSE(IsValidMetricName("/foo%"));
EXPECT_FALSE(IsValidMetricName("/foo%"));
EXPECT_FALSE(IsValidMetricName("/foo.bar"));
EXPECT_FALSE(IsValidMetricName("foo_1"));
EXPECT_TRUE(IsValidMetricName("/foo/1_bar/Baz"));
}
TEST(MetadataTest, IsValidMetricLabel) {
EXPECT_FALSE(IsValidMetricLabel(""));
EXPECT_FALSE(IsValidMetricLabel("/"));
EXPECT_FALSE(IsValidMetricLabel("1_bar"));
EXPECT_FALSE(IsValidMetricLabel("_bar"));
EXPECT_FALSE(IsValidMetricLabel("foo/bar"));
EXPECT_FALSE(IsValidMetricLabel("foo-bar"));
EXPECT_FALSE(IsValidMetricLabel("foo.bar"));
EXPECT_TRUE(IsValidMetricLabel("a"));
EXPECT_TRUE(IsValidMetricLabel("foB_1"));
}
TEST(MetadataTest, UnitsToString) {
EXPECT_THAT(UnitsToString(Units::kUnknown), ::testing::IsEmpty());
EXPECT_THAT(UnitsToString(Units::kSeconds), ::testing::Eq("seconds"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/metadata.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/metadata_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f03b26a7-efe8-47dd-867f-fb78537061cf | cpp | tensorflow/tensorflow | tf_rendezvous_c_api | tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api.h | tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_TF_RENDEZVOUS_C_API_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_TF_RENDEZVOUS_C_API_H_
#include <stdint.h>
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TF_DeviceContext TF_DeviceContext;
typedef struct TFDevice_AllocatorAttributes {
uint32_t value;
int32_t scope_id;
} TFDevice_AllocatorAttributes;
typedef struct TFE_CancellationManager TFE_CancellationManager;
typedef struct TF_RendezvousArgsStruct {
TF_DeviceContext* device_context;
TFDevice_AllocatorAttributes alloc_attrs;
TFE_CancellationManager* cancellation_manager;
} TF_RendezvousArgsStruct;
typedef struct TF_RendezvousParsedKey {
char* full_key;
uint32_t full_key_size;
} TF_RendezvousParsedKey;
typedef struct TF_RendezvousSend_Params {
const TF_RendezvousParsedKey* key;
const TF_RendezvousArgsStruct* args;
TF_Tensor* tensor;
bool is_dead;
TF_Status* status;
} TF_RendezvousSend_Params;
typedef void (*TF_RendezvousSend_Function)(void*, TF_RendezvousSend_Params*);
typedef struct TF_RendezvousDoneCallback_Params {
void* context;
const TF_Status* status;
const TF_Tensor* tensor;
bool is_dead;
} TF_RendezvousDoneCallback_Params;
typedef void (*TF_RendezvousDoneCallback_Function)(
void*, TF_RendezvousDoneCallback_Params*);
typedef struct TF_RendezvousDoneCallbackImpl {
void* context;
TF_RendezvousDoneCallback_Function callback;
} TF_RendezvousDoneCallbackImpl;
typedef struct TF_RendezvousAsyncRecv_Params {
void* context;
const TF_RendezvousParsedKey* key;
const TF_RendezvousArgsStruct* args;
TF_RendezvousDoneCallbackImpl on_done;
} TF_RendezvousAsyncRecv_Params;
typedef void (*TF_RendezvousAsyncRecv_Function)(void*,
TF_RendezvousAsyncRecv_Params*);
typedef void (*TF_RendezvousStartAbort_Function)(void* context,
const TF_Status*);
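// Bundles a rendezvous instance with C function pointers for Send,
// RecvAsync, and StartAbort so a plugin can call back into TensorFlow's
// rendezvous across the C ABI boundary.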
typedef struct TF_RendezvousThunk {
void* rendezvous;
TF_RendezvousSend_Function send_func;
TF_RendezvousAsyncRecv_Function async_recv_func;
TF_RendezvousStartAbort_Function start_abort_func;
} TF_RendezvousThunk;
#ifdef __cplusplus
}
#endif
#endif | #include "tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/notification.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_helper.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_internal.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
Tensor CreateTestTensor() {
Tensor t(DT_INT8, TensorShape({10, 20}));
for (int64_t a = 0; a < t.shape().dim_size(0); a++) {
for (int64_t b = 0; b < t.shape().dim_size(1); b++) {
t.matrix<int8>()(a, b) = static_cast<int8>((a + 1) * (b + 1));
}
}
return t;
}
class FakeAllocator : public Allocator {
public:
std::string Name() override { return "fake"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
return port::AlignedMalloc(num_bytes, alignment);
}
void DeallocateRaw(void* ptr) override { return port::AlignedFree(ptr); }
};
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override {
return allocator_.get();
}
static std::unique_ptr<Device> Make(absl::string_view name,
absl::string_view type) {
DeviceAttributes device_attributes;
device_attributes.set_name(std::string(name));
device_attributes.set_device_type(std::string(type));
return std::unique_ptr<Device>(new FakeDevice(device_attributes));
}
private:
std::unique_ptr<FakeAllocator> allocator_ = std::make_unique<FakeAllocator>();
};
class FakeDeviceManager : public DeviceMgr {
public:
void ListDeviceAttributes(
std::vector<DeviceAttributes>* devices) const override {
devices->clear();
}
std::vector<Device*> ListDevices() const override {
return std::vector<Device*>();
}
std::string DebugString() const override { return ""; }
std::string DeviceMappingString() const override { return ""; }
absl::Status LookupDevice(StringPiece name, Device** device) const override {
*device = fake_device_.get();
return absl::OkStatus();
}
bool ContainsDevice(int64_t device_incarnation) const override {
return false;
}
void ClearContainers(absl::Span<const string> containers) const override {}
int NumDeviceType(const string& type) const override { return 0; }
int NumDevices() const override { return 0; }
Device* HostCPU() const override { return nullptr; }
private:
std::unique_ptr<Device> fake_device_ = FakeDevice::Make("/cpu:0", "fake");
};
class TestDeviceContext : public DeviceContext {
public:
void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
Tensor* device_tensor, StatusCallback done,
bool sync_dst_compute) const override {
Tensor test_tensor = CreateTestTensor();
test::ExpectTensorEqual<int8>(test_tensor, *cpu_tensor);
done(absl::OkStatus());
}
void CopyDeviceTensorToCPU(const Tensor* device_tensor,
absl::string_view tensor_name, Device* device,
Tensor* cpu_tensor, StatusCallback done) override {
*cpu_tensor = CreateTestTensor();
done(absl::OkStatus());
}
void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
Tensor* output_tensor,
tsl::StatusCallback done) const override {
done(absl::InternalError("TPU->TPU copy not implemented."));
}
};
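// Builds a canonical rendezvous key, roughly of the form
// "src_device;incarnation;dst_device;name;frame:iter", for a CPU<->TPU
// transfer in either direction.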
std::string CreateRendezvousKey(bool to_host) {
const std::string task_prefix = "/job:worker/replica:0/task:0";
const std::string src_device = to_host ? "/device:TPU:0" : "/device:CPU:0";
const std::string dst_device = to_host ? "/device:CPU:0" : "/device:TPU:0";
const std::string rendezvous_key_base = "rendezvous_key_base";
  return Rendezvous::CreateKey(absl::StrCat(task_prefix, src_device),
                               /*src_incarnation=*/1,
                               absl::StrCat(task_prefix, dst_device),
                               rendezvous_key_base, FrameAndIter(0, 0));
}
TEST(RendezvousCAPI, DeviceToHost) {
auto device_manager = std::make_unique<FakeDeviceManager>();
core::RefCountPtr<Rendezvous> rendezvous = core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_manager.get()));
core::RefCountPtr<TestDeviceContext> device_context =
core::RefCountPtr<TestDeviceContext>(new TestDeviceContext());
std::string key = CreateRendezvousKey(true);
Rendezvous::ParsedKey parsed_key;
TF_ASSERT_OK(Rendezvous::ParseKey(key, &parsed_key));
TF_RendezvousThunk* thunk = ToC(rendezvous.get());
std::unique_ptr<tensorflow::RendezvousInterface> thunk_rendezvous =
FromC(thunk);
Rendezvous::Args send_args;
send_args.device_context = device_context.get();
TF_CHECK_OK(thunk_rendezvous->Send(parsed_key, send_args, Tensor(), false));
Tensor result;
absl::Notification callback_done;
Rendezvous::Args recv_args;
recv_args.device_context = device_context.get();
recv_args.alloc_attrs.set_on_host(true);
rendezvous->RecvAsync(parsed_key, recv_args,
[&](const absl::Status& status,
const RefCountedIntraProcessRendezvous::Args&,
const RefCountedIntraProcessRendezvous::Args&,
const Tensor& tensor, const bool) {
TF_ASSERT_OK(status);
result = tensor;
callback_done.Notify();
});
callback_done.WaitForNotification();
Tensor test_tensor = CreateTestTensor();
test::ExpectTensorEqual<int8>(test_tensor, result);
Destroy(thunk);
delete thunk;
}
TEST(RendezvousCAPI, HostToDevice) {
auto device_manager = std::make_unique<FakeDeviceManager>();
core::RefCountPtr<Rendezvous> rendezvous = core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_manager.get()));
core::RefCountPtr<TestDeviceContext> device_context =
core::RefCountPtr<TestDeviceContext>(new TestDeviceContext());
std::string key = CreateRendezvousKey(false);
Rendezvous::ParsedKey parsed_key;
TF_ASSERT_OK(Rendezvous::ParseKey(key, &parsed_key));
TF_RendezvousThunk* thunk = ToC(rendezvous.get());
std::unique_ptr<tensorflow::RendezvousInterface> thunk_rendezvous =
FromC(thunk);
Rendezvous::Args recv_args;
recv_args.device_context = device_context.get();
Tensor result;
absl::Notification callback_done;
thunk_rendezvous->RecvAsync(parsed_key, recv_args,
[&](const absl::Status& status,
const RefCountedIntraProcessRendezvous::Args&,
const RefCountedIntraProcessRendezvous::Args&,
const Tensor& tensor, const bool) {
TF_ASSERT_OK(status);
result = tensor;
callback_done.Notify();
});
Rendezvous::Args send_args;
send_args.device_context = device_context.get();
send_args.alloc_attrs.set_on_host(true);
Tensor test_tensor = CreateTestTensor();
TF_CHECK_OK(rendezvous->Send(parsed_key, send_args, test_tensor, false));
callback_done.WaitForNotification();
Destroy(thunk);
delete thunk;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f6d26ff3-c6e3-4c5f-9e01-90b52242a5cb | cpp | tensorflow/tensorflow | batch_norm_op | tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc | tensorflow/core/kernels/batch_norm_op_test.cc | #include <algorithm>
#include <numeric>
#include <string>
#include <vector>
#include "tensorflow/compiler/tf2xla/kernels/relu_op.h"
#include "tensorflow/compiler/tf2xla/mlir_xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/math.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/util.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace {
class FusedBatchNormOp : public XlaOpKernel {
public:
explicit FusedBatchNormOp(OpKernelConstruction* ctx)
: FusedBatchNormOp(ctx, false) {}
FusedBatchNormOp(OpKernelConstruction* ctx, bool is_batch_norm_ex)
: XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("epsilon", &epsilon_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("is_training", &is_training_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr("exponential_avg_factor", &exponential_avg_factor_));
string data_format_str;
OP_REQUIRES_OK(ctx, ctx->GetAttr("data_format", &data_format_str));
OP_REQUIRES(
ctx, FormatFromString(data_format_str, &data_format_),
errors::InvalidArgument("Invalid data format: ", data_format_str));
if (is_batch_norm_ex) {
int num_side_inputs;
OP_REQUIRES_OK(ctx, ctx->GetAttr("num_side_inputs", &num_side_inputs));
OP_REQUIRES(ctx, num_side_inputs >= 0 && num_side_inputs <= 1,
errors::InvalidArgument(
"FusedBatchNormEx supports at most 1 side input."));
add_side_input_ = (num_side_inputs == 1);
string activation_mode;
OP_REQUIRES_OK(ctx, ctx->GetAttr("activation_mode", &activation_mode));
OP_REQUIRES(ctx,
activation_mode == "Identity" || activation_mode == "Relu",
errors::InvalidArgument(
"Unsupported FusedBatchNormEx activation mode: ",
activation_mode));
apply_relu_ = (activation_mode == "Relu");
} else {
add_side_input_ = false;
apply_relu_ = false;
}
is_on_gpu_ = ctx->device_type().type_string() == DEVICE_GPU_XLA_JIT;
}
void Compile(XlaOpKernelContext* ctx) override { CompileImpl(ctx); }
protected:
virtual void CompileImpl(XlaOpKernelContext* ctx) {
xla::XlaBuilder* const b = ctx->builder();
xla::PrimitiveType input_type;
OP_REQUIRES_OK(ctx,
DataTypeToPrimitiveType(ctx->input_type(0), &input_type));
xla::PrimitiveType scale_type;
OP_REQUIRES_OK(ctx,
DataTypeToPrimitiveType(ctx->input_type(1), &scale_type));
xla::XlaOp input = ctx->Input(0);
TensorShape input_shape = ctx->InputShape(0);
int feature_index =
GetTensorFeatureDimIndex(input_shape.dims(), data_format_);
input = xla::ConvertElementType(input, scale_type);
if (is_training_) {
xla::XlaOp output = xla::BatchNormTraining(
input, ctx->Input(1), ctx->Input(2), epsilon_, feature_index);
xla::XlaOp converted =
xla::ConvertElementType(xla::GetTupleElement(output, 0), input_type);
if (add_side_input_ && apply_relu_) {
ctx->SetOutput(0, xla::Relu(xla::Add(ctx->Input(5), converted)));
} else if (apply_relu_) {
ctx->SetOutput(0, xla::Relu(converted));
} else {
ctx->SetOutput(0, converted);
}
xla::XlaOp variance = xla::GetTupleElement(output, 2);
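      // BatchNormTraining returns the biased (batch) variance; rescale by
      // N/(N-1) to get the unbiased estimate TensorFlow reports.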
int total_input_size = ctx->InputShape(0).num_elements();
int total_scale_size = ctx->InputShape(1).num_elements();
int sample_size =
total_scale_size > 0 ? total_input_size / total_scale_size : 0;
int sample_size_minus_one = std::max(1, sample_size - 1);
double factor = static_cast<double>(sample_size) /
static_cast<double>(sample_size_minus_one);
constexpr int kVarianceOutputIndex = 2;
xla::XlaOp corrected =
xla::Mul(variance, xla::ScalarLike(variance, factor));
if (input_shape.num_elements() == 0) {
auto status_or_output_shape = b->GetShape(corrected);
OP_REQUIRES_OK(ctx, status_or_output_shape.status());
ctx->SetOutput(1, xla::GetTupleElement(output, 1));
ctx->SetOutput(
kVarianceOutputIndex,
xla::Broadcast(
xla::NanValue(b, ctx->output_xla_type(kVarianceOutputIndex)),
status_or_output_shape.value().dimensions()));
} else {
if (exponential_avg_factor_ == 1.0f) {
ctx->SetOutput(1, xla::GetTupleElement(output, 1));
ctx->SetOutput(2, corrected);
} else {
xla::XlaOp old_mean = ctx->Input(3);
xla::XlaOp alpha =
xla::ScalarLike(old_mean, 1.0f - exponential_avg_factor_);
xla::XlaOp beta = xla::ScalarLike(old_mean, exponential_avg_factor_);
xla::XlaOp new_running_mean =
xla::Add(xla::Mul(old_mean, alpha),
xla::Mul(xla::GetTupleElement(output, 1), beta));
ctx->SetOutput(1, new_running_mean);
xla::XlaOp old_variance = ctx->Input(4);
xla::XlaOp new_running_variance = xla::Add(
xla::Mul(old_variance, alpha), xla::Mul(corrected, beta));
ctx->SetOutput(2, new_running_variance);
}
}
ctx->SetOutput(3, xla::GetTupleElement(output, 1));
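      // Output 4 (reserve_space_2) holds rsqrt(variance + epsilon) on GPU
      // and the plain batch variance elsewhere.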
if (is_on_gpu_) {
ctx->SetOutput(4, xla::Rsqrt(xla::Add(
variance, xla::ScalarLike(variance, epsilon_))));
} else {
ctx->SetOutput(4, variance);
}
} else {
xla::XlaOp output = xla::BatchNormInference(
input, ctx->Input(1), ctx->Input(2), ctx->Input(3), ctx->Input(4),
epsilon_, feature_index);
xla::XlaOp converted = xla::ConvertElementType(output, input_type);
if (add_side_input_ && apply_relu_) {
ctx->SetOutput(0, xla::Relu(xla::Add(ctx->Input(5), converted)));
} else if (apply_relu_) {
ctx->SetOutput(0, xla::Relu(converted));
} else {
ctx->SetOutput(0, converted);
}
ctx->SetOutput(1, ctx->Input(3));
ctx->SetOutput(2, ctx->Input(4));
ctx->SetOutput(3, ctx->Input(3));
ctx->SetOutput(4, ctx->Input(4));
}
}
private:
float epsilon_;
TensorFormat data_format_;
bool is_training_;
float exponential_avg_factor_;
bool add_side_input_;
bool apply_relu_;
bool is_on_gpu_;
};
class FusedBatchNormOpV3 : public FusedBatchNormOp {
public:
explicit FusedBatchNormOpV3(OpKernelConstruction* ctx)
: FusedBatchNormOp(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
FusedBatchNormOp::CompileImpl(ctx);
if (!ctx->status().ok()) {
return;
}
ctx->SetConstantOutput(5, Tensor());
}
};
class FusedBatchNormOpEx : public FusedBatchNormOp {
public:
explicit FusedBatchNormOpEx(OpKernelConstruction* ctx)
: FusedBatchNormOp(ctx, true) {}
void Compile(XlaOpKernelContext* ctx) override {
FusedBatchNormOp::CompileImpl(ctx);
if (!ctx->status().ok()) {
return;
}
ctx->SetConstantOutput(5, Tensor());
}
};
REGISTER_XLA_OP(Name("FusedBatchNorm"), FusedBatchNormOp);
REGISTER_XLA_OP(Name("FusedBatchNormV2"), FusedBatchNormOp);
REGISTER_XLA_OP(Name("FusedBatchNormV3"), MlirXlaOpKernel);
REGISTER_XLA_OP(Name("_FusedBatchNormEx"), FusedBatchNormOpEx);
class FusedBatchNormGradOp : public XlaOpKernel {
public:
explicit FusedBatchNormGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("epsilon", &epsilon_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("is_training", &is_training_));
string data_format_str;
OP_REQUIRES_OK(ctx, ctx->GetAttr("data_format", &data_format_str));
OP_REQUIRES(
ctx, FormatFromString(data_format_str, &data_format_),
errors::InvalidArgument("Invalid data format: ", data_format_str));
is_on_gpu_ = ctx->device_type().type_string() == DEVICE_GPU_XLA_JIT;
}
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaBuilder* const b = ctx->builder();
DataType input_dtype = ctx->input_type(0);
DataType scale_dtype = ctx->input_type(2);
auto grad_backprop =
XlaHelpers::ConvertElementType(ctx->Input(0), scale_dtype);
auto activations =
XlaHelpers::ConvertElementType(ctx->Input(1), scale_dtype);
auto scale = ctx->Input(2);
auto mean = ctx->Input(3);
auto var = ctx->Input(4);
const int input_dims = ctx->InputShape(0).dims();
const int feature_index =
GetTensorFeatureDimIndex(input_dims, data_format_);
xla::XlaOp x_backprop;
xla::XlaOp scale_backprop;
xla::XlaOp offset_backprop;
if (is_training_) {
if (is_on_gpu_) {
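        // The GPU forward pass saved rsqrt(var + eps) in place of the
        // variance (see FusedBatchNormOp above); invert that here so
        // BatchNormGrad receives the actual variance.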
xla::XlaOp one = xla::ScalarLike(var, 1.0f);
xla::XlaOp epsilon = xla::ScalarLike(var, epsilon_);
var = xla::Sub(one / (var * var), epsilon);
}
xla::XlaOp output =
xla::BatchNormGrad(activations, scale, mean, var, grad_backprop,
epsilon_, feature_index);
x_backprop = xla::GetTupleElement(output, 0);
scale_backprop = xla::GetTupleElement(output, 1);
offset_backprop = xla::GetTupleElement(output, 2);
} else {
std::vector<int64_t> reduction_dims(input_dims - 1);
std::iota(reduction_dims.begin(), reduction_dims.begin() + feature_index,
0);
std::iota(reduction_dims.begin() + feature_index, reduction_dims.end(),
feature_index + 1);
const DataType accumulation_type =
XlaHelpers::SumAccumulationType(scale_dtype);
auto converted =
XlaHelpers::ConvertElementType(grad_backprop, accumulation_type);
auto reduce =
xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type), reduction_dims);
offset_backprop = XlaHelpers::ConvertElementType(reduce, scale_dtype);
auto epsilon = XlaHelpers::FloatLiteral(b, scale_dtype, epsilon_);
auto scratch1 = xla::Rsqrt(xla::Add(var, epsilon));
auto mul =
xla::Mul(grad_backprop, xla::Sub(activations, mean, {feature_index}));
converted = XlaHelpers::ConvertElementType(mul, accumulation_type);
reduce =
xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type), reduction_dims);
auto scratch2 = XlaHelpers::ConvertElementType(reduce, scale_dtype);
x_backprop =
xla::Mul(grad_backprop, xla::Mul(scratch1, scale), {feature_index});
scale_backprop = xla::Mul(scratch1, scratch2);
}
ctx->SetOutput(0, XlaHelpers::ConvertElementType(x_backprop, input_dtype));
ctx->SetOutput(1, scale_backprop);
ctx->SetOutput(2, offset_backprop);
ctx->SetConstantOutput(3, Tensor());
ctx->SetConstantOutput(4, Tensor());
}
private:
TensorFormat data_format_;
float epsilon_;
bool is_training_;
bool is_on_gpu_;
};
REGISTER_XLA_OP(Name("FusedBatchNormGrad"), FusedBatchNormGradOp);
REGISTER_XLA_OP(Name("FusedBatchNormGradV2"), FusedBatchNormGradOp);
REGISTER_XLA_OP(Name("FusedBatchNormGradV3"), MlirXlaOpKernel);
}
} | #include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
template <typename T>
struct BatchNormOpTest : public OpsTestBase {
static constexpr auto TValueType = DataTypeToEnum<T>::value;
void run_me() {
TF_EXPECT_OK(
NodeDefBuilder("batch_norm_op", "BatchNormWithGlobalNormalization")
.Input(FakeInput(TValueType))
.Input(FakeInput(TValueType))
.Input(FakeInput(TValueType))
.Input(FakeInput(TValueType))
.Input(FakeInput(TValueType))
.Attr("scale_after_normalization", false)
.Attr("variance_epsilon", 0.001)
.Finalize(node_def()));
TF_EXPECT_OK(InitOpWithGraphVersion(8));
AddInputFromList<T>(TensorShape({1, 1, 6, 2}),
{1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6});
AddInputFromList<T>(TensorShape({2}), {10, 20});
AddInputFromList<T>(TensorShape({2}), {0.25, 0.5});
AddInputFromList<T>(TensorShape({2}), {0.1, 0.6});
AddInputFromList<T>(TensorShape({2}), {0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
double atol = TValueType == DT_FLOAT ? 0.01 : 0.1;
Tensor expected(allocator(), TValueType, TensorShape({1, 1, 6, 2}));
test::FillValues<T>(&expected,
{-17.86f, -22.00f, -15.87f, -20.59f, -13.87f, -19.18f,
-21.86f, -33.31f, -23.85f, -34.72f, -25.85f, -36.13f});
test::ExpectTensorNear<T>(expected, *GetOutput(0), atol);
}
};
TYPED_TEST_SUITE_P(BatchNormOpTest);
TYPED_TEST_P(BatchNormOpTest, Simple) { this->run_me(); }
REGISTER_TYPED_TEST_SUITE_P(BatchNormOpTest, Simple);
using DataTypes = ::testing::Types<float, Eigen::half>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, BatchNormOpTest, DataTypes);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batch_norm_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a0b74122-fd6d-4716-ae45-35839fa3648e | cpp | tensorflow/tensorflow | leaky_relu | tensorflow/lite/kernels/internal/reference/leaky_relu.h | tensorflow/lite/delegates/xnnpack/leaky_relu_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_
#include <algorithm>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
inline void LeakyRelu(const tflite::LeakyReluParams& params,
const RuntimeShape& input_shape, const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
const float val = input_data[i];
output_data[i] = val > 0 ? val : val * params.alpha;
}
}
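// Quantized LeakyRelu: one fixed-point multiplier handles the identity path
// (x >= 0) and another handles the alpha path, followed by clamping to the
// output type's range.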
template <typename T>
inline void QuantizeLeakyRelu(const LeakyReluParams& params,
const RuntimeShape& input_shape,
const T* input_data,
const RuntimeShape& output_shape,
T* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
static const int32_t quantized_min = std::numeric_limits<T>::min();
static const int32_t quantized_max = std::numeric_limits<T>::max();
for (int i = 0; i < flat_size; ++i) {
const int32_t input_value = input_data[i] - params.input_offset;
int32_t unclamped_output;
if (input_value >= 0) {
unclamped_output = params.output_offset +
MultiplyByQuantizedMultiplier(
input_value, params.output_multiplier_identity,
params.output_shift_identity);
} else {
unclamped_output = params.output_offset +
MultiplyByQuantizedMultiplier(
input_value, params.output_multiplier_alpha,
params.output_shift_alpha);
}
const T clamped_output =
std::min(quantized_max, std::max(quantized_min, unclamped_output));
output_data[i] = static_cast<T>(clamped_output);
}
}
}
}
#endif | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/leaky_relu_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(LeakyRelu, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
LeakyReluTester()
.Shape({batch, height, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
LeakyReluTester()
.Shape({batch, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
LeakyReluTester().Shape({batch, channels}).Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
LeakyReluTester().Shape({batch}).Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, NegativeSlope) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
LeakyReluTester()
.Shape({batch, height, width, channels})
.NegativeSlope(-0.75f)
.Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
LeakyReluTester()
.Shape({batch, height, width, channels})
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/reference/leaky_relu.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/leaky_relu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f3ed1296-aa66-4676-b427-1b02afef8845 | cpp | tensorflow/tensorflow | replicate_constants_pass | tensorflow/core/common_runtime/replicate_constants_pass.cc | tensorflow/core/common_runtime/replicate_constants_pass_test.cc | #include "tensorflow/core/common_runtime/replicate_constants_pass.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
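// Upper bound on the number of elements a constant may have to be eligible
// for replication onto each successor device.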
constexpr int64_t kMaxSize = 16;
void SetUniqueName(Graph* graph, Node* node) {
node->set_name(graph->NewName(absl::StrCat(node->name(), "/replicate")));
}
bool HasControlOut(Node* node) {
auto control_out_it =
std::find_if(node->out_edges().begin(), node->out_edges().end(),
[](const auto& e) { return e->IsControlEdge(); });
return control_out_it != node->out_edges().end();
}
bool HasCpuDevice(const Node* node) {
DeviceNameUtils::ParsedName device;
if (!DeviceNameUtils::ParseFullName(node->assigned_device_name(), &device))
return false;
return device.type == "CPU";
}
Status DeviceNameToCpuDeviceNameWithDeviceId(const string& device_name,
string* host_device_name) {
DeviceNameUtils::ParsedName device;
if (!DeviceNameUtils::ParseFullName(device_name, &device)) {
return absl::InternalError(
absl::StrCat("Could not parse device name ", device_name));
}
if (flags::Global().enable_aggressive_constant_replication.value() &&
device.type == "CPU") {
*host_device_name = device_name;
} else {
device.type = "CPU";
device.has_type = true;
device.id = 0;
device.has_id = true;
*host_device_name = DeviceNameUtils::ParsedNameToString(device);
}
return absl::OkStatus();
}
Status GetDestinationCpuDevice(const Node* dst, std::string* device) {
if (!dst->has_assigned_device_name())
return absl::AbortedError(
absl::StrCat("Node name: ", dst->name(), " has no assigned device."));
return DeviceNameToCpuDeviceNameWithDeviceId(dst->assigned_device_name(),
device);
}
Status GetSuccessorEdges(
Node* node,
absl::btree_map<std::string, std::vector<const Edge*>>& device_to_edges) {
for (const auto& edge : node->out_edges()) {
const Node* dst = edge->dst();
std::string device;
TF_RETURN_IF_ERROR(GetDestinationCpuDevice(dst, &device));
if (!device_to_edges.count(device)) device_to_edges.insert({device, {}});
device_to_edges[device].push_back(edge);
}
return absl::OkStatus();
}
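// Copies the constant once per destination device, reroutes the data edges
// to the per-device copies, preserves incoming control edges, and removes
// the original node.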
void ReplicateToEachDevice(
Graph* graph, Node* node,
absl::btree_map<std::string, std::vector<const Edge*>>& device_to_edges) {
for (const auto& pair : device_to_edges) {
Node* copy = graph->CopyNode(node);
SetUniqueName(graph, copy);
const std::string device = pair.first;
copy->set_assigned_device_name(device);
for (const Edge* edge : pair.second) {
graph->AddEdge(copy, edge->src_output(), edge->dst(), edge->dst_input());
}
for (Node* src : node->in_nodes()) {
      graph->AddControlEdge(src, copy, /*allow_duplicates=*/true);
}
}
graph->RemoveNode(node);
}
}
Status ReplicateConstantsPass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "replicate_constants_pass will replicate constants with "
"number-of-elements <= "
<< kMaxSize;
if (options.graph == nullptr) {
VLOG(1) << "No graph in replicate_constants_pass.";
return absl::OkStatus();
}
Graph* graph = options.graph->get();
if (VLOG_IS_ON(1)) {
VLOG(1) << DumpGraphToFile("before_replicate_constants_pass", *graph,
options.flib_def);
}
int64_t min_skipped = std::numeric_limits<int64_t>::max();
int64_t max_skipped = std::numeric_limits<int64_t>::min();
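  // A constant is replicated only if it has multiple data successors on more
  // than one device, no control-edge consumers, at most kMaxSize elements,
  // and an assigned CPU device.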
for (Node* node : graph->nodes()) {
if (!node->IsConstant()) continue;
if (node->out_edges().size() <= 1) continue;
if (HasControlOut(node)) continue;
const TensorProto* value = nullptr;
TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "value", &value));
TF_ASSIGN_OR_RETURN(TensorShape shape,
TensorShape::BuildTensorShape(value->tensor_shape()));
if (shape.num_elements() > kMaxSize) {
min_skipped = std::min(min_skipped, shape.num_elements());
max_skipped = std::max(max_skipped, shape.num_elements());
continue;
}
if (!node->has_assigned_device_name()) continue;
if (!HasCpuDevice(node)) continue;
absl::btree_map<std::string, std::vector<const Edge*>> device_to_edges;
TF_RETURN_IF_ERROR(GetSuccessorEdges(node, device_to_edges));
if (device_to_edges.size() <= 1) continue;
ReplicateToEachDevice(graph, node, device_to_edges);
}
if (min_skipped != std::numeric_limits<int64_t>::max()) {
VLOG(1) << "replicate_constants_pass skipped replicating constants with "
"number of elements in the range "
<< min_skipped << " to " << max_skipped << ".";
}
if (VLOG_IS_ON(1)) {
VLOG(1) << DumpGraphToFile("after_replicate_constants_pass", *graph,
options.flib_def);
}
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, 3,
ReplicateConstantsPass);
} | #include "tensorflow/core/common_runtime/replicate_constants_pass.h"
#include <memory>
#include <string>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace tensorflow {
const char kCpu0[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0";
const char kCpu1[] = "/job:tpu_host_worker/replica:0/task:1/device:CPU:0";
const char kTpu00[] = "/job:tpu_host_worker/replica:0/task:0/device:TPU:0";
const char kTpu01[] = "/job:tpu_host_worker/replica:0/task:0/device:TPU:1";
const char kTpu10[] = "/job:tpu_host_worker/replica:0/task:1/device:TPU:0";
const char kTpu11[] = "/job:tpu_host_worker/replica:0/task:1/device:TPU:1";
Node* GetNode(const Graph& graph, const std::string& name) {
for (Node* node : graph.nodes()) {
if (node->name() == name) return node;
}
CHECK(false) << "Unknown node name: " << name;
return nullptr;
}
Node* GetPredecessor(Node* node) {
auto it = node->in_nodes().begin();
CHECK(it != node->in_nodes().end())
<< "No predecessor for " << node->name() << "\n";
return *it;
}
bool IsEdge(Node* src, Node* dst) {
for (Node* node : src->out_nodes()) {
if (node == dst) return true;
}
return false;
}
TEST(ReplicateConstantsPassTest, TestSmallConstant) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const"), 1.0f, TensorShape({}));
ops::Negate dst0(scope.WithOpName("dst0"), const0);
ops::Negate dst1(scope.WithOpName("dst1"), const0);
ops::Negate dst2(scope.WithOpName("dst2"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(dst0->assigned_device_name(),
GetPredecessor(dst0)->assigned_device_name());
EXPECT_EQ(dst1->assigned_device_name(),
GetPredecessor(dst1)->assigned_device_name());
EXPECT_EQ(dst2->assigned_device_name(),
GetPredecessor(dst2)->assigned_device_name());
}
TEST(ReplicateConstantsPassTest, TestLargeConstant) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const"),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ops::Negate dst0(scope.WithOpName("dst0"), const0);
ops::Negate dst1(scope.WithOpName("dst1"), const0);
ops::Negate dst2(scope.WithOpName("dst2"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(dst0->assigned_device_name(),
GetPredecessor(dst0)->assigned_device_name());
EXPECT_NE(dst1->assigned_device_name(),
GetPredecessor(dst1)->assigned_device_name());
EXPECT_NE(dst2->assigned_device_name(),
GetPredecessor(dst2)->assigned_device_name());
}
TEST(ReplicateConstantsPassTest, TestControlOut) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const0"), 1.0f, TensorShape({}));
Output ctrl_succ =
ops::Const(scope.WithOpName("ctrl_succ"), 1.0f, TensorShape({}));
ops::Negate dst0(scope.WithOpName("dst0"), const0);
ops::Negate dst1(scope.WithOpName("dst1"), const0);
ops::Negate dst2(scope.WithOpName("dst2"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "ctrl_succ")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
graph->AddControlEdge(GetNode(*graph, "const0"),
GetNode(*graph, "ctrl_succ"));
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(dst0->assigned_device_name(),
GetPredecessor(dst0)->assigned_device_name());
EXPECT_NE(dst1->assigned_device_name(),
GetPredecessor(dst1)->assigned_device_name());
EXPECT_NE(dst2->assigned_device_name(),
GetPredecessor(dst2)->assigned_device_name());
}
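// A constant already assigned to a TPU device is left alone, so the
// cross-device destinations below keep their original predecessor.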
TEST(ReplicateConstantsPassTest, TestTpuConst) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const0"), 1.0f, TensorShape({}));
ops::Negate dst0(scope.WithOpName("dst0"), const0);
ops::Negate dst1(scope.WithOpName("dst1"), const0);
ops::Negate dst2(scope.WithOpName("dst2"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const0")->set_assigned_device_name(kTpu00);
GetNode(*graph, "dst0")->set_assigned_device_name(kTpu00);
GetNode(*graph, "dst1")->set_assigned_device_name(kTpu10);
GetNode(*graph, "dst2")->set_assigned_device_name(kTpu10);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(dst0->assigned_device_name(),
GetPredecessor(dst0)->assigned_device_name());
EXPECT_NE(dst1->assigned_device_name(),
GetPredecessor(dst1)->assigned_device_name());
EXPECT_NE(dst2->assigned_device_name(),
GetPredecessor(dst2)->assigned_device_name());
}
TEST(ReplicateConstantsPassTest, TestSmallAndLargeConstants) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output small = ops::Const(scope.WithOpName("small"), 1.0f, TensorShape({}));
Output large =
ops::Const(scope.WithOpName("large"),
{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f});
ops::Add dst0(scope.WithOpName("dst0"), small, large);
ops::Add dst1(scope.WithOpName("dst1"), small, large);
ops::Add dst2(scope.WithOpName("dst2"), small, large);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "small")->set_assigned_device_name(kCpu0);
GetNode(*graph, "large")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* small0 = GetNode(*graph, "small/replicate/_0");
Node* small1 = GetNode(*graph, "small/replicate/_1");
Node* large = GetNode(*graph, "large");
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(small0->assigned_device_name(), kCpu0);
EXPECT_EQ(small1->assigned_device_name(), kCpu1);
EXPECT_EQ(large->assigned_device_name(), kCpu0);
EXPECT_EQ(dst0->assigned_device_name(), kCpu0);
EXPECT_EQ(dst1->assigned_device_name(), kCpu1);
  EXPECT_EQ(dst2->assigned_device_name(), kCpu1);
EXPECT_TRUE(IsEdge(small0, dst0));
EXPECT_TRUE(IsEdge(large, dst0));
EXPECT_TRUE(IsEdge(small1, dst1));
EXPECT_TRUE(IsEdge(large, dst1));
EXPECT_TRUE(IsEdge(small1, dst2));
EXPECT_TRUE(IsEdge(large, dst2));
}
TEST(ReplicateConstantsPassTest, TestTpuDestinations) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const"), 1.0f, TensorShape({}));
ops::Negate dst00(scope.WithOpName("dst00"), const0);
ops::Negate dst01(scope.WithOpName("dst01"), const0);
ops::Negate dst10(scope.WithOpName("dst10"), const0);
ops::Negate dst11(scope.WithOpName("dst11"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst00")->set_assigned_device_name(kTpu00);
GetNode(*graph, "dst01")->set_assigned_device_name(kTpu01);
GetNode(*graph, "dst10")->set_assigned_device_name(kTpu10);
GetNode(*graph, "dst11")->set_assigned_device_name(kTpu11);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* const0 = GetNode(*graph, "const/replicate/_0");
Node* const1 = GetNode(*graph, "const/replicate/_1");
Node* dst00 = GetNode(*graph, "dst00");
Node* dst01 = GetNode(*graph, "dst01");
Node* dst10 = GetNode(*graph, "dst10");
Node* dst11 = GetNode(*graph, "dst11");
EXPECT_EQ(const0->assigned_device_name(), kCpu0);
EXPECT_EQ(const1->assigned_device_name(), kCpu1);
EXPECT_TRUE(IsEdge(const0, dst00));
EXPECT_TRUE(IsEdge(const0, dst01));
EXPECT_TRUE(IsEdge(const1, dst10));
EXPECT_TRUE(IsEdge(const1, dst11));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/replicate_constants_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/replicate_constants_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0038681c-5490-4984-af4b-34301d2f6652 | cpp | google/cel-cpp | arena_string | common/internal/arena_string.h | common/arena_string_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_INTERNAL_ARENA_STRING_H_
#define THIRD_PARTY_CEL_CPP_COMMON_INTERNAL_ARENA_STRING_H_
#include "absl/strings/string_view.h"
namespace cel::common_internal {
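// ArenaString is a small, trivially copyable, non-owning view: it stores only
// an absl::string_view, so the referenced bytes (typically arena-allocated,
// per the name) must outlive it.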
class ArenaString final {
public:
ArenaString() = default;
ArenaString(const ArenaString&) = default;
ArenaString& operator=(const ArenaString&) = default;
explicit ArenaString(absl::string_view content) : content_(content) {}
typename absl::string_view::size_type size() const { return content_.size(); }
typename absl::string_view::const_pointer data() const {
return content_.data();
}
operator absl::string_view() const { return content_; }
private:
absl::string_view content_;
};
}
#endif | #include "common/arena_string.h"
#include "absl/hash/hash.h"
#include "absl/hash/hash_testing.h"
#include "absl/strings/string_view.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::testing::Eq;
using ::testing::Ge;
using ::testing::Gt;
using ::testing::IsEmpty;
using ::testing::Le;
using ::testing::Lt;
using ::testing::Ne;
using ::testing::SizeIs;
TEST(ArenaString, Default) {
ArenaString string;
EXPECT_THAT(string, IsEmpty());
EXPECT_THAT(string, SizeIs(0));
EXPECT_THAT(string, Eq(ArenaString()));
}
TEST(ArenaString, Iterator) {
ArenaString string = ArenaString::Static("Hello World!");
auto it = string.cbegin();
EXPECT_THAT(*it++, Eq('H'));
EXPECT_THAT(*it++, Eq('e'));
EXPECT_THAT(*it++, Eq('l'));
EXPECT_THAT(*it++, Eq('l'));
EXPECT_THAT(*it++, Eq('o'));
EXPECT_THAT(*it++, Eq(' '));
EXPECT_THAT(*it++, Eq('W'));
EXPECT_THAT(*it++, Eq('o'));
EXPECT_THAT(*it++, Eq('r'));
EXPECT_THAT(*it++, Eq('l'));
EXPECT_THAT(*it++, Eq('d'));
EXPECT_THAT(*it++, Eq('!'));
EXPECT_THAT(it, Eq(string.cend()));
}
TEST(ArenaString, ReverseIterator) {
ArenaString string = ArenaString::Static("Hello World!");
auto it = string.crbegin();
EXPECT_THAT(*it++, Eq('!'));
EXPECT_THAT(*it++, Eq('d'));
EXPECT_THAT(*it++, Eq('l'));
EXPECT_THAT(*it++, Eq('r'));
EXPECT_THAT(*it++, Eq('o'));
EXPECT_THAT(*it++, Eq('W'));
EXPECT_THAT(*it++, Eq(' '));
EXPECT_THAT(*it++, Eq('o'));
EXPECT_THAT(*it++, Eq('l'));
EXPECT_THAT(*it++, Eq('l'));
EXPECT_THAT(*it++, Eq('e'));
EXPECT_THAT(*it++, Eq('H'));
EXPECT_THAT(it, Eq(string.crend()));
}
TEST(ArenaString, RemovePrefix) {
ArenaString string = ArenaString::Static("Hello World!");
string.remove_prefix(6);
EXPECT_EQ(string, "World!");
}
TEST(ArenaString, RemoveSuffix) {
ArenaString string = ArenaString::Static("Hello World!");
string.remove_suffix(7);
EXPECT_EQ(string, "Hello");
}
TEST(ArenaString, Equal) {
EXPECT_THAT(ArenaString::Static("1"), Eq(ArenaString::Static("1")));
}
TEST(ArenaString, NotEqual) {
EXPECT_THAT(ArenaString::Static("1"), Ne(ArenaString::Static("2")));
}
TEST(ArenaString, Less) {
EXPECT_THAT(ArenaString::Static("1"), Lt(ArenaString::Static("2")));
}
TEST(ArenaString, LessEqual) {
EXPECT_THAT(ArenaString::Static("1"), Le(ArenaString::Static("1")));
}
TEST(ArenaString, Greater) {
EXPECT_THAT(ArenaString::Static("2"), Gt(ArenaString::Static("1")));
}
TEST(ArenaString, GreaterEqual) {
EXPECT_THAT(ArenaString::Static("1"), Ge(ArenaString::Static("1")));
}
TEST(ArenaString, ImplementsAbslHashCorrectly) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{ArenaString::Static(""), ArenaString::Static("Hello World!"),
ArenaString::Static("How much wood could a woodchuck chuck if a "
"woodchuck could chuck wood?")}));
}
TEST(ArenaString, Hash) {
EXPECT_EQ(absl::HashOf(ArenaString::Static("Hello World!")),
absl::HashOf(absl::string_view("Hello World!")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/internal/arena_string.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/arena_string_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
40856a9f-870b-42bd-a43a-53b03f6a6ef2 | cpp | google/cel-cpp | proto_wire | internal/proto_wire.cc | internal/proto_wire_test.cc | #include "internal/proto_wire.h"
#include <limits>
#include <string>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
namespace cel::internal {
bool SkipLengthValue(absl::Cord& data, ProtoWireType type) {
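  // Skips a single value of the given wire type: varints by their encoded
  // width, fixed64/fixed32 by 8/4 bytes, and length-delimited values by their
  // length prefix plus payload. Start/end-group types are rejected.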
switch (type) {
case ProtoWireType::kVarint:
if (auto result = VarintDecode<uint64_t>(data);
ABSL_PREDICT_TRUE(result.has_value())) {
data.RemovePrefix(result->size_bytes);
return true;
}
return false;
case ProtoWireType::kFixed64:
if (ABSL_PREDICT_FALSE(data.size() < 8)) {
return false;
}
data.RemovePrefix(8);
return true;
case ProtoWireType::kLengthDelimited:
if (auto result = VarintDecode<uint32_t>(data);
ABSL_PREDICT_TRUE(result.has_value())) {
if (ABSL_PREDICT_TRUE(data.size() - result->size_bytes >=
result->value)) {
data.RemovePrefix(result->size_bytes + result->value);
return true;
}
}
return false;
case ProtoWireType::kFixed32:
if (ABSL_PREDICT_FALSE(data.size() < 4)) {
return false;
}
data.RemovePrefix(4);
return true;
case ProtoWireType::kStartGroup:
ABSL_FALLTHROUGH_INTENDED;
case ProtoWireType::kEndGroup:
ABSL_FALLTHROUGH_INTENDED;
default:
return false;
}
}
absl::StatusOr<ProtoWireTag> ProtoWireDecoder::ReadTag() {
ABSL_DCHECK(!tag_.has_value());
auto tag = internal::VarintDecode<uint32_t>(data_);
if (ABSL_PREDICT_FALSE(!tag.has_value())) {
return absl::DataLossError(
absl::StrCat("malformed tag encountered decoding ", message_));
}
auto field = internal::DecodeProtoWireTag(tag->value);
if (ABSL_PREDICT_FALSE(!field.has_value())) {
return absl::DataLossError(
absl::StrCat("invalid wire type or field number encountered decoding ",
message_, ": ", static_cast<std::string>(data_)));
}
data_.RemovePrefix(tag->size_bytes);
tag_.emplace(*field);
return *field;
}
absl::Status ProtoWireDecoder::SkipLengthValue() {
ABSL_DCHECK(tag_.has_value());
if (ABSL_PREDICT_FALSE(!internal::SkipLengthValue(data_, tag_->type()))) {
return absl::DataLossError(
absl::StrCat("malformed length or value encountered decoding field ",
tag_->field_number(), " of ", message_));
}
tag_.reset();
return absl::OkStatus();
}
absl::StatusOr<absl::Cord> ProtoWireDecoder::ReadLengthDelimited() {
ABSL_DCHECK(tag_.has_value() &&
tag_->type() == ProtoWireType::kLengthDelimited);
auto length = internal::VarintDecode<uint32_t>(data_);
if (ABSL_PREDICT_FALSE(!length.has_value())) {
return absl::DataLossError(
absl::StrCat("malformed length encountered decoding field ",
tag_->field_number(), " of ", message_));
}
data_.RemovePrefix(length->size_bytes);
if (ABSL_PREDICT_FALSE(data_.size() < length->value)) {
return absl::DataLossError(absl::StrCat(
"out of range length encountered decoding field ", tag_->field_number(),
" of ", message_, ": ", length->value));
}
auto result = data_.Subcord(0, length->value);
data_.RemovePrefix(length->value);
tag_.reset();
return result;
}
absl::Status ProtoWireEncoder::WriteTag(ProtoWireTag tag) {
ABSL_DCHECK(!tag_.has_value());
if (ABSL_PREDICT_FALSE(tag.field_number() == 0)) {
return absl::InvalidArgumentError(
absl::StrCat("invalid field number encountered encoding ", message_));
}
if (ABSL_PREDICT_FALSE(!ProtoWireTypeIsValid(tag.type()))) {
return absl::InvalidArgumentError(
absl::StrCat("invalid wire type encountered encoding field ",
tag.field_number(), " of ", message_));
}
VarintEncode(static_cast<uint32_t>(tag), data_);
tag_.emplace(tag);
return absl::OkStatus();
}
absl::Status ProtoWireEncoder::WriteLengthDelimited(absl::Cord data) {
ABSL_DCHECK(tag_.has_value() &&
tag_->type() == ProtoWireType::kLengthDelimited);
if (ABSL_PREDICT_FALSE(data.size() > std::numeric_limits<uint32_t>::max())) {
return absl::InvalidArgumentError(
absl::StrCat("out of range length encountered encoding field ",
tag_->field_number(), " of ", message_));
}
VarintEncode(static_cast<uint32_t>(data.size()), data_);
data_.Append(std::move(data));
tag_.reset();
return absl::OkStatus();
}
absl::Status ProtoWireEncoder::WriteLengthDelimited(absl::string_view data) {
ABSL_DCHECK(tag_.has_value() &&
tag_->type() == ProtoWireType::kLengthDelimited);
if (ABSL_PREDICT_FALSE(data.size() > std::numeric_limits<uint32_t>::max())) {
return absl::InvalidArgumentError(
absl::StrCat("out of range length encountered encoding field ",
tag_->field_number(), " of ", message_));
}
VarintEncode(static_cast<uint32_t>(data.size()), data_);
data_.Append(data);
tag_.reset();
return absl::OkStatus();
}
} | #include "internal/proto_wire.h"
#include <limits>
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "internal/testing.h"
namespace cel::internal {
template <typename T>
inline constexpr bool operator==(const VarintDecodeResult<T>& lhs,
const VarintDecodeResult<T>& rhs) {
return lhs.value == rhs.value && lhs.size_bytes == rhs.size_bytes;
}
inline constexpr bool operator==(const ProtoWireTag& lhs,
const ProtoWireTag& rhs) {
return lhs.field_number() == rhs.field_number() && lhs.type() == rhs.type();
}
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::Eq;
using ::testing::Optional;
TEST(Varint, Size) {
EXPECT_EQ(VarintSize(int32_t{-1}),
VarintSize(std::numeric_limits<uint64_t>::max()));
EXPECT_EQ(VarintSize(int64_t{-1}),
VarintSize(std::numeric_limits<uint64_t>::max()));
}
TEST(Varint, MaxSize) {
EXPECT_EQ(kMaxVarintSize<bool>, 1);
EXPECT_EQ(kMaxVarintSize<int32_t>, 10);
EXPECT_EQ(kMaxVarintSize<int64_t>, 10);
EXPECT_EQ(kMaxVarintSize<uint32_t>, 5);
EXPECT_EQ(kMaxVarintSize<uint64_t>, 10);
}
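// Each varint byte carries 7 payload bits plus a continuation bit, so a
// 64-bit value needs at most ceil(64 / 7) = 10 bytes. int32_t values are
// sign-extended to 64 bits before encoding, which is why a negative int32_t
// takes the full 10 bytes and kMaxVarintSize<int32_t> is 10 rather than 5.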
namespace {
template <typename T>
absl::Cord VarintEncode(T value) {
absl::Cord cord;
internal::VarintEncode(value, cord);
return cord;
}
}
TEST(Varint, Encode) {
EXPECT_EQ(VarintEncode(true), "\x01");
EXPECT_EQ(VarintEncode(int32_t{1}), "\x01");
EXPECT_EQ(VarintEncode(int64_t{1}), "\x01");
EXPECT_EQ(VarintEncode(uint32_t{1}), "\x01");
EXPECT_EQ(VarintEncode(uint64_t{1}), "\x01");
EXPECT_EQ(VarintEncode(int32_t{-1}),
VarintEncode(std::numeric_limits<uint64_t>::max()));
EXPECT_EQ(VarintEncode(int64_t{-1}),
VarintEncode(std::numeric_limits<uint64_t>::max()));
EXPECT_EQ(VarintEncode(std::numeric_limits<uint32_t>::max()),
"\xff\xff\xff\xff\x0f");
EXPECT_EQ(VarintEncode(std::numeric_limits<uint64_t>::max()),
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01");
}
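// An extra illustrative case (not from the original suite): the canonical
// protobuf example, 300 -> 0xAC 0x02, i.e. low 7 bits first, with the high
// bit of each byte acting as a continuation flag.
TEST(Varint, EncodeExample300) {
  EXPECT_EQ(VarintEncode(uint64_t{300}), "\xac\x02");
}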
TEST(Varint, Decode) {
EXPECT_THAT(VarintDecode<bool>(absl::Cord("\x01")),
Optional(Eq(VarintDecodeResult<bool>{true, 1})));
EXPECT_THAT(VarintDecode<int32_t>(absl::Cord("\x01")),
Optional(Eq(VarintDecodeResult<int32_t>{1, 1})));
EXPECT_THAT(VarintDecode<int64_t>(absl::Cord("\x01")),
Optional(Eq(VarintDecodeResult<int64_t>{1, 1})));
EXPECT_THAT(VarintDecode<uint32_t>(absl::Cord("\x01")),
Optional(Eq(VarintDecodeResult<uint32_t>{1, 1})));
EXPECT_THAT(VarintDecode<uint64_t>(absl::Cord("\x01")),
Optional(Eq(VarintDecodeResult<uint64_t>{1, 1})));
EXPECT_THAT(VarintDecode<uint32_t>(absl::Cord("\xff\xff\xff\xff\x0f")),
Optional(Eq(VarintDecodeResult<uint32_t>{
std::numeric_limits<uint32_t>::max(), 5})));
EXPECT_THAT(VarintDecode<int64_t>(
absl::Cord("\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01")),
Optional(Eq(VarintDecodeResult<int64_t>{int64_t{-1}, 10})));
EXPECT_THAT(VarintDecode<uint64_t>(
absl::Cord("\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01")),
Optional(Eq(VarintDecodeResult<uint64_t>{
std::numeric_limits<uint64_t>::max(), 10})));
}
namespace {
template <typename T>
absl::Cord Fixed64Encode(T value) {
absl::Cord cord;
internal::Fixed64Encode(value, cord);
return cord;
}
template <typename T>
absl::Cord Fixed32Encode(T value) {
absl::Cord cord;
internal::Fixed32Encode(value, cord);
return cord;
}
}
TEST(Fixed64, Encode) {
EXPECT_EQ(Fixed64Encode(0.0), Fixed64Encode(uint64_t{0}));
}
TEST(Fixed64, Decode) {
EXPECT_THAT(Fixed64Decode<double>(Fixed64Encode(0.0)), Optional(Eq(0.0)));
}
TEST(Fixed32, Encode) {
EXPECT_EQ(Fixed32Encode(0.0f), Fixed32Encode(uint32_t{0}));
}
TEST(Fixed32, Decode) {
EXPECT_THAT(Fixed32Decode<float>(
absl::Cord(absl::string_view("\x00\x00\x00\x00", 4))),
Optional(Eq(0.0)));
}
TEST(DecodeProtoWireTag, Uint64TooLarge) {
EXPECT_THAT(DecodeProtoWireTag(uint64_t{1} << 32), Eq(absl::nullopt));
}
TEST(DecodeProtoWireTag, Uint64ZeroFieldNumber) {
EXPECT_THAT(DecodeProtoWireTag(uint64_t{0}), Eq(absl::nullopt));
}
TEST(DecodeProtoWireTag, Uint32ZeroFieldNumber) {
EXPECT_THAT(DecodeProtoWireTag(uint32_t{0}), Eq(absl::nullopt));
}
TEST(DecodeProtoWireTag, Success) {
EXPECT_THAT(DecodeProtoWireTag(uint64_t{1} << 3),
Optional(Eq(ProtoWireTag(1, ProtoWireType::kVarint))));
EXPECT_THAT(DecodeProtoWireTag(uint32_t{1} << 3),
Optional(Eq(ProtoWireTag(1, ProtoWireType::kVarint))));
}
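// A wire tag packs (field_number << 3) | wire_type, so the raw value 1 << 3
// is field 1 with wire type 0 (varint), matching the expectations above.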
void TestSkipLengthValueSuccess(absl::Cord data, ProtoWireType type,
size_t skipped) {
size_t before = data.size();
EXPECT_TRUE(SkipLengthValue(data, type));
EXPECT_EQ(before - skipped, data.size());
}
void TestSkipLengthValueFailure(absl::Cord data, ProtoWireType type) {
EXPECT_FALSE(SkipLengthValue(data, type));
}
TEST(SkipLengthValue, Varint) {
TestSkipLengthValueSuccess(
absl::Cord("\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01"),
ProtoWireType::kVarint, 10);
TestSkipLengthValueSuccess(absl::Cord("\x01"), ProtoWireType::kVarint, 1);
TestSkipLengthValueFailure(
absl::Cord("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01"),
ProtoWireType::kVarint);
}
TEST(SkipLengthValue, Fixed64) {
TestSkipLengthValueSuccess(
absl::Cord(
absl::string_view("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8)),
ProtoWireType::kFixed64, 8);
TestSkipLengthValueFailure(absl::Cord(absl::string_view("\x00", 1)),
ProtoWireType::kFixed64);
}
TEST(SkipLengthValue, LengthDelimited) {
TestSkipLengthValueSuccess(absl::Cord(absl::string_view("\x00", 1)),
ProtoWireType::kLengthDelimited, 1);
TestSkipLengthValueSuccess(absl::Cord(absl::string_view("\x01\x00", 2)),
ProtoWireType::kLengthDelimited, 2);
TestSkipLengthValueFailure(absl::Cord("\x01"),
ProtoWireType::kLengthDelimited);
}
TEST(SkipLengthValue, Fixed32) {
TestSkipLengthValueSuccess(
absl::Cord(absl::string_view("\x00\x00\x00\x00", 4)),
ProtoWireType::kFixed32, 4);
TestSkipLengthValueFailure(absl::Cord(absl::string_view("\x00", 1)),
ProtoWireType::kFixed32);
}
TEST(SkipLengthValue, Decoder) {
{
ProtoWireDecoder decoder("", absl::Cord(absl::string_view("\x0a\x00", 2)));
ASSERT_TRUE(decoder.HasNext());
EXPECT_THAT(
decoder.ReadTag(),
IsOkAndHolds(Eq(ProtoWireTag(1, ProtoWireType::kLengthDelimited))));
EXPECT_OK(decoder.SkipLengthValue());
ASSERT_FALSE(decoder.HasNext());
}
}
TEST(ProtoWireEncoder, BadTag) {
absl::Cord data;
ProtoWireEncoder encoder("foo.Bar", data);
EXPECT_TRUE(encoder.empty());
EXPECT_EQ(encoder.size(), 0);
EXPECT_OK(encoder.WriteTag(ProtoWireTag(1, ProtoWireType::kVarint)));
EXPECT_OK(encoder.WriteVarint(1));
encoder.EnsureFullyEncoded();
EXPECT_FALSE(encoder.empty());
EXPECT_EQ(encoder.size(), 2);
EXPECT_EQ(data, "\x08\x01");
}
TEST(ProtoWireEncoder, Varint) {
absl::Cord data;
ProtoWireEncoder encoder("foo.Bar", data);
EXPECT_TRUE(encoder.empty());
EXPECT_EQ(encoder.size(), 0);
EXPECT_OK(encoder.WriteTag(ProtoWireTag(1, ProtoWireType::kVarint)));
EXPECT_OK(encoder.WriteVarint(1));
encoder.EnsureFullyEncoded();
EXPECT_FALSE(encoder.empty());
EXPECT_EQ(encoder.size(), 2);
EXPECT_EQ(data, "\x08\x01");
}
TEST(ProtoWireEncoder, Fixed32) {
absl::Cord data;
ProtoWireEncoder encoder("foo.Bar", data);
EXPECT_TRUE(encoder.empty());
EXPECT_EQ(encoder.size(), 0);
EXPECT_OK(encoder.WriteTag(ProtoWireTag(1, ProtoWireType::kFixed32)));
EXPECT_OK(encoder.WriteFixed32(0.0f));
encoder.EnsureFullyEncoded();
EXPECT_FALSE(encoder.empty());
EXPECT_EQ(encoder.size(), 5);
EXPECT_EQ(data, absl::string_view("\x0d\x00\x00\x00\x00", 5));
}
TEST(ProtoWireEncoder, Fixed64) {
absl::Cord data;
ProtoWireEncoder encoder("foo.Bar", data);
EXPECT_TRUE(encoder.empty());
EXPECT_EQ(encoder.size(), 0);
EXPECT_OK(encoder.WriteTag(ProtoWireTag(1, ProtoWireType::kFixed64)));
EXPECT_OK(encoder.WriteFixed64(0.0));
encoder.EnsureFullyEncoded();
EXPECT_FALSE(encoder.empty());
EXPECT_EQ(encoder.size(), 9);
EXPECT_EQ(data, absl::string_view("\x09\x00\x00\x00\x00\x00\x00\x00\x00", 9));
}
TEST(ProtoWireEncoder, LengthDelimited) {
absl::Cord data;
ProtoWireEncoder encoder("foo.Bar", data);
EXPECT_TRUE(encoder.empty());
EXPECT_EQ(encoder.size(), 0);
EXPECT_OK(encoder.WriteTag(ProtoWireTag(1, ProtoWireType::kLengthDelimited)));
EXPECT_OK(encoder.WriteLengthDelimited(absl::Cord("foo")));
encoder.EnsureFullyEncoded();
EXPECT_FALSE(encoder.empty());
EXPECT_EQ(encoder.size(), 5);
EXPECT_EQ(data,
"\x0a\x03"
"foo");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/proto_wire.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/proto_wire_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
0d5029fc-e5a7-4df6-8d8f-b8c571a6e12a | cpp | tensorflow/tensorflow | writer | tensorflow/lite/tools/serialization/writer.cc | tensorflow/lite/tools/serialization/writer_test.cc | #include <iostream>
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/tools/serialization/writer_lib.h"
int main(int argc, char* argv[]) {
if (argc != 3) {
fprintf(stderr, "Usage: %s input_file output_file\n", argv[0]);
return 1;
}
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromFile(argv[1]);
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates
builtin_op_resolver;
tflite::InterpreterBuilder(*model, builtin_op_resolver)(&interpreter);
tflite::ModelWriter writer(interpreter.get());
writer.Write(argv[2]);
return 0;
} | #include <iostream>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/tools/serialization/writer_lib.h"
int main(int argc, char* argv[]) {
if (argc != 2) {
fprintf(stderr, "Usage: %s input_file\n", argv[0]);
return 1;
}
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromFile(argv[1]);
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates
builtin_op_resolver;
tflite::InterpreterBuilder(*model, builtin_op_resolver)(&interpreter);
tflite::ModelWriter writer(interpreter.get());
std::unique_ptr<uint8_t[]> output_buffer;
size_t output_buffer_size;
writer.GetBuffer(&output_buffer, &output_buffer_size);
std::unique_ptr<tflite::Interpreter> new_interpreter;
model = tflite::FlatBufferModel::BuildFromBuffer(
reinterpret_cast<char*>(output_buffer.get()), output_buffer_size);
tflite::InterpreterBuilder(*model, builtin_op_resolver)(&new_interpreter);
if (new_interpreter->AllocateTensors() != kTfLiteOk) {
fprintf(stderr, "AllocateTensors failed on the round-tripped model.\n");
return 1;
}
return 0;
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/serialization/writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/serialization/writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c7794f54-4a87-4d2c-aa6d-221068b35fe2 | cpp | tensorflow/tensorflow | reduce_dataset_op | tensorflow/core/kernels/data/reduce_dataset_op.cc | tensorflow/core/kernels/data/reduce_dataset_op_test.cc | #include "tensorflow/core/kernels/data/reduce_dataset_op.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
namespace {
const char kOutputShapes[] = "output_shapes";
const char kOutputTypes[] = "output_types";
}
ReduceDatasetOp::ReduceDatasetOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_reduce_dataset") {
FunctionMetadata::Params params;
OP_REQUIRES_OK(ctx, ctx->GetAttr("use_inter_op_parallelism",
¶ms.use_inter_op_parallelism));
params.use_default_device = false;
OP_REQUIRES_OK(ctx,
FunctionMetadata::Create(ctx, "f", params, &func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
Status ReduceDatasetOp::DoCompute(OpKernelContext* ctx) {
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode("ReduceDatasetOp::DoCompute",
{{"id", ctx->step_id()}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("ReduceDatasetOp");
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
OpInputList inputs;
TF_RETURN_IF_ERROR(ctx->input_list("initial_state", &inputs));
std::vector<Tensor> state(inputs.begin(), inputs.end());
std::unique_ptr<CapturedFunction> captured_func;
TF_RETURN_IF_ERROR(CapturedFunction::Create(
ctx, func_metadata_, "other_arguments", &captured_func));
IteratorContext::Params params(ctx);
auto function_handle_cache =
std::make_unique<FunctionHandleCache>(params.flr);
params.function_handle_cache = function_handle_cache.get();
ResourceMgr resource_mgr;
params.resource_mgr = &resource_mgr;
CancellationManager cancellation_manager(ctx->cancellation_manager());
params.cancellation_manager = &cancellation_manager;
IteratorContext iter_ctx(std::move(params));
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func;
TF_RETURN_IF_ERROR(
captured_func->Instantiate(&iter_ctx, &instantiated_captured_func));
std::unique_ptr<IteratorBase> iterator;
if (ctx->function_library()->device()->device_type() == DEVICE_CPU) {
DatasetBase* finalized_dataset = nullptr;
TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset));
core::ScopedUnref unref(finalized_dataset);
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(
&iter_ctx, nullptr, "ReduceIterator", &iterator));
} else {
TF_RETURN_IF_ERROR(dataset->MakeIterator(&iter_ctx, nullptr,
"ReduceIterator", &iterator));
}
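  // Fold the reduce function over the dataset: for each input element, invoke
  // f(state..., element...) and replace the state with the function's output.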
while (true) {
if (ctx->cancellation_manager()->IsCancelled()) {
return errors::Cancelled("Operation was cancelled");
}
std::vector<Tensor> next_input_element;
bool end_of_input;
TF_RETURN_IF_ERROR(
iterator->GetNext(&iter_ctx, &next_input_element, &end_of_input));
if (end_of_input) {
break;
}
std::vector<Tensor> args;
args.reserve(state.size() + next_input_element.size());
std::copy(state.begin(), state.end(), std::back_inserter(args));
std::copy(next_input_element.begin(), next_input_element.end(),
std::back_inserter(args));
std::vector<Tensor> reduce_func_output;
TF_RETURN_IF_ERROR(instantiated_captured_func->Run(
&iter_ctx, std::move(args), &reduce_func_output, nullptr));
if (reduce_func_output.size() != state.size()) {
return errors::InvalidArgument(
"The number of components of the initial state and the "
"reduce "
"function output does not match. (initial_state=",
state.size(), ", output=", reduce_func_output.size(), ").");
}
std::swap(reduce_func_output, state);
}
TF_RETURN_IF_ERROR(VerifyTypesMatch(output_types_, state));
TF_RETURN_IF_ERROR(VerifyShapesCompatible(output_shapes_, state));
for (size_t i = 0; i < state.size(); ++i) {
ctx->set_output(i, state[i]);
}
return absl::OkStatus();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ReduceDataset").Device(DEVICE_CPU),
ReduceDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("ReduceDataset");
}
}
} | #include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "reduce_dataset";
class ReduceDatasetParams : public DatasetParams {
public:
template <typename T>
ReduceDatasetParams(T input_dataset_params, std::vector<Tensor> initial_state,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_state, DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
bool use_inter_op_parallelism, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
initial_state_(std::move(initial_state)),
other_arguments_(std::move(other_arguments)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_state_(std::move(type_state)),
type_arguments_(std::move(type_arguments)),
use_inter_op_parallelism_(use_inter_op_parallelism) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = initial_state_;
input_tensors.insert(input_tensors.end(), other_arguments_.begin(),
other_arguments_.end());
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back("input_dataset");
for (int i = 0; i < initial_state_.size(); ++i) {
input_names->emplace_back(strings::StrCat("initial_state_", i));
}
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(strings::StrCat("other_arguments_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
*attr_vector = {{"f", func_},
{"Tstate", type_state_},
{"Targuments", type_arguments_},
{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"use_inter_op_parallelism", use_inter_op_parallelism_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return "Reduce"; }
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> initial_state_;
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_state_;
DataTypeVector type_arguments_;
bool use_inter_op_parallelism_;
};
class ReduceDatasetOpTest : public DatasetOpsTestBase {};
ReduceDatasetParams ReduceDatasetParams1() {
return ReduceDatasetParams(
RangeDatasetParams(0, 10, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}}),
{},
FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}}),
{test::function::XAddY()},
{DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
kNodeName);
}
ReduceDatasetParams ReduceDatasetParams2() {
return ReduceDatasetParams(
RangeDatasetParams(1, 10, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}, {1}}),
{},
FunctionDefHelper::FunctionRef("XPlusOneXTimesY", {{"T", DT_INT64}}),
{test::function::XPlusOneXTimesY()},
{DT_INT64, DT_INT64},
{},
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
true,
kNodeName);
}
ReduceDatasetParams ReduceDatasetParams3() {
return ReduceDatasetParams(
RangeDatasetParams(0, 0, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}, {3}}),
{},
FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}}),
{test::function::XAddY()},
{DT_INT64, DT_INT64},
{},
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
true,
kNodeName);
}
std::vector<GetNextTestCase<ReduceDatasetParams>> GetNextTestCases() {
return {{
ReduceDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{46}})},
{ReduceDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}),
{{10}, {1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9}})},
{
ReduceDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{1}, {3}})}};
}
class ParameterizedReduceDatasetOpTest
: public ReduceDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<ReduceDatasetParams>> {};
TEST_P(ParameterizedReduceDatasetOpTest, Compute) {
auto test_case = GetParam();
TF_ASSERT_OK(InitializeRuntime(test_case.dataset_params));
std::vector<Tensor> output;
TF_ASSERT_OK(RunDatasetOp(test_case.dataset_params, &output));
TF_EXPECT_OK(
ExpectEqual(test_case.expected_outputs, output, true));
}
INSTANTIATE_TEST_SUITE_P(ReduceDatasetOpTest, ParameterizedReduceDatasetOpTest,
::testing::ValuesIn(GetNextTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/reduce_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/reduce_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe2113da-8f9e-41d6-a72e-7ca1a449fdba | cpp | tensorflow/tensorflow | debug_ops | tensorflow/core/ops/debug_ops.cc | tensorflow/core/kernels/debug_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
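// These debug ops all allow uninitialized input, presumably so a watch can be
// attached to any tensor in a graph, including ones that have not been
// written yet.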
REGISTER_OP("Copy")
.Input("input: T")
.Output("output: T")
.Attr("T: type")
.Attr("tensor_name: string = ''")
.Attr("debug_ops_spec: list(string) = []")
.SetAllowsUninitializedInput()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("CopyHost")
.Input("input: T")
.Output("output: T")
.Attr("T: type")
.Attr("tensor_name: string = ''")
.Attr("debug_ops_spec: list(string) = []")
.SetAllowsUninitializedInput()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("DebugIdentity")
.Input("input: T")
.Output("output: T")
.Attr("T: type")
.Attr("device_name: string = ''")
.Attr("tensor_name: string = ''")
.Attr("debug_urls: list(string) = []")
.Attr("gated_grpc: bool = false")
.SetAllowsUninitializedInput()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("DebugIdentityV3")
.Input("input: T")
.Output("output: T")
.Attr("T: type")
.Attr("device_name: string = ''")
.Attr("tensor_name: string = ''")
.Attr("io_of_node: string = ''")
.Attr("is_input: bool = false")
.Attr("io_index: int = -1")
.SetIsStateful()
.Attr("debug_urls: list(string) = []")
.Attr("gated_grpc: bool = false")
.SetAllowsUninitializedInput()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("DebugNanCount")
.Input("input: T")
.Output("output: int64")
.Attr("T: type")
.Attr("device_name: string = ''")
.Attr("tensor_name: string = ''")
.Attr("debug_urls: list(string) = []")
.Attr("gated_grpc: bool = false")
.SetAllowsUninitializedInput()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("DebugNumericSummary")
.Input("input: T")
.Output("output: double")
.Attr("T: type")
.Attr("device_name: string = ''")
.Attr("tensor_name: string = ''")
.Attr("debug_urls: list(string) = []")
.Attr("lower_bound: float = -inf")
.Attr("upper_bound: float = inf")
.Attr("mute_if_healthy: bool = false")
.Attr("gated_grpc: bool = false")
.SetAllowsUninitializedInput()
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("DebugIdentityV2")
.Input("input: T")
.Output("output: T")
.Attr("T: type")
.Attr("tfdbg_context_id: string = ''")
.Attr("op_name: string = ''")
.Attr("output_slot: int = -1")
.Attr("tensor_debug_mode: int = -1")
.Attr("debug_urls: list(string) = []")
.Attr("circular_buffer_size: int = 1000")
.Attr("tfdbg_run_id: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("DebugNumericSummaryV2")
.Input("input: T")
.Output("output: output_dtype")
.Attr("output_dtype: {float32, float64} = DT_FLOAT")
.Attr("T: type")
.Attr("tensor_debug_mode: int = -1")
.Attr("tensor_id: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
} | #include <string.h>
#include <fstream>
#include <vector>
#include "tensorflow/core/debug/debug_io_utils.h"
#include "tensorflow/core/debug/debug_node_key.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
class DebugIdentityOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type, const std::vector<string>& debug_urls) {
env_ = Env::Default();
TF_CHECK_OK(NodeDefBuilder("op", "DebugIdentity")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Attr("debug_urls", debug_urls)
.Finalize(node_def()));
return InitOp();
}
Status Init(DataType input_type) {
std::vector<string> empty_debug_urls;
return Init(input_type, empty_debug_urls);
}
Env* env_;
};
TEST_F(DebugIdentityOpTest, Int32Success_6) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(DebugIdentityOpTest, Int32Success_6_FileURLs) {
const int kNumDumpDirs = 3;
const string tmp_dir = testing::TmpDir();
std::vector<string> dump_roots;
std::vector<string> debug_urls;
for (int i = 0; i < kNumDumpDirs; ++i) {
const string dump_root = strings::StrCat(tmp_dir, "_", i);
dump_roots.push_back(dump_root);
    debug_urls.push_back(strings::StrCat("file://", dump_root));
}
uint64 wall_time = Env::Default()->NowMicros();
TF_ASSERT_OK(Init(DT_INT32, debug_urls));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
for (int i = 0; i < kNumDumpDirs; ++i) {
ASSERT_TRUE(env_->FileExists(dump_roots[i]).ok());
ASSERT_TRUE(env_->IsDirectory(dump_roots[i]).ok());
std::vector<string> device_roots;
FileSystem* fs = nullptr;
TF_ASSERT_OK(Env::Default()->GetFileSystemForFile(dump_roots[i], &fs));
std::vector<string> children;
TF_ASSERT_OK(fs->GetChildren(dump_roots[i], &children));
const string kDeviceDirPrefix = strings::StrCat(
DebugNodeKey::kMetadataFilePrefix, DebugNodeKey::kDeviceTag);
for (const string child : children) {
if (!strncmp(child.c_str(), kDeviceDirPrefix.c_str(),
kDeviceDirPrefix.size())) {
device_roots.push_back(io::JoinPath(dump_roots[i], child));
}
}
ASSERT_EQ(1, device_roots.size());
const string& device_root = device_roots[0];
TF_ASSERT_OK(Env::Default()->GetFileSystemForFile(device_root, &fs));
TF_ASSERT_OK(fs->GetChildren(device_root, &children));
int dump_files_found = 0;
for (const string child : children) {
dump_files_found++;
const string dump_file_path = io::JoinPath(device_root, child);
std::fstream ifs(dump_file_path, std::ios::in | std::ios::binary);
Event event;
event.ParseFromIstream(&ifs);
ifs.close();
ASSERT_GE(event.wall_time(), wall_time);
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(strings::StrCat("FakeTensor", ":", 0, ":", "DebugIdentity"),
event.summary().value(0).node_name());
Tensor tensor_prime(DT_INT32);
ASSERT_TRUE(tensor_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(TensorShape({6}), tensor_prime.shape());
for (int j = 0; j < 6; ++j) {
ASSERT_EQ(j + 1, tensor_prime.flat<int32>()(j));
}
}
ASSERT_EQ(1, dump_files_found);
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(env_->DeleteRecursively(dump_roots[i], &undeleted_files,
&undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
TEST_F(DebugIdentityOpTest, Int32Success_2_3) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(DebugIdentityOpTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_STRING));
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
class DebugNanCountOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNanCount")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(DebugNanCountOpTest, Float_has_NaNs) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({6}),
{1.1, std::numeric_limits<float>::quiet_NaN(), 3.3,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(), 6.6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1}));
test::FillValues<int64_t>(&expected_nan_count, {3});
test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0));
}
TEST_F(DebugNanCountOpTest, Float_no_NaNs) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(
TensorShape({6}),
{1.1, 2.2, 3.3, std::numeric_limits<float>::infinity(), 5.5, 6.6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1}));
test::FillValues<int64_t>(&expected_nan_count, {0});
test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0));
}
TEST_F(DebugNanCountOpTest, Double_has_NaNs) {
TF_ASSERT_OK(Init(DT_DOUBLE));
AddInputFromArray<double>(TensorShape({6}),
{1.1, std::numeric_limits<double>::quiet_NaN(), 3.3,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(), 6.6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1}));
test::FillValues<int64_t>(&expected_nan_count, {3});
test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0));
}
TEST_F(DebugNanCountOpTest, Double_no_NaNs) {
TF_ASSERT_OK(Init(DT_DOUBLE));
AddInputFromArray<double>(
TensorShape({6}),
{1.1, 2.2, 3.3, std::numeric_limits<double>::infinity(), 5.5, 6.6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1}));
test::FillValues<int64_t>(&expected_nan_count, {0});
test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0));
}
class DebugNumericSummaryOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Finalize(node_def()));
return InitOp();
}
Status InitGated(DataType input_type, const std::vector<string>& debug_urls) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Attr("gated_grpc", true)
.Attr("debug_urls", debug_urls)
.Finalize(node_def()));
return InitOp();
}
#if defined(PLATFORM_GOOGLE)
void ClearEnabledWatchKeys() { DebugGrpcIO::ClearEnabledWatchKeys(); }
#endif
};
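// DebugNumericSummary emits a 1-D double tensor with the layout:
// [0] is_initialized, [1] element count, [2] NaN count, [3] count of elements
// below lower_bound (-inf by default), [4] negative count, [5] zero count,
// [6] positive count, [7] count of elements above upper_bound (+inf by
// default), [8] min, [9] max, [10] mean and [11] variance over the remaining
// finite values, [12] dtype enum, [13] rank, [14...] dimension sizes. The
// expected tensors below follow this layout.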
TEST_F(DebugNumericSummaryOpTest, Float_full_house) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(
TensorShape({18}),
{std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(), 0.0f, 0.0f, 0.0f, -1.0f, -3.0f,
3.0f, 7.0f, -std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN()});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({15}));
test::FillValues<double>(
&expected,
{1.0,
18.0,
4.0,
2.0,
2.0,
3.0,
2.0,
5.0,
-3.0,
7.0,
0.85714285714,
8.97959183673,
static_cast<double>(DT_FLOAT),
1.0,
18.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Double_full_house) {
TF_ASSERT_OK(Init(DT_DOUBLE));
AddInputFromArray<double>(
TensorShape({18}),
{std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(), 0.0, 0.0, 0.0, -1.0, -3.0, 3.0,
7.0, -std::numeric_limits<double>::infinity(),
-std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN()});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({15}));
test::FillValues<double>(
&expected,
{1.0,
18.0,
4.0,
2.0,
2.0,
3.0,
2.0,
5.0,
-3.0,
7.0,
0.85714285714,
8.97959183673,
static_cast<double>(DT_DOUBLE),
1.0,
18.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Float_only_valid_values) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({2, 3}),
{0.0f, 0.0f, -1.0f, 3.0f, 3.0f, 7.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(
&expected,
{1.0,
6.0,
0.0,
0.0,
1.0,
2.0,
3.0,
0.0,
-1.0,
7.0,
2.0,
7.33333333333,
static_cast<double>(DT_FLOAT),
2.0,
2.0, 3.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Float_all_Inf_or_NaN) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({3, 3}),
{std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(),
-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN()});
TF_ASSERT_OK(RunOpKernel());
Tensor output_tensor = *GetOutput(0);
const double* output = output_tensor.template flat<double>().data();
ASSERT_NEAR(1.0, output[0], 1e-8);
ASSERT_NEAR(9.0, output[1], 1e-8);
ASSERT_NEAR(4.0, output[2], 1e-8);
ASSERT_NEAR(2.0, output[3], 1e-8);
ASSERT_NEAR(0.0, output[4], 1e-8);
ASSERT_NEAR(0.0, output[5], 1e-8);
ASSERT_NEAR(0.0, output[6], 1e-8);
ASSERT_NEAR(3.0, output[7], 1e-8);
ASSERT_EQ(std::numeric_limits<float>::infinity(), output[8]);
ASSERT_EQ(-std::numeric_limits<float>::infinity(), output[9]);
ASSERT_TRUE(Eigen::numext::isnan(output[10]));
ASSERT_TRUE(Eigen::numext::isnan(output[11]));
ASSERT_EQ(static_cast<double>(DT_FLOAT), output[12]);
ASSERT_EQ(2.0, output[13]);
ASSERT_EQ(3.0, output[14]);
ASSERT_EQ(3.0, output[15]);
}
TEST_F(DebugNumericSummaryOpTest, Many_dimensions_tensor_shape) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({1, 3, 1, 1, 1, 1, 1}),
{std::numeric_limits<float>::quiet_NaN(),
-std::numeric_limits<float>::infinity(), -8.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({21}));
test::FillValues<double>(&expected,
{1.0,
3.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
-8.0,
-8.0,
-8.0,
0.0,
static_cast<double>(DT_FLOAT),
7.0,
1.0,
3.0,
1.0,
1.0,
1.0,
1.0,
1.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Scalar_tensor_shape) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({}), {42.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({14}));
test::FillValues<double>(&expected,
{1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
42.0,
42.0,
42.0,
0.0,
static_cast<double>(DT_FLOAT),
0.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Int16Success) {
TF_ASSERT_OK(Init(DT_INT16));
AddInputFromArray<int16>(TensorShape({4, 1}), {-1, -3, 3, 7});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(&expected,
{1.0,
4.0,
0.0,
0.0,
2.0,
0.0,
2.0,
0.0,
-3.0,
7.0,
1.5,
14.75,
static_cast<double>(DT_INT16),
2.0,
4.0, 1.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Int32Success) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {0, 0, -1, 3, 3, 7});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(
&expected,
{1.0,
6.0,
0.0,
0.0,
1.0,
2.0,
3.0,
0.0,
-1.0,
7.0,
2.0,
7.33333333333,
static_cast<double>(DT_INT32),
2.0,
2.0, 3.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Int64Success) {
TF_ASSERT_OK(Init(DT_INT64));
AddInputFromArray<int64_t>(TensorShape({2, 2, 2}), {0, 0, -1, 3, 3, 7, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({17}));
test::FillValues<double>(&expected,
{1.0,
8.0,
0.0,
0.0,
1.0,
4.0,
3.0,
0.0,
-1.0,
7.0,
1.5,
6.25,
static_cast<double>(DT_INT64),
3.0,
2.0, 2.0, 2.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, UInt8Success) {
TF_ASSERT_OK(Init(DT_UINT8));
AddInputFromArray<uint8>(TensorShape({1, 5}), {0, 10, 30, 30, 70});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(&expected,
{1.0,
5.0,
0.0,
0.0,
0.0,
1.0,
4.0,
0.0,
0.0,
70.0,
28.0,
576.0,
static_cast<double>(DT_UINT8),
2.0,
1.0, 5.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, BoolSuccess) {
TF_ASSERT_OK(Init(DT_BOOL));
AddInputFromArray<bool>(TensorShape({2, 3}),
{false, false, true, true, true, false});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(&expected,
{1.0,
6.0,
0.0,
0.0,
0.0,
3.0,
3.0,
0.0,
0.0,
1.0,
0.5,
0.25,
static_cast<double>(DT_BOOL),
2.0,
2.0, 3.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
#if defined(PLATFORM_GOOGLE)
TEST_F(DebugNumericSummaryOpTest, DisabledDueToEmptyEnabledSet) {
ClearEnabledWatchKeys();
  // Placeholder gRPC debug URL; with no watch keys enabled the op stays
  // gated off, so the address is never actually contacted.
  std::vector<string> debug_urls({"grpc://localhost:3333"});
TF_ASSERT_OK(InitGated(DT_FLOAT, debug_urls));
AddInputFromArray<float>(TensorShape({2, 2}), {1.0, 3.0, 3.0, 7.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_disabled(allocator(), DT_DOUBLE, TensorShape({0}));
test::ExpectTensorNear<double>(expected_disabled, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, DisabledDueToNonMatchingWatchKey) {
ClearEnabledWatchKeys();
DebugGrpcIO::SetDebugNodeKeyGrpcState(
"grpc:
EventReply::DebugOpStateChange::READ_ONLY);
  std::vector<string> debug_urls({"grpc://localhost:3333"});
TF_ASSERT_OK(InitGated(DT_FLOAT, debug_urls));
AddInputFromArray<float>(TensorShape({2, 2}), {1.0, 3.0, 3.0, 7.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_disabled(allocator(), DT_DOUBLE, TensorShape({0}));
test::ExpectTensorNear<double>(expected_disabled, *GetOutput(0), 1e-8);
}
#endif
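// With a finite lower_bound, out-of-range negative elements (here -3.0f,
// which falls below -1.2f) move from the negative-count slot into slot 3
// alongside the true -inf values.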
class DebugNumericSummaryOpCustomLowerBoundTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Attr("lower_bound", -1.2f)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(DebugNumericSummaryOpCustomLowerBoundTest, Float_full_house) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(
TensorShape({18}),
{std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(), 0.0f, 0.0f, 0.0f, -1.0f, -3.0f,
3.0f, 7.0f, -std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN()});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({15}));
test::FillValues<double>(
&expected,
{1.0,
18.0,
4.0,
3.0,
1.0,
3.0,
2.0,
5.0,
-3.0,
7.0,
0.85714285714,
8.97959183673,
static_cast<double>(DT_FLOAT),
1.0,
18.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
class DebugNumericSummaryOpCustomLowerUpperBoundsTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Attr("lower_bound", -0.5f)
.Attr("upper_bound", 3.6f)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(DebugNumericSummaryOpCustomLowerUpperBoundsTest, Int32Success) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {0, 0, -1, 3, 3, 7});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(
&expected,
{1.0,
6.0,
0.0,
1.0,
0.0,
2.0,
2.0,
1.0,
-1.0,
7.0,
2.0,
7.33333333333,
static_cast<double>(DT_INT32),
2.0,
2.0, 3.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/debug_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/debug_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f0b886bd-62f2-4655-ab1b-f6fd4c08d769 | cpp | tensorflow/tensorflow | xla_builder | third_party/xla/xla/hlo/builder/xla_builder.cc | third_party/xla/xla/hlo/builder/xla_builder_test.cc | #include "xla/hlo/builder/xla_builder.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <queue>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/padding.h"
#include "xla/hlo/builder/sharding_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/sharding_op_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
namespace {
static const char kNameSeparator = '.';
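// Builder-generated names have the form "<base_name>.<unique id>", e.g.
// "add.42"; the helpers below split and rebuild that form.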
std::string GetBaseName(const std::string& name, char separator) {
auto pos = name.rfind(separator);
CHECK_NE(pos, std::string::npos) << name;
return name.substr(0, pos);
}
std::string GetFullName(const std::string& base_name, char separator,
int64_t id) {
const char separator_str[] = {separator, '\0'};
return StrCat(base_name, separator_str, id);
}
template <typename T>
void SetProtoIdAndName(T* entry, const std::string& base_name, char separator,
int64_t id) {
entry->set_id(id);
entry->set_name(GetFullName(base_name, separator, id));
}
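// Detects the "SetBound" custom call, which only annotates a bound on a
// dynamic dimension and is therefore treated as constant by the
// IsConstantVisitor analysis below.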
bool InstrIsSetBound(const HloInstructionProto* instr_proto) {
HloOpcode opcode = StringToHloOpcode(instr_proto->opcode()).value();
if (opcode == HloOpcode::kCustomCall &&
instr_proto->custom_call_target() == "SetBound") {
return true;
}
return false;
}
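// Parses an OpSharding proto into an HloSharding, normalizes it against the
// instruction's (possibly tuple) shape, validates it, and stores it back on
// the instruction.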
absl::Status NormalizeAndAssignSharing(HloInstructionProto* instr,
const OpSharding& op_sharding) {
Shape shape(instr->shape());
TF_ASSIGN_OR_RETURN(HloSharding sharding,
HloSharding::FromProto(op_sharding));
sharding = sharding.NormalizeTupleSharding(shape);
TF_RETURN_IF_ERROR(sharding.Validate(shape));
*instr->mutable_sharding() = sharding.ToProto();
return absl::OkStatus();
}
}  // namespace
namespace internal {
XlaOp XlaBuilderFriend::BuildAddDependency(XlaBuilder* builder, XlaOp operand,
XlaOp token, const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAddDependency,
{operand, token});
});
}
XlaOp XlaBuilderFriend::BuildFusion(
XlaBuilder* builder, absl::Span<const XlaOp> operands,
absl::string_view fusion_kind, const XlaComputation& fused_computation,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
instr.set_fusion_kind(std::string(fusion_kind));
if (!output_operand_aliasing.empty()) {
for (const auto& pair : output_operand_aliasing) {
auto aliasing = instr.add_output_operand_aliasing();
aliasing->set_operand_index(pair.second.first);
for (int64_t index : pair.second.second) {
aliasing->add_operand_shape_index(index);
}
for (int64_t index : pair.first) {
aliasing->add_output_shape_index(index);
}
}
}
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(auto program_shape,
fused_computation.GetProgramShape());
*instr.mutable_shape() = program_shape.result().ToProto();
builder->AddCalledComputation(fused_computation, &instr);
return builder->AddInstruction(std::move(instr), HloOpcode::kFusion,
operands);
});
}
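// Returns both the async-start op and the id of the wrapped computation,
// presumably so callers can reference it when building the matching
// async-update/async-done ops.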
std::pair<XlaOp, int64_t> XlaBuilderFriend::BuildAsyncStart(
XlaBuilder* builder, absl::Span<const XlaOp> operands,
std::string execution_thread, const XlaComputation& called_computation,
const Shape& shape) {
int64_t called_computation_id;
auto start_op = builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_async_execution_thread(execution_thread);
builder->AddCalledComputation(called_computation, &instr);
called_computation_id = instr.called_computation_ids()[0];
return builder->AddInstruction(std::move(instr), HloOpcode::kAsyncStart,
operands);
});
return {start_op, called_computation_id};
}
XlaOp XlaBuilderFriend::BuildAsyncUpdate(XlaBuilder* builder,
const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAsyncUpdate,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildAsyncDone(XlaBuilder* builder, const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAsyncDone,
{operand});
});
}
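// The Build*Start/Build*Done pairs below split collectives, copies, and
// permutes into asynchronous halves; each Start forwards to the builder's
// regular implementation with what appears to be an async flag set as the
// trailing argument, and each Done wraps the completion opcode.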
XlaOp XlaBuilderFriend::BuildAllGatherStart(
XlaBuilder* builder, const XlaOp operand, int64_t all_gather_dimension,
int64_t shard_count, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return builder->AllGatherImpl(operand, all_gather_dimension, shard_count,
replica_groups, channel_id, layout,
use_global_device_ids, true);
}
XlaOp XlaBuilderFriend::BuildAllGatherDone(XlaBuilder* builder,
const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAllGatherDone,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildAllReduceStart(
XlaBuilder* builder, XlaOp operand, const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& layout,
const std::optional<bool> use_global_device_ids) {
return builder->AllReduceImpl(operand, computation, replica_groups,
channel_id, layout, use_global_device_ids,
true);
}
XlaOp XlaBuilderFriend::BuildAllReduceDone(XlaBuilder* builder,
const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAllReduceDone,
{operand});
});
}
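// kCopyStart produces a (destination, source, U32 context) tuple; the
// optional index marks the copy as a cross-program prefetch.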
XlaOp XlaBuilderFriend::BuildCopyStart(
XlaBuilder* builder, const XlaOp operand,
std::optional<int> cross_program_prefetch_index) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
if (cross_program_prefetch_index) {
instr.set_cross_program_prefetch_index(*cross_program_prefetch_index);
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape,
builder->GetShapePtr(operand));
Shape u32 = ShapeUtil::MakeScalarShape(PrimitiveType::U32);
Shape shape =
ShapeUtil::MakeTupleShapeWithPtrs({operand_shape, operand_shape, &u32});
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kCopyStart,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildCopyDone(XlaBuilder* builder, const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kCopyDone,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildCollectivePermuteStart(
XlaBuilder* builder, XlaOp operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<ChannelHandle>& channel_id) {
return builder->CollectivePermuteImpl(operand, source_target_pairs,
channel_id, true);
}
XlaOp XlaBuilderFriend::BuildCollectivePermuteDone(XlaBuilder* builder,
const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(
std::move(instr), HloOpcode::kCollectivePermuteDone, {operand});
});
}
XlaOp XlaBuilderFriend::BuildBitcast(XlaBuilder* builder, XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kBitcast,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildDomain(XlaBuilder* builder, XlaOp operand,
const OpSharding entry,
const OpSharding exit, const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_domain_entry_sharding() = entry;
*instr.mutable_domain_exit_sharding() = exit;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kDomain,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildPartitionId(XlaBuilder* builder,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kPartitionId);
});
}
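// Send/Recv are built as start/done pairs over a channel: the start ops
// carry a (data, U32, token) tuple shape, SendDone yields a token, and
// RecvDone yields a (data, token) tuple.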
XlaOp XlaBuilderFriend::BuildSend(XlaBuilder* builder, XlaOp operand,
XlaOp token, const ChannelHandle& handle,
bool is_host_transfer) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto send_instr;
TF_ASSIGN_OR_RETURN(const Shape* shape, builder->GetShapePtr(operand));
*send_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({*shape, ShapeUtil::MakeShape(U32, {}),
ShapeUtil::MakeTokenShape()})
.ToProto();
send_instr.set_channel_id(handle.handle());
send_instr.set_is_host_transfer(is_host_transfer);
return builder->AddInstruction(std::move(send_instr), HloOpcode::kSend,
{operand, token});
});
}
XlaOp XlaBuilderFriend::BuildSendDone(XlaBuilder* builder, XlaOp operand,
const ChannelHandle& handle,
bool is_host_transfer) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto send_done_instr;
*send_done_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
send_done_instr.set_channel_id(handle.handle());
send_done_instr.set_is_host_transfer(is_host_transfer);
return builder->AddInstruction(std::move(send_done_instr),
HloOpcode::kSendDone, {operand});
});
}
XlaOp XlaBuilderFriend::BuildRecv(XlaBuilder* builder, XlaOp token,
const Shape& shape,
const ChannelHandle& handle,
bool is_host_transfer) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto recv_instr;
*recv_instr.mutable_shape() =
ShapeUtil::MakeTupleShape(
{shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()})
.ToProto();
recv_instr.set_channel_id(handle.handle());
recv_instr.set_is_host_transfer(is_host_transfer);
return builder->AddInstruction(std::move(recv_instr), HloOpcode::kRecv,
{token});
});
}
XlaOp XlaBuilderFriend::BuildRecvDone(XlaBuilder* builder, XlaOp token,
const Shape& shape,
const ChannelHandle& handle,
bool is_host_transfer) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto recv_done_instr;
*recv_done_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()})
.ToProto();
recv_done_instr.set_channel_id(handle.handle());
recv_done_instr.set_is_host_transfer(is_host_transfer);
return builder->AddInstruction(std::move(recv_done_instr),
HloOpcode::kRecvDone, {token});
});
}
XlaOp XlaBuilderFriend::BuildRngGetAndUpdateState(XlaBuilder* builder,
int64_t delta,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
instr.set_delta(delta);
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr),
HloOpcode::kRngGetAndUpdateState);
});
}
HloInstructionProto* XlaBuilderFriend::GetInstruction(XlaOp op) {
return &op.builder()
->instructions_[op.builder()->handle_to_index_[op.handle_]];
}
HloInstructionProto* XlaBuilderFriend::GetInstructionByHandle(
XlaBuilder* builder, int64_t handle) {
return &builder->instructions_[builder->handle_to_index_[handle]];
}
}  // namespace internal
XlaOp operator-(XlaOp x) { return Neg(x); }
XlaOp operator+(XlaOp x, XlaOp y) { return Add(x, y); }
XlaOp operator-(XlaOp x, XlaOp y) { return Sub(x, y); }
XlaOp operator*(XlaOp x, XlaOp y) { return Mul(x, y); }
XlaOp operator/(XlaOp x, XlaOp y) { return Div(x, y); }
XlaOp operator%(XlaOp x, XlaOp y) { return Rem(x, y); }
XlaOp operator~(XlaOp x) { return Not(x); }
XlaOp operator&(XlaOp x, XlaOp y) { return And(x, y); }
XlaOp operator|(XlaOp x, XlaOp y) { return Or(x, y); }
XlaOp operator^(XlaOp x, XlaOp y) { return Xor(x, y); }
XlaOp operator<<(XlaOp x, XlaOp y) { return ShiftLeft(x, y); }
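// operator>> picks the shift flavor from the element type: arithmetic for
// signed integers, logical for unsigned; non-integral operands are an error.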
XlaOp operator>>(XlaOp x, XlaOp y) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, builder->GetShapePtr(x));
if (!ShapeUtil::ElementIsIntegral(*shape)) {
return InvalidArgument(
"Argument to >> operator does not have an integral type (%s).",
ShapeUtil::HumanString(*shape));
}
if (ShapeUtil::ElementIsSigned(*shape)) {
return ShiftRightArithmetic(x, y);
} else {
return ShiftRightLogical(x, y);
}
});
}
absl::StatusOr<const Shape*> XlaBuilder::GetShapePtr(XlaOp op) const {
TF_RETURN_IF_ERROR(first_error_);
TF_RETURN_IF_ERROR(CheckOpBuilder(op));
auto it = handle_to_index_.find(op.handle());
if (it == handle_to_index_.end()) {
return InvalidArgument("No XlaOp with handle %d", op.handle());
}
return instruction_shapes_.at(it->second).get();
}
absl::StatusOr<Shape> XlaBuilder::GetShape(XlaOp op) const {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(op));
return *shape;
}
absl::StatusOr<std::vector<Shape>> XlaBuilder::GetOperandShapes(
absl::Span<const XlaOp> operands) const {
std::vector<Shape> operand_shapes;
operand_shapes.reserve(operands.size());
for (XlaOp operand : operands) {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
operand_shapes.push_back(*shape);
}
return operand_shapes;
}
absl::StatusOr<std::optional<OpSharding>> XlaBuilder::GetOpSharding(
XlaOp op) const {
TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpInstruction(op));
if (instr_proto->has_sharding()) {
return instr_proto->sharding();
}
return std::nullopt;
}
std::string XlaBuilder::OpToString(XlaOp op) const {
std::string s;
ToStringHelper(&s, 0, op.handle());
return s;
}
static std::string ShapeToString(const ShapeProto& shape) {
if (shape.tuple_shapes_size() > 1) {
return absl::StrCat(
"(",
absl::StrJoin(shape.tuple_shapes(), ", ",
[&](std::string* s, const ShapeProto& subshape) {
absl::StrAppend(s, ShapeToString(subshape));
}),
")");
}
return absl::StrCat("[", absl::StrJoin(shape.dimensions(), ", "), "]");
}
void XlaBuilder::ToStringHelper(std::string* out, int ident,
int64_t op_handle) const {
const HloInstructionProto& instr =
*(LookUpInstructionByHandle(op_handle).value());
absl::StrAppend(out, std::string(ident, ' '), instr.opcode(),
", shape=", ShapeToString(instr.shape()));
if (instr.has_metadata()) {
absl::StrAppend(out, ", metadata={", instr.metadata().source_file(), ":",
instr.metadata().source_line(), "}");
}
if (instr.operand_ids_size()) {
absl::StrAppend(out, "\n");
}
absl::StrAppend(out, absl::StrJoin(instr.operand_ids(), "\n",
[&](std::string* s, int64_t subop) {
ToStringHelper(s, ident + 2, subop);
}));
}
XlaBuilder::XlaBuilder(const std::string& computation_name)
: name_(computation_name) {}
XlaBuilder::~XlaBuilder() = default;
XlaOp XlaBuilder::ReportError(const absl::Status& error) {
CHECK(!error.ok());
if (die_immediately_on_error_) {
LOG(FATAL) << "error building computation: " << error;
}
if (first_error_.ok()) {
first_error_ = error;
first_error_backtrace_.CreateCurrent(1);
}
return XlaOp(this);
}
XlaOp XlaBuilder::ReportErrorOrReturn(const absl::StatusOr<XlaOp>& op) {
if (!first_error_.ok()) {
return XlaOp(this);
}
if (!op.ok()) {
return ReportError(op.status());
}
return op.value();
}
XlaOp XlaBuilder::ReportErrorOrReturn(
absl::FunctionRef<absl::StatusOr<XlaOp>()> op_creator) {
return ReportErrorOrReturn(op_creator());
}
absl::StatusOr<ProgramShape> XlaBuilder::GetProgramShape(
int64_t root_id) const {
TF_RETURN_IF_ERROR(first_error_);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root_proto,
LookUpInstructionByHandle(root_id));
ProgramShape program_shape;
*program_shape.mutable_result() = Shape(root_proto->shape());
const int64_t param_count = parameter_numbers_.size();
for (int64_t i = 0; i < param_count; i++) {
program_shape.add_parameters();
program_shape.add_parameter_names();
}
for (const HloInstructionProto& instr : instructions_) {
if (instr.opcode() == HloOpcodeString(HloOpcode::kParameter)) {
const int64_t index = instr.parameter_number();
TF_RET_CHECK(index >= 0 && index < param_count)
<< "invalid parameter number: " << index;
*program_shape.mutable_parameters(index) = Shape(instr.shape());
*program_shape.mutable_parameter_names(index) = instr.name();
}
}
return program_shape;
}
absl::StatusOr<ProgramShape> XlaBuilder::GetProgramShape() const {
TF_RET_CHECK(!instructions_.empty());
return GetProgramShape(instructions_.back().id());
}
absl::StatusOr<ProgramShape> XlaBuilder::GetProgramShape(XlaOp root) const {
if (root.builder_ != this) {
return InvalidArgument("Given root operation is not in this computation.");
}
return GetProgramShape(root.handle());
}
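// Depth-first walk used by the constant analysis: parameters, RNG,
// infeed/outfeed, collectives, send/recv, while, scatter, and calls make a
// value non-constant (SetBound custom calls and GetDimensionSize stay
// constant), while GetTupleElement of an explicit Tuple only follows the
// selected element.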
void XlaBuilder::IsConstantVisitor(const int64_t op_handle, int depth,
absl::flat_hash_set<int64_t>* visited,
bool* is_constant) const {
if (visited->contains(op_handle) || !*is_constant) {
return;
}
const HloInstructionProto& instr =
*(LookUpInstructionByHandle(op_handle).value());
HloInstructionProto to_print(instr);
to_print.clear_shape();
const HloOpcode opcode = StringToHloOpcode(instr.opcode()).value();
const std::string indent =
absl::StrJoin(std::vector<absl::string_view>(depth, " "), "");
if (VLOG_IS_ON(2)) {
VLOG(2) << indent << "Visiting:";
for (const auto& l : absl::StrSplit(to_print.DebugString(), '\n')) {
VLOG(2) << indent << l;
}
}
switch (opcode) {
default:
for (const int64_t operand_id : instr.operand_ids()) {
IsConstantVisitor(operand_id, depth + 1, visited, is_constant);
}
break;
case HloOpcode::kGetDimensionSize:
break;
case HloOpcode::kRng:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kCall:
case HloOpcode::kCustomCall:
if (instr.custom_call_target() == "SetBound") {
break;
}
[[fallthrough]];
case HloOpcode::kWhile:
case HloOpcode::kScatter:
case HloOpcode::kSend:
case HloOpcode::kRecv:
case HloOpcode::kParameter:
*is_constant = false;
break;
case HloOpcode::kGetTupleElement: {
const HloInstructionProto& operand_instr =
*(LookUpInstructionByHandle(instr.operand_ids(0)).value());
if (HloOpcodeString(HloOpcode::kTuple) == operand_instr.opcode()) {
IsConstantVisitor(operand_instr.operand_ids(instr.tuple_index()),
depth + 1, visited, is_constant);
} else {
for (const int64_t operand_id : instr.operand_ids()) {
IsConstantVisitor(operand_id, depth + 1, visited, is_constant);
}
}
}
}
if (VLOG_IS_ON(1) && !*is_constant) {
VLOG(1) << indent << "Non-constant: ";
for (const auto& l : absl::StrSplit(to_print.DebugString(), '\n')) {
VLOG(1) << indent << l;
}
}
visited->insert(op_handle);
}
absl::Status XlaBuilder::SetInstructionFrontendAttribute(const XlaOp op,
std::string attribute,
std::string value) {
TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpMutableInstruction(op));
auto* frontend_attributes = instr_proto->mutable_frontend_attributes();
(*frontend_attributes->mutable_map())[attribute] = std::move(value);
return absl::OkStatus();
}
absl::Status XlaBuilder::SetInstructionSharding(
XlaOp op, const std::optional<OpSharding>& sharding) {
TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpMutableInstruction(op));
if (!sharding.has_value()) {
instr_proto->clear_sharding();
return absl::OkStatus();
}
return NormalizeAndAssignSharing(instr_proto, sharding.value());
}
XlaComputation XlaBuilder::BuildAndNoteError() {
DCHECK(parent_builder_ != nullptr);
auto build_status = Build();
if (!build_status.ok()) {
parent_builder_->ReportError(
AddStatus(build_status.status(), absl::StrCat("error from: ", name_)));
return {};
}
return std::move(build_status).value();
}
absl::Status XlaBuilder::GetCurrentStatus() const {
if (!first_error_.ok()) {
std::string backtrace;
first_error_backtrace_.Dump(tsl::DebugWriteToString, &backtrace);
return AppendStatus(first_error_, backtrace);
}
return absl::OkStatus();
}
absl::StatusOr<XlaComputation> XlaBuilder::Build(
bool remove_dynamic_dimensions) {
TF_RETURN_IF_ERROR(GetCurrentStatus());
return Build(instructions_.back().id(), remove_dynamic_dimensions);
}
absl::StatusOr<XlaComputation> XlaBuilder::Build(
XlaOp root, bool remove_dynamic_dimensions) {
if (root.builder_ != this) {
return InvalidArgument("Given root operation is not in this computation.");
}
return Build(root.handle(), remove_dynamic_dimensions);
}
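// Serializes the accumulated instructions into an HloModuleProto rooted at
// root_id. Embedded computations are emitted before the entry computation,
// and the builder's instruction state is cleared afterwards.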
absl::StatusOr<XlaComputation> XlaBuilder::Build(
int64_t root_id, bool remove_dynamic_dimensions) {
TF_RETURN_IF_ERROR(GetCurrentStatus());
if (remove_dynamic_dimensions) {
std::function<void(Shape*)> remove_dynamic_dimension = [&](Shape* shape) {
if (shape->tuple_shapes_size() != 0) {
for (int i = 0; i < shape->tuple_shapes_size(); ++i) {
remove_dynamic_dimension(shape->mutable_tuple_shapes(i));
}
}
for (int64_t i = 0; i < shape->dimensions_size(); ++i) {
shape->set_dynamic_dimension(i, false);
}
};
for (size_t index = 0; index < instructions_.size(); ++index) {
remove_dynamic_dimension(instruction_shapes_[index].get());
*instructions_[index].mutable_shape() =
instruction_shapes_[index]->ToProto();
}
}
HloComputationProto entry;
SetProtoIdAndName(&entry, name_, kNameSeparator, GetNextId());
TF_ASSIGN_OR_RETURN(ProgramShape program_shape, GetProgramShape(root_id));
*entry.mutable_program_shape() = program_shape.ToProto();
entry.set_root_id(root_id);
for (auto& instruction : instructions_) {
instruction.set_name(
GetFullName(instruction.name(), kNameSeparator, instruction.id()));
entry.add_instructions()->Swap(&instruction);
}
XlaComputation computation(entry.id());
HloModuleProto* module = computation.mutable_proto();
module->set_name(entry.name());
module->set_id(entry.id());
module->set_entry_computation_name(entry.name());
module->set_entry_computation_id(entry.id());
*module->mutable_host_program_shape() = entry.program_shape();
for (auto& e : embedded_) {
module->add_computations()->Swap(&e.second);
}
module->add_computations()->Swap(&entry);
if (!input_output_aliases_.empty() || !buffer_donors_.empty()) {
TF_RETURN_IF_ERROR(PopulateInputOutputAliasAndBufferDonor(
module, program_shape, input_output_aliases_, buffer_donors_));
}
this->instructions_.clear();
this->instruction_shapes_.clear();
this->handle_to_index_.clear();
this->embedded_.clear();
this->parameter_numbers_.clear();
return std::move(computation);
}
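// Validates the recorded input/output aliases and buffer donors against the
// program shape and writes them into the module proto; a parameter index may
// not be both aliased to an output and registered as a donor.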
absl::Status XlaBuilder::PopulateInputOutputAliasAndBufferDonor(
HloModuleProto* module, const ProgramShape& program_shape,
const std::vector<InputOutputAlias>& input_output_aliases,
const absl::flat_hash_set<HloBufferDonorConfig::BufferDonor>&
buffer_donors) {
HloInputOutputAliasConfig io_alias_config(program_shape.result());
for (auto& alias : input_output_aliases) {
if (alias.param_number >= program_shape.parameters_size()) {
return InvalidArgument("Invalid parameter number %ld (total %ld)",
alias.param_number,
program_shape.parameters_size());
}
const Shape& parameter_shape = program_shape.parameters(alias.param_number);
if (!ShapeUtil::IndexIsValid(parameter_shape, alias.param_index)) {
return InvalidArgument("Invalid parameter %ld index: %s",
alias.param_number,
alias.param_index.ToString().c_str());
}
TF_RETURN_IF_ERROR(io_alias_config.SetUpAlias(
alias.output_index, alias.param_number, alias.param_index, alias.kind));
}
*module->mutable_input_output_alias() = io_alias_config.ToProto();
HloBufferDonorConfig buffer_donor_config;
for (auto& donor : buffer_donors) {
if (donor.param_number >= program_shape.parameters_size()) {
return InvalidArgument("Invalid parameter number %ld (total %ld)",
donor.param_number,
program_shape.parameters_size());
}
const Shape& parameter_shape = program_shape.parameters(donor.param_number);
if (!ShapeUtil::IndexIsValid(parameter_shape, donor.param_index)) {
return InvalidArgument("Invalid parameter %ld index: %s",
donor.param_number,
donor.param_index.ToString().c_str());
}
if (io_alias_config.ParameterHasAlias(donor.param_number,
donor.param_index)) {
return InvalidArgument(
"Parameter %ld index %s is already aliased with one output, thus it "
"cannot be added as a buffer donor for any output.",
donor.param_number, donor.param_index.ToString().c_str());
}
TF_RETURN_IF_ERROR(buffer_donor_config.AddBufferDonor(donor.param_number,
donor.param_index));
}
*module->mutable_buffer_donor() = buffer_donor_config.ToProto();
return absl::OkStatus();
}
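// Emits an "mhlo.dynamic_reshape" custom call after checking that element
// types match, that static element counts agree, and that output_shape has
// one entry per result dimension.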
XlaOp XlaBuilder::MhloDynamicReshape(XlaOp operand, XlaOp output_shape,
const Shape& shape) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (operand_shape->element_type() != shape.element_type()) {
return InvalidArgument(
"Element type of operand %s and output %s must match",
ShapeUtil::HumanString(*operand_shape),
ShapeUtil::HumanString(shape));
}
if (operand_shape->is_static() && shape.is_static() &&
ShapeUtil::ElementsIn(*operand_shape) != ShapeUtil::ElementsIn(shape)) {
return InvalidArgument(
"MhloDynamicReshape has mismatched element counts: from=%d (%s) "
"to=%d (%s)",
ShapeUtil::ElementsIn(*operand_shape),
ShapeUtil::HumanString(*operand_shape), ShapeUtil::ElementsIn(shape),
ShapeUtil::HumanString(shape));
}
TF_ASSIGN_OR_RETURN(const Shape* output_shape_shape,
GetShapePtr(output_shape));
if (output_shape_shape->dimensions(0) != shape.rank()) {
return InvalidArgument(
"output_shape dimension size=%d (%s) and rank of shape=%d (%s) must "
"match",
output_shape_shape->dimensions(0),
ShapeUtil::HumanString(*output_shape_shape), shape.rank(),
ShapeUtil::HumanString(shape));
}
return xla::CustomCall(operand.builder(), "mhlo.dynamic_reshape",
{operand, output_shape},
shape,
"");
});
}
XlaOp XlaBuilder::MhloDynamicBroadcastInDim(
const XlaOp operand, const XlaOp output_dimensions,
absl::Span<const int64_t> broadcast_dimensions, const Shape& output_shape) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* output_dimensions_shape,
GetShapePtr(output_dimensions));
if (!output_dimensions_shape->IsInteger()) {
return InvalidArgument("output_dimensions must be an integer type %s",
ShapeUtil::HumanString(*output_dimensions_shape));
}
if (output_dimensions_shape->rank() != 1) {
return InvalidArgument("output_dimensions must be rank 1 but got rank %d",
output_dimensions_shape->rank());
}
int64_t operand_rank = operand_shape->rank();
int64_t result_rank = output_shape.rank();
int64_t broadcast_dimensions_size = broadcast_dimensions.size();
if (broadcast_dimensions_size != operand_rank) {
return InvalidArgument(
"broadcast_dimensions size (%d) does not match operand rank (%d)",
broadcast_dimensions_size, operand_rank);
}
if (result_rank < operand_rank) {
return InvalidArgument("result rank (%d) is less than operand rank (%d)",
result_rank, operand_rank);
}
for (int64_t i = 0; i != broadcast_dimensions_size; ++i) {
int64_t dim_index = broadcast_dimensions[i];
if (dim_index < 0 || dim_index >= result_rank) {
return InvalidArgument(
"broadcast_dimensions contains invalid value %d for result with "
"rank %d",
dim_index, result_rank);
}
int64_t dim_size = operand_shape->dimensions(i);
int64_t result_dim_size = output_shape.dimensions(dim_index);
if (dim_size != 1 && dim_size != result_dim_size &&
dim_size != Shape::kUnboundedSize) {
return InvalidArgument(
"size of operand dimension %d (%d) is not compatible with size of "
"result dimension %d (%d)",
i, dim_size, dim_index, result_dim_size);
}
}
return xla::CustomCall(
operand.builder(), "mhlo.dynamic_broadcast_in_dim",
{operand, output_dimensions},
output_shape,
absl::StrCat("{broadcast_dimensions=[",
absl::StrJoin(broadcast_dimensions, ","), "]}"));
});
}
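// Emits a broadcast whose dimensions map each operand dimension to a result
// dimension; result dimensions not covered by broadcast_dimensions must be
// static, and the result shape itself may not be unbounded dynamic.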
absl::StatusOr<XlaOp> XlaBuilder::InDimBroadcast(
const Shape& shape, XlaOp operand,
absl::Span<const int64_t> broadcast_dimensions) {
TF_RETURN_IF_ERROR(first_error_);
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t dim : broadcast_dimensions) {
instr.add_dimensions(dim);
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_RET_CHECK(!shape.is_unbounded_dynamic())
<< "broadcast op result shapes must be static";
for (int64_t i = 0; i < shape.rank(); i++) {
if (auto it = absl::c_find(broadcast_dimensions, i);
it != broadcast_dimensions.end()) {
TF_RET_CHECK(operand_shape->is_bounded_dynamic_dimension(
it - broadcast_dimensions.begin()) ==
shape.is_bounded_dynamic_dimension(i))
<< " i: " << i << ", shape: " << ShapeUtil::HumanString(shape)
<< ", operand_shape: " << ShapeUtil::HumanString(*operand_shape);
} else {
TF_RET_CHECK(shape.is_static_dimension(i));
}
}
return AddInstruction(std::move(instr), HloOpcode::kBroadcast, {operand});
}
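// Broadcasts operand to output_shape by first reshaping away the degenerate
// (size-1) dimensions and then expanding them with InDimBroadcast, e.g. a
// [1,3,1] operand is reshaped to [3] and broadcast into [2,3,4] along
// dimension 1.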
absl::StatusOr<XlaOp> XlaBuilder::AddBroadcastSequence(
const Shape& output_shape, XlaOp operand) {
TF_RETURN_IF_ERROR(first_error_);
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
CHECK(ShapeUtil::IsScalar(*operand_shape) ||
operand_shape->rank() == output_shape.rank());
Shape broadcast_shape =
ShapeUtil::ChangeElementType(output_shape, operand_shape->element_type());
if (ShapeUtil::IsScalar(*operand_shape)) {
return InDimBroadcast(ShapeUtil::MakeStaticShape(broadcast_shape), operand,
{});
}
std::vector<int64_t> broadcast_dimensions;
std::vector<int64_t> reshaped_dimensions;
std::vector<bool> reshaped_dynamic_dimensions;
for (int i = 0; i < operand_shape->rank(); i++) {
if (operand_shape->dimensions(i) == output_shape.dimensions(i)) {
broadcast_dimensions.push_back(i);
reshaped_dimensions.push_back(operand_shape->dimensions(i));
reshaped_dynamic_dimensions.push_back(
operand_shape->is_dynamic_dimension(i));
} else {
TF_RET_CHECK(operand_shape->dimensions(i) == 1 &&
operand_shape->is_static_dimension(i))
<< "An explicit broadcast sequence requires the broadcasted "
"dimensions to be trivial; operand shape: "
<< *operand_shape << "; output_shape: " << output_shape;
}
broadcast_shape.set_dynamic_dimension(
i, operand_shape->is_dynamic_dimension(i));
}
Shape reshaped_shape =
ShapeUtil::MakeShape(operand_shape->element_type(), reshaped_dimensions,
reshaped_dynamic_dimensions);
XlaOp reshaped_operand;
{
XlaScopedShardingAssignment scoped_sharding(this, std::nullopt);
TF_ASSIGN_OR_RETURN(
reshaped_operand,
ReshapeInternal(reshaped_shape, operand, -1));
}
return InDimBroadcast(broadcast_shape, reshaped_operand,
broadcast_dimensions);
}
XlaOp XlaBuilder::UnaryOp(HloOpcode unop, XlaOp operand) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferUnaryOpShape(unop, *operand_shape));
return AddOpWithShape(unop, shape, {operand});
});
}
namespace {
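// Helpers for BinaryOp's implicit broadcasting, including the
// unbounded-dynamic path that assembles output dimension sizes at runtime
// from GetDimensionSize ops.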
absl::StatusOr<XlaOp> BroadcastToTargetRank(
XlaOp origin, const Shape& origin_shape, const Shape& target_shape,
absl::Span<const int64_t> broadcast_dimensions) {
if (ShapeUtil::IsScalar(origin_shape)) {
return origin;
}
const int64_t origin_rank = origin_shape.rank();
const int64_t target_rank = target_shape.rank();
if (origin_rank >= target_rank) {
return origin;
}
absl::Span<const int64_t> target_dimensions = target_shape.dimensions();
std::vector<int64_t> target_size{target_dimensions.begin(),
target_dimensions.end()};
for (int64_t origin_dim = 0; origin_dim < origin_rank; origin_dim++) {
int64_t target_dim = broadcast_dimensions[origin_dim];
target_size[target_dim] = origin_shape.dimensions(origin_dim);
}
return BroadcastInDim(origin, target_size, broadcast_dimensions);
}
absl::StatusOr<std::vector<XlaOp>> ExtractDimensionSizesAndPadOnesToLeft(
XlaBuilder* builder, XlaOp op, size_t num_dims, int pad_count) {
TF_ASSIGN_OR_RETURN(const Shape* op_shape, builder->GetShapePtr(op));
std::vector<XlaOp> op_dims(
pad_count, ConstantR1<int32_t>(builder, {1}));
for (size_t i = 0; i < num_dims; i++) {
op_dims.push_back(
op_shape->is_static_dimension(i)
? ConstantR1<int32_t>(
builder,
{static_cast<int32_t>(op_shape->dimensions(i))})
: Reshape(GetDimensionSize(op, i), {1}));
}
return op_dims;
}
absl::StatusOr<XlaOp> BroadcastScalarToOutputShapeWithUnbounded(
XlaBuilder* builder, XlaOp scalar, XlaOp output,
const Shape& output_shape) {
TF_ASSIGN_OR_RETURN(const Shape* scalar_shape, builder->GetShapePtr(scalar));
CHECK(ShapeUtil::IsScalar(*scalar_shape));
std::vector<XlaOp> output_sizes(output_shape.rank());
for (size_t i = 0; i < output_shape.rank(); i++) {
output_sizes[i] =
output_shape.is_static_dimension(i)
? ConstantR1<int32_t>(
builder,
{static_cast<int32_t>(output_shape.dimensions(i))})
: Reshape(GetDimensionSize(output, i), {1});
}
return MhloDynamicBroadcastInDim(
scalar, ConcatInDim(builder, output_sizes, 0), {},
output_shape);
}
absl::StatusOr<XlaOp> DegenerateBroadcastWithUnbounded(
XlaBuilder* builder, XlaOp operand, XlaOp output_dimensions,
const Shape& output_shape) {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape,
builder->GetShapePtr(operand));
std::vector<int64_t> broadcast_dimensions(operand_shape->rank());
std::iota(broadcast_dimensions.begin(), broadcast_dimensions.end(),
output_shape.rank() - operand_shape->rank());
return MhloDynamicBroadcastInDim(operand, output_dimensions,
broadcast_dimensions, output_shape);
}
struct UnboundedBroadcastResult {
XlaOp lhs;
XlaOp rhs;
};
absl::StatusOr<UnboundedBroadcastResult> BroadcastToOutputShapeWithUnbounded(
XlaBuilder* builder, XlaOp lhs, const Shape& lhs_shape, XlaOp rhs,
const Shape rhs_shape, const Shape& output_shape,
absl::Span<const int64_t> broadcast_dimensions) {
const int64_t lhs_rank = lhs_shape.rank();
const int64_t rhs_rank = rhs_shape.rank();
const int64_t output_rank = output_shape.rank();
TF_ASSIGN_OR_RETURN(std::vector<XlaOp> lhs_dims,
ExtractDimensionSizesAndPadOnesToLeft(
builder, lhs, lhs_rank, output_rank - lhs_rank));
TF_ASSIGN_OR_RETURN(std::vector<XlaOp> rhs_dims,
ExtractDimensionSizesAndPadOnesToLeft(
builder, rhs, rhs_rank, output_rank - rhs_rank));
XlaOp output_dimensions =
Max(ConcatInDim(builder, lhs_dims, 0), ConcatInDim(builder, rhs_dims, 0));
TF_ASSIGN_OR_RETURN(XlaOp lhs_result,
DegenerateBroadcastWithUnbounded(
builder, lhs, output_dimensions, output_shape));
TF_ASSIGN_OR_RETURN(XlaOp rhs_result,
DegenerateBroadcastWithUnbounded(
builder, rhs, output_dimensions, output_shape));
return UnboundedBroadcastResult{lhs_result, rhs_result};
}
}  // namespace
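// Emits a binary op after applying implicit broadcasting: a lower-rank
// operand is first broadcast to the result rank along broadcast_dimensions
// and any remaining degenerate dimensions are expanded; operands with
// unbounded dynamic shapes instead take the runtime-broadcast path above.
// kCompare additionally requires a ComparisonDirection.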
XlaOp XlaBuilder::BinaryOp(HloOpcode binop, XlaOp lhs, XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions,
std::optional<ComparisonDirection> direction,
std::optional<Comparison::Type> type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferBinaryOpShape(
binop, *lhs_shape, *rhs_shape, broadcast_dimensions));
XlaOp updated_lhs = lhs;
XlaOp updated_rhs = rhs;
if (!lhs_shape->is_unbounded_dynamic() &&
!rhs_shape->is_unbounded_dynamic()) {
if (lhs_shape->rank() < shape.rank()) {
TF_ASSIGN_OR_RETURN(updated_lhs,
BroadcastToTargetRank(lhs, *lhs_shape, shape,
broadcast_dimensions));
}
if (rhs_shape->rank() < shape.rank()) {
TF_ASSIGN_OR_RETURN(updated_rhs,
BroadcastToTargetRank(rhs, *rhs_shape, shape,
broadcast_dimensions));
}
TF_ASSIGN_OR_RETURN(const Shape* updated_lhs_shape,
GetShapePtr(updated_lhs));
TF_ASSIGN_OR_RETURN(const Shape* updated_rhs_shape,
GetShapePtr(updated_rhs));
if (!ShapeUtil::SameDimensions(shape, *updated_lhs_shape)) {
TF_ASSIGN_OR_RETURN(updated_lhs,
AddBroadcastSequence(shape, updated_lhs));
}
if (!ShapeUtil::SameDimensions(shape, *updated_rhs_shape)) {
TF_ASSIGN_OR_RETURN(updated_rhs,
AddBroadcastSequence(shape, updated_rhs));
}
} else {
if (ShapeUtil::IsScalar(*lhs_shape) || ShapeUtil::IsScalar(*rhs_shape)) {
if (ShapeUtil::IsScalar(*lhs_shape)) {
TF_ASSIGN_OR_RETURN(updated_lhs,
BroadcastScalarToOutputShapeWithUnbounded(
this, lhs, rhs, *rhs_shape));
}
if (ShapeUtil::IsScalar(*rhs_shape)) {
TF_ASSIGN_OR_RETURN(updated_rhs,
BroadcastScalarToOutputShapeWithUnbounded(
this, rhs, lhs, *lhs_shape));
}
} else {
if (!ShapeUtil::SameDimensions(*lhs_shape, *rhs_shape)) {
Shape output_shape = shape;
output_shape.set_element_type(lhs_shape->element_type());
TF_ASSIGN_OR_RETURN(UnboundedBroadcastResult broadcast_result,
BroadcastToOutputShapeWithUnbounded(
this, lhs, *lhs_shape, rhs, *rhs_shape,
output_shape, broadcast_dimensions));
updated_lhs = broadcast_result.lhs;
updated_rhs = broadcast_result.rhs;
}
}
}
if (binop == HloOpcode::kCompare) {
if (!direction.has_value()) {
return InvalidArgument(
"kCompare expects a ComparisonDirection, but none provided.");
}
if (type == std::nullopt) {
return Compare(shape, updated_lhs, updated_rhs, *direction);
} else {
return Compare(shape, updated_lhs, updated_rhs, *direction, *type);
}
}
if (direction.has_value()) {
return InvalidArgument(
"A comparison direction is provided for a non-compare opcode: %s.",
HloOpcodeString(binop));
}
return BinaryOpNoBroadcast(binop, shape, updated_lhs, updated_rhs);
});
}
XlaOp XlaBuilder::BinaryOpNoBroadcast(HloOpcode binop, const Shape& shape,
XlaOp lhs, XlaOp rhs) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), binop, {lhs, rhs});
});
}
absl::StatusOr<XlaOp> XlaBuilder::Compare(const Shape& shape, XlaOp lhs,
XlaOp rhs,
ComparisonDirection direction) {
TF_ASSIGN_OR_RETURN(auto operand_shape, GetShape(lhs));
return Compare(
shape, lhs, rhs, direction,
Comparison::DefaultComparisonType(operand_shape.element_type()));
}
absl::StatusOr<XlaOp> XlaBuilder::Compare(const Shape& shape, XlaOp lhs,
XlaOp rhs,
ComparisonDirection direction,
Comparison::Type type) {
HloInstructionProto instr;
instr.set_comparison_direction(ComparisonDirectionToString(direction));
instr.set_comparison_type(ComparisonTypeToString(type));
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kCompare, {lhs, rhs});
}
absl::StatusOr<XlaOp> XlaBuilder::BroadcastScalarToOutputShape(XlaOp scalar,
XlaOp output) {
TF_ASSIGN_OR_RETURN(const Shape* scalar_shape, GetShapePtr(scalar));
TF_ASSIGN_OR_RETURN(const Shape* output_shape, GetShapePtr(output));
XlaOp updated_output = scalar;
if (output_shape->is_unbounded_dynamic()) {
Shape output_shape_copy = *output_shape;
output_shape_copy.set_element_type(scalar_shape->element_type());
TF_ASSIGN_OR_RETURN(updated_output,
BroadcastScalarToOutputShapeWithUnbounded(
this, scalar, output, output_shape_copy));
return updated_output;
}
TF_ASSIGN_OR_RETURN(updated_output,
AddBroadcastSequence(*output_shape, updated_output));
return updated_output;
}
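// For kSelect and kClamp, scalar operands are implicitly broadcast to the
// common shape of the non-scalar operands before shape inference runs.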
XlaOp XlaBuilder::TernaryOp(HloOpcode triop, XlaOp lhs, XlaOp rhs, XlaOp ehs) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
XlaOp updated_lhs = lhs;
XlaOp updated_rhs = rhs;
XlaOp updated_ehs = ehs;
if (triop == HloOpcode::kSelect || triop == HloOpcode::kClamp) {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_ASSIGN_OR_RETURN(const Shape* ehs_shape, GetShapePtr(ehs));
TF_ASSIGN_OR_RETURN(
std::optional<Shape> output_shape,
ShapeInference::InferScalarBroadcastShape(
absl::Span<const Shape>({*lhs_shape, *rhs_shape, *ehs_shape})));
if (output_shape.has_value()) {
if (ShapeUtil::IsScalar(*lhs_shape)) {
TF_ASSIGN_OR_RETURN(
updated_lhs,
BroadcastScalarToOutputShape(
lhs,
ShapeUtil::Equal(*output_shape, *rhs_shape) ? rhs : ehs));
}
if (ShapeUtil::IsScalar(*rhs_shape)) {
TF_ASSIGN_OR_RETURN(
updated_rhs,
BroadcastScalarToOutputShape(
rhs,
ShapeUtil::Equal(*output_shape, *lhs_shape) ? lhs : ehs));
}
if (ShapeUtil::IsScalar(*ehs_shape)) {
TF_ASSIGN_OR_RETURN(
updated_ehs,
BroadcastScalarToOutputShape(
ehs,
ShapeUtil::Equal(*output_shape, *lhs_shape) ? lhs : rhs));
}
}
}
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(updated_lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(updated_rhs));
TF_ASSIGN_OR_RETURN(const Shape* ehs_shape, GetShapePtr(updated_ehs));
TF_ASSIGN_OR_RETURN(const Shape inferred_shape,
ShapeInference::InferTernaryOpShape(
triop, *lhs_shape, *rhs_shape, *ehs_shape));
return AddOpWithShape(triop, inferred_shape,
{updated_lhs, updated_rhs, updated_ehs});
});
}
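// Splat optimization: a multi-element literal whose elements all equal the
// first is emitted as a scalar constant plus a broadcast rather than as a
// full literal.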
XlaOp XlaBuilder::ConstantLiteral(const LiteralSlice& literal) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (literal.shape().IsArray() && literal.element_count() > 1 &&
literal.IsAllFirst()) {
Literal scalar = LiteralUtil::GetFirstScalarLiteral(literal);
HloInstructionProto instr;
*instr.mutable_shape() = scalar.shape().ToProto();
*instr.mutable_literal() = scalar.ToProto();
XlaOp scalar_op;
{
XlaScopedShardingAssignment scoped_sharding(this, std::nullopt);
TF_ASSIGN_OR_RETURN(
scalar_op, AddInstruction(std::move(instr), HloOpcode::kConstant));
}
return Broadcast(scalar_op, literal.shape().dimensions());
} else {
HloInstructionProto instr;
*instr.mutable_shape() = literal.shape().ToProto();
*instr.mutable_literal() = literal.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kConstant);
}
});
}
XlaOp XlaBuilder::Iota(const Shape& shape, int64_t iota_dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!shape.is_static()) {
return InvalidArgument(
"The output of iota must not have dynamic dimensions: %s",
ShapeUtil::HumanString(shape));
}
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.add_dimensions(iota_dimension);
return AddInstruction(std::move(instr), HloOpcode::kIota);
});
}
XlaOp XlaBuilder::Iota(PrimitiveType type, int64_t size) {
return Iota(ShapeUtil::MakeShape(type, {size}), 0);
}
XlaOp XlaBuilder::Call(const XlaComputation& computation,
absl::Span<const XlaOp> operands) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferCallShape(
operand_shape_ptrs,
called_program_shape));
*instr.mutable_shape() = shape.ToProto();
AddCalledComputation(computation, &instr);
return AddInstruction(std::move(instr), HloOpcode::kCall, operands);
});
}
XlaOp XlaBuilder::CompositeCall(const XlaComputation& computation,
absl::Span<const XlaOp> operands,
const std::string& name,
std::optional<absl::string_view> attributes,
std::optional<int64_t> version) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferCallShape(
operand_shape_ptrs,
called_program_shape));
*instr.mutable_shape() = shape.ToProto();
AddCalledComputation(computation, &instr);
instr.set_is_composite(true);
TF_ASSIGN_OR_RETURN(
XlaOp instruction,
AddInstruction(std::move(instr), HloOpcode::kCall, operands));
TF_RETURN_IF_ERROR(
SetInstructionFrontendAttribute(instruction, "composite.name", name));
TF_RETURN_IF_ERROR(SetInstructionFrontendAttribute(
instruction, "composite.attributes",
attributes.has_value() ? std::string(*attributes) : "{}"));
TF_RETURN_IF_ERROR(SetInstructionFrontendAttribute(
instruction, "composite.version",
version.has_value() ? std::to_string(*version) : "0"));
return instruction;
});
}
XlaOp XlaBuilder::Parameter(
int64_t parameter_number, const Shape& shape, const std::string& name,
const std::vector<bool>& replicated_at_leaf_buffers) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
if (!parameter_numbers_.insert(parameter_number).second) {
return InvalidArgument("parameter %d already registered",
parameter_number);
}
instr.set_parameter_number(parameter_number);
instr.set_name(name);
*instr.mutable_shape() = shape.ToProto();
if (!replicated_at_leaf_buffers.empty()) {
auto replication = instr.mutable_parameter_replication();
for (bool replicated : replicated_at_leaf_buffers) {
replication->add_replicated_at_leaf_buffers(replicated);
}
}
return AddInstruction(std::move(instr), HloOpcode::kParameter);
});
}
XlaOp XlaBuilder::Broadcast(XlaOp operand,
absl::Span<const int64_t> broadcast_sizes) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(
const Shape& shape,
ShapeInference::InferBroadcastShape(*operand_shape, broadcast_sizes));
const int64_t operand_rank = operand_shape->rank();
std::vector<int64_t> dimensions(operand_rank);
for (int i = 0; i < operand_rank; ++i) {
dimensions[i] = i + shape.rank() - operand_rank;
}
return InDimBroadcast(shape, operand, dimensions);
});
}
XlaOp XlaBuilder::BroadcastInDim(
XlaOp operand, absl::Span<const int64_t> out_dim_size,
absl::Span<const int64_t> broadcast_dimensions) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(auto output_shape,
ShapeUtil::MakeValidatedShape(
operand_shape->element_type(), out_dim_size));
TF_RET_CHECK(!output_shape.is_unbounded_dynamic())
<< "BroadcastInDim output must shape be static or bounded dynamic "
<< ShapeUtil::HumanString(output_shape);
int64_t broadcast_rank = broadcast_dimensions.size();
if (operand_shape->rank() != broadcast_rank) {
return InvalidArgument(
"Size of broadcast_dimensions has to match operand's rank; operand "
"rank: %lld, size of broadcast_dimensions %u.",
operand_shape->rank(), broadcast_dimensions.size());
}
for (int i = 0; i < broadcast_rank; i++) {
const int64_t num_dims = out_dim_size.size();
if (broadcast_dimensions[i] < 0 || broadcast_dimensions[i] > num_dims) {
return InvalidArgument("Broadcast dimension %lld is out of bound",
broadcast_dimensions[i]);
}
output_shape.set_dynamic_dimension(
broadcast_dimensions[i],
operand_shape->is_bounded_dynamic_dimension(i));
}
TF_RETURN_IF_ERROR(ShapeInference::InferBroadcastShape(
*operand_shape, output_shape, broadcast_dimensions)
.status());
std::vector<int64_t> in_dim_size(out_dim_size.begin(), out_dim_size.end());
std::vector<bool> in_dim_dynamic(out_dim_size.size(), false);
for (int i = 0; i < broadcast_rank; i++) {
in_dim_size[broadcast_dimensions[i]] =
(operand_shape->is_unbounded_dynamic_dimension(i))
? out_dim_size[broadcast_dimensions[i]]
: operand_shape->dimensions(i);
in_dim_dynamic[broadcast_dimensions[i]] =
operand_shape->is_bounded_dynamic_dimension(i);
}
const auto& in_dim_shape = ShapeUtil::MakeShape(
operand_shape->element_type(), in_dim_size, in_dim_dynamic);
TF_ASSIGN_OR_RETURN(
XlaOp in_dim_broadcast,
InDimBroadcast(in_dim_shape, operand, broadcast_dimensions));
if (ShapeUtil::Equal(in_dim_shape, output_shape)) {
return in_dim_broadcast;
}
return AddBroadcastSequence(output_shape, in_dim_broadcast);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ReshapeInternal(const Shape& shape,
XlaOp operand,
int64_t inferred_dimension) {
TF_RETURN_IF_ERROR(first_error_);
if (shape.is_unbounded_dynamic()) {
return InvalidArgument(
"Reshaping with unbounded result shape is not supported.");
}
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
if (inferred_dimension != -1) {
instr.add_dimensions(inferred_dimension);
}
return AddInstruction(std::move(instr), HloOpcode::kReshape, {operand});
}
XlaOp XlaBuilder::Slice(XlaOp operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices,
absl::Span<const int64_t> strides) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferSliceShape(
*operand_shape, start_indices,
limit_indices, strides));
return SliceInternal(shape, operand, start_indices, limit_indices, strides);
});
}
absl::StatusOr<XlaOp> XlaBuilder::SliceInternal(
const Shape& shape, XlaOp operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices,
absl::Span<const int64_t> strides) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int i = 0, end = start_indices.size(); i < end; i++) {
auto* slice_config = instr.add_slice_dimensions();
slice_config->set_start(start_indices[i]);
slice_config->set_limit(limit_indices[i]);
slice_config->set_stride(strides[i]);
}
return AddInstruction(std::move(instr), HloOpcode::kSlice, {operand});
}
XlaOp XlaBuilder::SliceInDim(XlaOp operand, int64_t start_index,
int64_t limit_index, int64_t stride,
int64_t dimno) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
std::vector<int64_t> starts(shape->rank(), 0);
std::vector<int64_t> limits(shape->dimensions().begin(),
shape->dimensions().end());
std::vector<int64_t> strides(shape->rank(), 1);
starts[dimno] = start_index;
limits[dimno] = limit_index;
strides[dimno] = stride;
return Slice(operand, starts, limits, strides);
});
}
XlaOp XlaBuilder::DynamicSlice(XlaOp operand,
absl::Span<const XlaOp> start_indices,
absl::Span<const int64_t> slice_sizes) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> start_indices_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& start_indices_shapes,
GetOperandShapes(start_indices));
absl::c_transform(start_indices_shapes,
std::back_inserter(start_indices_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferDynamicSliceShape(
*operand_shape, start_indices_shapes, slice_sizes));
return DynamicSliceInternal(shape, operand, start_indices, slice_sizes);
});
}
absl::StatusOr<XlaOp> XlaBuilder::DynamicSliceInternal(
const Shape& shape, XlaOp operand, absl::Span<const XlaOp> start_indices,
absl::Span<const int64_t> slice_sizes) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t size : slice_sizes) {
instr.add_dynamic_slice_sizes(size);
}
std::vector<XlaOp> operands = {operand};
operands.insert(operands.end(), start_indices.begin(), start_indices.end());
return AddInstruction(std::move(instr), HloOpcode::kDynamicSlice, operands);
}
XlaOp XlaBuilder::DynamicUpdateSlice(XlaOp operand, XlaOp update,
absl::Span<const XlaOp> start_indices) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* update_shape, GetShapePtr(update));
std::vector<const Shape*> start_indices_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& start_indices_shapes,
GetOperandShapes(start_indices));
absl::c_transform(start_indices_shapes,
std::back_inserter(start_indices_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferDynamicUpdateSliceShape(
*operand_shape, *update_shape, start_indices_shapes));
return DynamicUpdateSliceInternal(shape, operand, update, start_indices);
});
}
absl::StatusOr<XlaOp> XlaBuilder::DynamicUpdateSliceInternal(
const Shape& shape, XlaOp operand, XlaOp update,
absl::Span<const XlaOp> start_indices) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
std::vector<XlaOp> operands = {operand, update};
operands.insert(operands.end(), start_indices.begin(), start_indices.end());
return AddInstruction(std::move(instr), HloOpcode::kDynamicUpdateSlice,
operands);
}
XlaOp XlaBuilder::ConcatInDim(absl::Span<const XlaOp> operands,
int64_t dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferConcatOpShape(
operand_shape_ptrs, dimension));
return ConcatInDimInternal(shape, operands, dimension);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ConcatInDimInternal(
const Shape& shape, absl::Span<const XlaOp> operands, int64_t dimension) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.add_dimensions(dimension);
return AddInstruction(std::move(instr), HloOpcode::kConcatenate, operands);
}
XlaOp XlaBuilder::Pad(XlaOp operand, XlaOp padding_value,
const PaddingConfig& padding_config) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* padding_value_shape,
GetShapePtr(padding_value));
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferPadShape(
*operand_shape, *padding_value_shape, padding_config));
return PadInternal(shape, operand, padding_value, padding_config);
});
}
XlaOp XlaBuilder::PadInDim(XlaOp operand, XlaOp padding_value, int64_t dimno,
int64_t pad_lo, int64_t pad_hi) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
PaddingConfig padding_config = MakeNoPaddingConfig(shape->rank());
auto* dims = padding_config.mutable_dimensions(dimno);
dims->set_edge_padding_low(pad_lo);
dims->set_edge_padding_high(pad_hi);
return Pad(operand, padding_value, padding_config);
});
}
absl::StatusOr<XlaOp> XlaBuilder::PadInternal(
const Shape& shape, XlaOp operand, XlaOp padding_value,
const PaddingConfig& padding_config) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_padding_config() = padding_config;
return AddInstruction(std::move(instr), HloOpcode::kPad,
{operand, padding_value});
}
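// Reshape with an explicit dimension order transposes the operand into that
// order first (skipped when it is the identity permutation) and then
// reshapes.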
XlaOp XlaBuilder::Reshape(XlaOp operand, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> new_sizes,
int64_t inferred_dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape shape, ShapeInference::InferReshapeShape(
*operand_shape, dimensions,
new_sizes, inferred_dimension));
XlaOp transposed = IsIdentityPermutation(dimensions)
? operand
: Transpose(operand, dimensions);
return ReshapeInternal(shape, transposed, inferred_dimension);
});
}
XlaOp XlaBuilder::Reshape(XlaOp operand, absl::Span<const int64_t> new_sizes,
int64_t inferred_dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
std::vector<int64_t> dimensions(shape->dimensions_size());
std::iota(dimensions.begin(), dimensions.end(), 0);
return Reshape(operand, dimensions, new_sizes, inferred_dimension);
});
}
XlaOp XlaBuilder::Reshape(const Shape& shape, XlaOp operand,
int64_t inferred_dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
return ReshapeInternal(shape, operand, inferred_dimension);
});
}
XlaOp XlaBuilder::DynamicReshape(XlaOp operand,
absl::Span<const XlaOp> dim_sizes,
absl::Span<const int64_t> new_size_bounds,
const std::vector<bool>& dims_are_dynamic) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> dim_size_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& dim_size_shapes,
GetOperandShapes(dim_sizes));
absl::c_transform(dim_size_shapes, std::back_inserter(dim_size_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const Shape shape,
ShapeInference::InferDynamicReshapeShape(
*operand_shape, dim_size_shape_ptrs,
new_size_bounds, dims_are_dynamic));
TF_RETURN_IF_ERROR(first_error_);
std::vector<XlaOp> operands;
operands.reserve(1 + dim_sizes.size());
operands.push_back(operand);
for (const XlaOp& dim_size : dim_sizes) {
operands.push_back(dim_size);
}
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kDynamicReshape,
operands);
});
}
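// Collapses a run of consecutive dimensions into one via Reshape; for
// example, collapsing dimensions {1, 2} of a [2,3,4,5] operand yields
// [2,12,5].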
XlaOp XlaBuilder::Collapse(XlaOp operand,
absl::Span<const int64_t> dimensions) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (dimensions.size() <= 1) {
return operand;
}
for (absl::Span<const int64_t>::size_type i = 1; i < dimensions.size();
++i) {
if (dimensions[i] - 1 != dimensions[i - 1]) {
return InvalidArgument(
"Collapsed dimensions are not in consecutive order.");
}
}
TF_ASSIGN_OR_RETURN(const Shape* original_shape, GetShapePtr(operand));
VLOG(3) << "original shape: " << ShapeUtil::HumanString(*original_shape);
VLOG(3) << "dims to collapse: " << absl::StrJoin(dimensions, ",");
std::vector<int64_t> new_sizes;
for (int i = 0; i < original_shape->rank(); ++i) {
if (i <= dimensions.front() || i > dimensions.back()) {
new_sizes.push_back(original_shape->dimensions(i));
} else {
new_sizes.back() *= original_shape->dimensions(i);
}
}
VLOG(3) << "new sizes: [" << absl::StrJoin(new_sizes, ",") << "]";
return Reshape(operand, new_sizes);
});
}
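// Identity computation over `shape`, used by Select to lower a tuple-shaped
// select into a Conditional with passthrough branches.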
static absl::StatusOr<XlaComputation> PassthroughComputation(
const Shape& shape) {
XlaBuilder builder("dummy");
XlaOp out = Parameter(&builder, 0, shape, "p");
return builder.Build(out);
}
XlaOp XlaBuilder::Select(XlaOp pred, XlaOp on_true, XlaOp on_false) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* true_shape, GetShapePtr(on_true));
TF_ASSIGN_OR_RETURN(const Shape* false_shape, GetShapePtr(on_false));
TF_RET_CHECK(true_shape->IsTuple() == false_shape->IsTuple());
if (true_shape->IsTuple()) {
TF_ASSIGN_OR_RETURN(XlaComputation passthrough_true,
PassthroughComputation(*true_shape));
TF_ASSIGN_OR_RETURN(XlaComputation passthrough_false,
PassthroughComputation(*false_shape));
return Conditional(pred, on_true, passthrough_true, on_false,
passthrough_false);
}
return TernaryOp(HloOpcode::kSelect, pred, on_true, on_false);
});
}
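// Note: for tuple-shaped branches Select cannot emit an elementwise kSelect;
// it wraps each branch in a passthrough computation and lowers to
// Conditional(pred, ...), turning tuple predication into control flow.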
XlaOp XlaBuilder::Tuple(absl::Span<const XlaOp> elements) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(elements));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const Shape shape,
ShapeInference::InferVariadicOpShape(
HloOpcode::kTuple, operand_shape_ptrs));
return TupleInternal(shape, elements);
});
}
absl::StatusOr<XlaOp> XlaBuilder::TupleInternal(
const Shape& shape, absl::Span<const XlaOp> elements) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kTuple, elements);
}
XlaOp XlaBuilder::GetTupleElement(XlaOp tuple_data, int64_t index) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* tuple_shape, GetShapePtr(tuple_data));
if (!tuple_shape->IsTuple()) {
return InvalidArgument(
"Operand to GetTupleElement() is not a tuple; got %s",
ShapeUtil::HumanString(*tuple_shape));
}
if (index < 0 || index >= ShapeUtil::TupleElementCount(*tuple_shape)) {
return InvalidArgument(
"GetTupleElement() index (%d) out of range for tuple shape %s", index,
ShapeUtil::HumanString(*tuple_shape));
}
return GetTupleElementInternal(
ShapeUtil::GetTupleElementShape(*tuple_shape, index), tuple_data,
index);
});
}
absl::StatusOr<XlaOp> XlaBuilder::GetTupleElementInternal(const Shape& shape,
XlaOp tuple_data,
int64_t index) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_tuple_index(index);
return AddInstruction(std::move(instr), HloOpcode::kGetTupleElement,
{tuple_data});
}
XlaOp XlaBuilder::Dot(XlaOp lhs, XlaOp rhs,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
DotDimensionNumbers dimension_numbers;
dimension_numbers.add_lhs_contracting_dimensions(
lhs_shape->dimensions_size() == 1 ? 0 : 1);
dimension_numbers.add_rhs_contracting_dimensions(0);
return DotGeneral(lhs, rhs, dimension_numbers, precision_config,
preferred_element_type);
});
}
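// Contracting-dimension defaults chosen by Dot above:
//   - rank >= 2 lhs: contracts lhs dimension 1 against rhs dimension 0,
//     e.g. f32[m,k] dot f32[k,n] -> f32[m,n];
//   - rank-1 lhs: contracts lhs dimension 0 against rhs dimension 0.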
XlaOp XlaBuilder::DotGeneral(
XlaOp lhs, XlaOp rhs, const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferDotOpShape(
*lhs_shape, *rhs_shape, dimension_numbers, preferred_element_type));
return DotGeneralInternal(shape, lhs, rhs, dimension_numbers,
precision_config);
});
}
absl::StatusOr<XlaOp> XlaBuilder::DotGeneralInternal(
const Shape& shape, XlaOp lhs, XlaOp rhs,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_dot_dimension_numbers() = dimension_numbers;
if (precision_config != nullptr) {
*instr.mutable_precision_config() = *precision_config;
}
return AddInstruction(std::move(instr), HloOpcode::kDot, {lhs, rhs});
}
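// Illustrative DotGeneral setup for a batched matmul (a sketch, not part of
// this file): with lhs f32[8,m,k] and rhs f32[8,k,n],
//
//   DotDimensionNumbers dnums;
//   dnums.add_lhs_batch_dimensions(0);
//   dnums.add_rhs_batch_dimensions(0);
//   dnums.add_lhs_contracting_dimensions(2);
//   dnums.add_rhs_contracting_dimensions(1);
//
// and InferDotOpShape then yields f32[8,m,n].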
XlaOp XlaBuilder::SparseDot(
XlaOp lhs, XlaOp rhs, absl::Span<const XlaOp> sparse_meta,
absl::Span<const SparsityDescriptor> sparsity,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferDotOpShape(
*lhs_shape, *rhs_shape, dimension_numbers,
preferred_element_type, sparsity));
std::vector<XlaOp> operands{lhs, rhs};
operands.insert(operands.end(), sparse_meta.begin(), sparse_meta.end());
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_dot_dimension_numbers() = dimension_numbers;
if (precision_config != nullptr) {
*instr.mutable_precision_config() = *precision_config;
}
for (const SparsityDescriptor& descriptor : sparsity) {
*instr.add_dot_sparsity() = descriptor;
}
return AddInstruction(std::move(instr), HloOpcode::kDot, operands);
});
}
absl::Status XlaBuilder::VerifyConvolution(
const Shape& lhs_shape, const Shape& rhs_shape,
const ConvolutionDimensionNumbers& dimension_numbers) const {
if (lhs_shape.rank() != rhs_shape.rank()) {
return InvalidArgument(
"Convolution arguments must have same number of "
"dimensions. Got: %s and %s",
ShapeUtil::HumanString(lhs_shape), ShapeUtil::HumanString(rhs_shape));
}
int num_dims = lhs_shape.rank();
if (num_dims < 2) {
return InvalidArgument(
"Convolution expects argument arrays with >= 3 dimensions. "
"Got: %s and %s",
ShapeUtil::HumanString(lhs_shape), ShapeUtil::HumanString(rhs_shape));
}
int num_spatial_dims = num_dims - 2;
const auto check_spatial_dimensions =
[&](absl::string_view field_name,
absl::Span<const int64_t> numbers) -> absl::Status {
if (numbers.size() != num_spatial_dims) {
return InvalidArgument("Expected %d elements for %s, but got %d.",
num_spatial_dims, field_name, numbers.size());
}
for (int i = 0; i < numbers.size(); ++i) {
if (numbers[i] < 0 || numbers[i] >= num_dims) {
return InvalidArgument("Convolution %s[%d] is out of bounds: %d",
field_name, i, numbers[i]);
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
check_spatial_dimensions("input_spatial_dimensions",
dimension_numbers.input_spatial_dimensions()));
TF_RETURN_IF_ERROR(
check_spatial_dimensions("kernel_spatial_dimensions",
dimension_numbers.kernel_spatial_dimensions()));
return check_spatial_dimensions(
"output_spatial_dimensions",
dimension_numbers.output_spatial_dimensions());
}
XlaOp XlaBuilder::Conv(XlaOp lhs, XlaOp rhs,
absl::Span<const int64_t> window_strides,
Padding padding, int64_t feature_group_count,
int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ConvWithGeneralDimensions(
lhs, rhs, window_strides, padding,
CreateDefaultConvDimensionNumbers(window_strides.size()),
feature_group_count, batch_group_count, precision_config,
preferred_element_type);
}
XlaOp XlaBuilder::ConvWithGeneralPadding(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ConvGeneral(lhs, rhs, window_strides, padding,
CreateDefaultConvDimensionNumbers(window_strides.size()),
feature_group_count, batch_group_count, precision_config,
preferred_element_type);
}
XlaOp XlaBuilder::ConvWithGeneralDimensions(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_RETURN_IF_ERROR(
VerifyConvolution(*lhs_shape, *rhs_shape, dimension_numbers));
std::vector<int64_t> base_area_dimensions(
dimension_numbers.input_spatial_dimensions_size());
for (std::vector<int64_t>::size_type i = 0; i < base_area_dimensions.size();
++i) {
base_area_dimensions[i] =
lhs_shape->dimensions(dimension_numbers.input_spatial_dimensions(i));
}
std::vector<int64_t> window_dimensions(
dimension_numbers.kernel_spatial_dimensions_size());
for (std::vector<int64_t>::size_type i = 0; i < window_dimensions.size();
++i) {
window_dimensions[i] =
rhs_shape->dimensions(dimension_numbers.kernel_spatial_dimensions(i));
}
return ConvGeneral(lhs, rhs, window_strides,
MakePadding(base_area_dimensions, window_dimensions,
window_strides, padding),
dimension_numbers, feature_group_count,
batch_group_count, precision_config,
preferred_element_type);
});
}
XlaOp XlaBuilder::ConvGeneral(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ConvGeneralDilated(lhs, rhs, window_strides, padding, {}, {},
dimension_numbers, feature_group_count,
batch_group_count, precision_config,
preferred_element_type);
}
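// Delegation chain for the convolution entry points above: Conv and
// ConvWithGeneralPadding use default dimension numbers,
// ConvWithGeneralDimensions derives explicit padding values from
// Padding::kSame/kValid, and all of them bottom out in ConvGeneralDilated,
// where empty lhs/rhs dilation spans mean no dilation.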
XlaOp XlaBuilder::ConvGeneralDilated(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type,
std::optional<std::vector<bool>> window_reversal) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_RETURN_IF_ERROR(
VerifyConvolution(*lhs_shape, *rhs_shape, dimension_numbers));
std::vector<int64_t> window_dimensions(
dimension_numbers.kernel_spatial_dimensions_size());
for (std::vector<int64_t>::size_type i = 0; i < window_dimensions.size();
++i) {
window_dimensions[i] =
rhs_shape->dimensions(dimension_numbers.kernel_spatial_dimensions(i));
}
TF_ASSIGN_OR_RETURN(Window window,
ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides, padding,
lhs_dilation, rhs_dilation, window_reversal));
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferConvolveShape(
*lhs_shape, *rhs_shape, feature_group_count, batch_group_count,
window, dimension_numbers, preferred_element_type));
return ConvGeneralDilatedInternal(shape, lhs, rhs, window, window_strides,
padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count,
batch_group_count, precision_config);
});
}
absl::StatusOr<HloInstructionProto> XlaBuilder::DynamicConvInstruction(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
std::vector<int64_t> window_dimensions(
dimension_numbers.kernel_spatial_dimensions_size());
for (std::vector<int64_t>::size_type i = 0; i < window_dimensions.size();
++i) {
window_dimensions[i] =
rhs_shape->dimensions(dimension_numbers.kernel_spatial_dimensions(i));
}
TF_ASSIGN_OR_RETURN(Window window, ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides,
padding, lhs_dilation, rhs_dilation));
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferConvolveShape(
*lhs_shape, *rhs_shape, feature_group_count, batch_group_count,
window, dimension_numbers, preferred_element_type));
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_window() = window;
*instr.mutable_convolution_dimension_numbers() = dimension_numbers;
instr.set_feature_group_count(feature_group_count);
instr.set_batch_group_count(batch_group_count);
instr.set_padding_type(padding_type);
if (precision_config != nullptr) {
*instr.mutable_precision_config() = *precision_config;
}
return std::move(instr);
}
XlaOp XlaBuilder::DynamicConvInputGrad(
XlaOp input_sizes, XlaOp lhs, XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
DynamicConvInstruction(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type));
instr.set_custom_call_target("DynamicConvolutionInputGrad");
return AddInstruction(std::move(instr), HloOpcode::kCustomCall,
{input_sizes, lhs, rhs});
});
}
XlaOp XlaBuilder::DynamicConvKernelGrad(
XlaOp activations, XlaOp gradients,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
DynamicConvInstruction(activations, gradients, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision_config, padding_type,
preferred_element_type));
instr.set_custom_call_target("DynamicConvolutionKernelGrad");
instr.mutable_shape()->clear_is_dynamic_dimension();
return AddInstruction(std::move(instr), HloOpcode::kCustomCall,
{activations, gradients});
});
}
XlaOp XlaBuilder::DynamicConvForward(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
DynamicConvInstruction(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type));
instr.set_custom_call_target("DynamicConvolutionForward");
return AddInstruction(std::move(instr), HloOpcode::kCustomCall, {lhs, rhs});
});
}
absl::StatusOr<XlaOp> XlaBuilder::ConvGeneralDilatedInternal(
const Shape& shape, XlaOp lhs, XlaOp rhs, const Window& window,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_window() = window;
*instr.mutable_convolution_dimension_numbers() = dimension_numbers;
instr.set_feature_group_count(feature_group_count);
instr.set_batch_group_count(batch_group_count);
if (precision_config != nullptr) {
*instr.mutable_precision_config() = *precision_config;
}
return AddInstruction(std::move(instr), HloOpcode::kConvolution, {lhs, rhs});
}
XlaOp XlaBuilder::Fft(XlaOp operand, const FftType fft_type,
const absl::Span<const int64_t> fft_length) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferFftShape(
*operand_shape, fft_type, fft_length));
return FftInternal(shape, operand, fft_type, fft_length);
});
}
absl::StatusOr<XlaOp> XlaBuilder::FftInternal(
const Shape& shape, XlaOp operand, const FftType fft_type,
const absl::Span<const int64_t> fft_length) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_fft_type(fft_type);
for (int64_t i : fft_length) {
instr.add_fft_length(i);
}
return AddInstruction(std::move(instr), HloOpcode::kFft, {operand});
}
absl::StatusOr<XlaOp> XlaBuilder::TriangularSolveInternal(
const Shape& shape, XlaOp a, XlaOp b, TriangularSolveOptions options) {
HloInstructionProto instr;
*instr.mutable_triangular_solve_options() = std::move(options);
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kTriangularSolve, {a, b});
}
absl::StatusOr<XlaOp> XlaBuilder::CholeskyInternal(const Shape& shape, XlaOp a,
bool lower) {
HloInstructionProto instr;
CholeskyOptions& options = *instr.mutable_cholesky_options();
options.set_lower(lower);
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kCholesky, {a});
}
XlaOp XlaBuilder::Infeed(const Shape& shape, const std::string& config) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("Given shape to Infeed must have a layout");
}
const Shape infeed_instruction_shape =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
*instr.mutable_shape() = infeed_instruction_shape.ToProto();
instr.set_infeed_config(config);
if (shape.IsArray() && sharding() &&
sharding()->type() == OpSharding::OTHER) {
return InvalidArgument(
"Tiled sharding is not yet supported for array-shaped infeeds");
}
if (sharding() && sharding()->type() == OpSharding::REPLICATED) {
return InvalidArgument(
"Replicated sharding is not yet supported for infeeds");
}
XlaOp token;
auto make_token = [&]() {
HloInstructionProto token_instr;
*token_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
return AddInstruction(std::move(token_instr), HloOpcode::kAfterAll, {});
};
if (sharding()) {
OpSharding sharding = sharding_builder::AssignDevice(0);
XlaScopedShardingAssignment scoped_sharding(this, sharding);
TF_ASSIGN_OR_RETURN(token, make_token());
} else {
TF_ASSIGN_OR_RETURN(token, make_token());
}
XlaOp infeed;
if (sharding() && sharding()->type() == OpSharding::TUPLE) {
OpSharding infeed_instruction_sharding = *sharding();
*infeed_instruction_sharding.add_tuple_shardings() =
sharding_builder::AssignDevice(0);
XlaScopedShardingAssignment scoped_sharding(this,
infeed_instruction_sharding);
TF_ASSIGN_OR_RETURN(infeed, AddInstruction(std::move(instr),
HloOpcode::kInfeed, {token}));
} else {
TF_ASSIGN_OR_RETURN(infeed, AddInstruction(std::move(instr),
HloOpcode::kInfeed, {token}));
}
HloInstructionProto infeed_data;
*infeed_data.mutable_shape() = shape.ToProto();
infeed_data.set_tuple_index(0);
return AddInstruction(std::move(infeed_data), HloOpcode::kGetTupleElement,
{infeed});
});
}
XlaOp XlaBuilder::InfeedWithToken(XlaOp token, const Shape& shape,
const std::string& config) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("Given shape to Infeed must have a layout");
}
const Shape infeed_instruction_shape =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
if (shape.IsArray() && sharding() &&
sharding()->type() == OpSharding::OTHER) {
return InvalidArgument(
"Tiled sharding is not yet supported for array-shaped infeeds");
}
if (sharding() && sharding()->type() == OpSharding::REPLICATED) {
return InvalidArgument(
"Replicated sharding is not yet supported for infeeds");
}
return InfeedWithTokenInternal(infeed_instruction_shape, token, config);
});
}
absl::StatusOr<XlaOp> XlaBuilder::InfeedWithTokenInternal(
const Shape& infeed_instruction_shape, XlaOp token,
const std::string& config) {
HloInstructionProto instr;
*instr.mutable_shape() = infeed_instruction_shape.ToProto();
instr.set_infeed_config(config);
return AddInstruction(std::move(instr), HloOpcode::kInfeed, {token});
}
void XlaBuilder::Outfeed(XlaOp operand, const Shape& shape_with_layout,
const std::string& outfeed_config) {
ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
if (!LayoutUtil::HasLayout(shape_with_layout)) {
return InvalidArgument("Given shape to Outfeed must have a layout");
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (!ShapeUtil::Compatible(*operand_shape, shape_with_layout)) {
return InvalidArgument(
"Outfeed shape %s must be compatible with operand shape %s",
ShapeUtil::HumanStringWithLayout(shape_with_layout),
ShapeUtil::HumanStringWithLayout(*operand_shape));
}
*instr.mutable_outfeed_shape() = shape_with_layout.ToProto();
instr.set_outfeed_config(outfeed_config);
XlaOp token;
auto make_token = [&]() {
HloInstructionProto token_instr;
*token_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
return AddInstruction(std::move(token_instr), HloOpcode::kAfterAll, {});
};
auto make_outfeed = [&](XlaOp token) {
return AddInstruction(std::move(instr), HloOpcode::kOutfeed,
{operand, token});
};
if (sharding()) {
XlaScopedShardingAssignment scoped_sharding(
this, sharding_builder::AssignDevice(0));
TF_ASSIGN_OR_RETURN(token, make_token());
} else {
TF_ASSIGN_OR_RETURN(token, make_token());
}
if (sharding()) {
OpSharding tuple_sharding = *sharding();
if (tuple_sharding.type() != OpSharding::TUPLE) {
tuple_sharding = sharding_builder::Tuple({});
*tuple_sharding.add_tuple_shardings() = *sharding();
}
*tuple_sharding.add_tuple_shardings() = sharding_builder::AssignDevice(0);
XlaScopedShardingAssignment scoped_sharding(this, tuple_sharding);
TF_RETURN_IF_ERROR(make_outfeed(token).status());
} else {
TF_RETURN_IF_ERROR(make_outfeed(token).status());
}
HloInstructionProto tuple_instr;
*tuple_instr.mutable_shape() = ShapeUtil::MakeNil().ToProto();
{
XlaScopedShardingAssignment scoped_sharding(this, std::nullopt);
TF_ASSIGN_OR_RETURN(
XlaOp empty_tuple,
AddInstruction(std::move(tuple_instr), HloOpcode::kTuple, {}));
return empty_tuple;
}
});
}
XlaOp XlaBuilder::OutfeedWithToken(XlaOp operand, XlaOp token,
const Shape& shape_with_layout,
const std::string& outfeed_config) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!LayoutUtil::HasLayout(shape_with_layout)) {
return InvalidArgument("Given shape to Outfeed must have a layout");
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (!ShapeUtil::Compatible(*operand_shape, shape_with_layout)) {
return InvalidArgument(
"Outfeed shape %s must be compatible with operand shape %s",
ShapeUtil::HumanStringWithLayout(shape_with_layout),
ShapeUtil::HumanStringWithLayout(*operand_shape));
}
return OutfeedWithTokenInternal(operand, token, shape_with_layout,
outfeed_config);
});
}
absl::StatusOr<XlaOp> XlaBuilder::OutfeedWithTokenInternal(
XlaOp operand, XlaOp token, const Shape& shape_with_layout,
const std::string& outfeed_config) {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
*instr.mutable_outfeed_shape() = shape_with_layout.ToProto();
instr.set_outfeed_config(outfeed_config);
return AddInstruction(std::move(instr), HloOpcode::kOutfeed,
{operand, token});
}
XlaOp XlaBuilder::CreateToken() {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
return AddInstruction(std::move(instr), HloOpcode::kAfterAll);
});
}
XlaOp XlaBuilder::AfterAll(absl::Span<const XlaOp> tokens) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
for (int i = 0, end = tokens.size(); i < end; ++i) {
XlaOp operand = tokens[i];
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (!operand_shape->IsToken()) {
return InvalidArgument(
"All operands to AfterAll must be tokens; operand %d has shape %s",
i, ShapeUtil::HumanString(*operand_shape));
}
}
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
return AddInstruction(std::move(instr), HloOpcode::kAfterAll, tokens);
});
}
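// Token-threading sketch (illustrative only, not part of this file):
// side-effecting ops are ordered through token values, e.g.
//
//   XlaOp token = CreateToken(&b);  // `b` is a hypothetical XlaBuilder
//   XlaOp in = InfeedWithToken(token, shape, /*config=*/"");
//   XlaOp data = GetTupleElement(in, 0);      // the infed value
//   XlaOp in_token = GetTupleElement(in, 1);  // the chained token
//   XlaOp done = OutfeedWithToken(data, in_token, shape_with_layout, "");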
XlaOp XlaBuilder::CustomCall(
const std::string& call_target_name, absl::Span<const XlaOp> operands,
const Shape& shape, const std::string& opaque,
std::optional<absl::Span<const Shape>> operand_shapes_with_layout,
bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, std::optional<Window> window,
std::optional<ConvolutionDimensionNumbers> dnums,
CustomCallSchedule schedule, CustomCallApiVersion api_version) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (absl::StartsWith(call_target_name, "$")) {
return InvalidArgument(
"Invalid custom_call_target \"%s\": Call targets that start with '$' "
"are reserved for internal use.",
call_target_name);
}
if (operand_shapes_with_layout.has_value()) {
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument(
"Result shape must have layout for custom call with constrained "
"layout.");
}
if (operands.size() != operand_shapes_with_layout->size()) {
return InvalidArgument(
"Must specify a shape with layout for each operand for custom call "
"with constrained layout; given %d shapes, expected %d",
operand_shapes_with_layout->size(), operands.size());
}
int64_t operand_num = 0;
for (const Shape& operand_shape : *operand_shapes_with_layout) {
if (!LayoutUtil::HasLayout(operand_shape)) {
return InvalidArgument(
"No layout specified for operand %d for custom call with "
"constrained layout.",
operand_num);
}
++operand_num;
}
}
return CustomCallInternal(
call_target_name, operands, nullptr, shape, opaque,
operand_shapes_with_layout, has_side_effect, output_operand_aliasing,
literal, window, dnums, schedule, api_version);
});
}
absl::StatusOr<XlaOp> XlaBuilder::CustomCallInternal(
const std::string& call_target_name, absl::Span<const XlaOp> operands,
const XlaComputation* computation, const Shape& shape,
const std::string& opaque,
std::optional<absl::Span<const Shape>> operand_shapes_with_layout,
bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, std::optional<Window> window,
std::optional<ConvolutionDimensionNumbers> dnums,
CustomCallSchedule schedule, CustomCallApiVersion api_version) {
HloInstructionProto instr;
if (call_target_name == "__cudnn$convForward") {
instr.set_name("cudnn-conv");
} else if (call_target_name == "__cudnn$convBackwardInput") {
instr.set_name("cudnn-conv-bw-input");
} else if (call_target_name == "__cudnn$convBackwardFilter") {
instr.set_name("cudnn-conv-bw-filter");
} else if (call_target_name == "__cudnn$convBiasActivationForward") {
instr.set_name("cudnn-conv-bias-activation");
}
*instr.mutable_shape() = shape.ToProto();
instr.set_custom_call_target(call_target_name);
instr.set_backend_config(opaque);
if (operand_shapes_with_layout.has_value()) {
instr.set_constrain_layout(true);
for (const Shape& operand_shape : *operand_shapes_with_layout) {
*instr.add_operand_shapes_with_layout() = operand_shape.ToProto();
}
}
if (literal != nullptr) {
*instr.mutable_literal() = literal->ToProto();
}
instr.set_custom_call_has_side_effect(has_side_effect);
if (computation != nullptr && !computation->IsNull()) {
AddCalledComputation(*computation, &instr);
}
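  // Each aliasing entry is (output ShapeIndex, (operand number, operand
  // ShapeIndex)): pair.first addresses the aliased element within the result
  // and pair.second names the operand and the element within that operand.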
for (const auto& pair : output_operand_aliasing) {
auto aliasing = instr.add_output_operand_aliasing();
aliasing->set_operand_index(pair.second.first);
for (int64_t index : pair.second.second) {
aliasing->add_operand_shape_index(index);
}
for (int64_t index : pair.first) {
aliasing->add_output_shape_index(index);
}
}
if (window.has_value()) {
*instr.mutable_window() = *window;
}
if (dnums.has_value()) {
*instr.mutable_convolution_dimension_numbers() = *dnums;
}
instr.set_custom_call_schedule(schedule);
instr.set_custom_call_api_version(api_version);
return AddInstruction(std::move(instr), HloOpcode::kCustomCall, operands);
}
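// Usage sketch for in-place aliasing via CustomCall (illustrative only, not
// part of this file; "my_target" is a hypothetical registered target). To
// alias output tuple element {0} to operand 1 (whole operand, index {}):
//
//   builder.CustomCall("my_target", {x, buf}, out_shape, /*opaque=*/"",
//                      /*operand_shapes_with_layout=*/std::nullopt,
//                      /*has_side_effect=*/true,
//                      /*output_operand_aliasing=*/{{{0}, {1, {}}}},
//                      /*literal=*/nullptr);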
XlaOp XlaBuilder::CustomCall(
const std::string& call_target_name, absl::Span<const XlaOp> operands,
const XlaComputation& computation, const Shape& shape,
const std::string& opaque,
std::optional<absl::Span<const Shape>> operand_shapes_with_layout,
bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, CustomCallSchedule schedule,
CustomCallApiVersion api_version) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (absl::StartsWith(call_target_name, "$")) {
return InvalidArgument(
"Invalid custom_call_target \"%s\": Call targets that start with '$' "
"are reserved for internal use.",
call_target_name);
}
if (operand_shapes_with_layout.has_value()) {
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument(
"Result shape must have layout for custom call with constrained "
"layout.");
}
if (operands.size() != operand_shapes_with_layout->size()) {
return InvalidArgument(
"Must specify a shape with layout for each operand for custom call "
"with constrained layout; given %d shapes, expected %d",
operand_shapes_with_layout->size(), operands.size());
}
int64_t operand_num = 0;
for (const Shape& operand_shape : *operand_shapes_with_layout) {
if (!LayoutUtil::HasLayout(operand_shape)) {
return InvalidArgument(
"No layout specified for operand %d for custom call with "
"constrained layout.",
operand_num);
}
++operand_num;
}
}
return CustomCallInternal(
call_target_name, operands, &computation, shape, opaque,
operand_shapes_with_layout, has_side_effect, output_operand_aliasing,
literal, {}, {}, schedule, api_version);
});
}
XlaOp XlaBuilder::OptimizationBarrier(XlaOp operand) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
Shape shape = *operand_shape;
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kOptimizationBarrier,
{operand});
});
}
XlaOp XlaBuilder::Transpose(XlaOp operand,
absl::Span<const int64_t> permutation) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferTransposeShape(
*operand_shape, permutation));
return TransposeInternal(shape, operand, permutation);
});
}
absl::StatusOr<XlaOp> XlaBuilder::TransposeInternal(
const Shape& shape, XlaOp operand, absl::Span<const int64_t> permutation) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t dim : permutation) {
instr.add_dimensions(dim);
}
return AddInstruction(std::move(instr), HloOpcode::kTranspose, {operand});
}
XlaOp XlaBuilder::Rev(XlaOp operand, absl::Span<const int64_t> dimensions) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferReverseShape(
*operand_shape, dimensions));
return RevInternal(shape, operand, dimensions);
});
}
absl::StatusOr<XlaOp> XlaBuilder::RevInternal(
const Shape& shape, XlaOp operand, absl::Span<const int64_t> dimensions) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t dim : dimensions) {
instr.add_dimensions(dim);
}
return AddInstruction(std::move(instr), HloOpcode::kReverse, {operand});
}
XlaOp XlaBuilder::Sort(absl::Span<const XlaOp> operands,
const XlaComputation& comparator, int64_t dimension,
bool is_stable) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(std::vector<Shape> operand_shapes,
GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferVariadicOpShape(
HloOpcode::kSort, operand_shape_ptrs));
return SortInternal(shape, operands, comparator, dimension, is_stable);
});
}
absl::StatusOr<XlaOp> XlaBuilder::SortInternal(const Shape& shape,
absl::Span<const XlaOp> operands,
const XlaComputation& comparator,
int64_t dimension,
bool is_stable) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_is_stable(is_stable);
if (dimension == -1) {
TF_ASSIGN_OR_RETURN(const Shape* keys_shape, GetShapePtr(operands[0]));
dimension = keys_shape->rank() - 1;
}
instr.add_dimensions(dimension);
AddCalledComputation(comparator, &instr);
return AddInstruction(std::move(instr), HloOpcode::kSort, operands);
}
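// Note on SortInternal: a `dimension` of -1 is resolved here to the last
// dimension of the first (key) operand. The comparator is a computation
// taking two scalar parameters per sorted operand and returning pred[];
// e.g. an ascending f32 sort might compare with Lt(p0, p1) in a sub-builder
// (sketch only, not prescribed by this file).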
XlaOp XlaBuilder::TopK(XlaOp operand, int64_t k, bool largest) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferTopKShape(*operand_shape, k));
return TopKInternal(shape, operand, k, largest);
});
}
absl::StatusOr<XlaOp> XlaBuilder::TopKInternal(const Shape& shape,
XlaOp operand, int64_t k,
bool largest) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_k(k);
instr.set_largest(largest);
return AddInstruction(std::move(instr), HloOpcode::kTopK, {operand});
}
XlaOp XlaBuilder::ConvertElementType(XlaOp operand,
PrimitiveType new_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferConvertShape(
*operand_shape, new_element_type));
if (primitive_util::IsComplexType(operand_shape->element_type()) &&
!primitive_util::IsComplexType(new_element_type)) {
operand = Real(operand);
}
return AddOpWithShape(HloOpcode::kConvert, shape, {operand});
});
}
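// Note: ConvertElementType drops the imaginary part when narrowing a complex
// type to a real one: the operand is replaced by Real(operand) before
// kConvert is emitted, so e.g. c64 -> f32 keeps only the real component.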
XlaOp XlaBuilder::BitcastConvertType(XlaOp operand,
PrimitiveType new_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferBitcastConvertShape(
*operand_shape, new_element_type));
return BitcastConvertTypeInternal(shape, operand);
});
}
absl::StatusOr<XlaOp> XlaBuilder::BitcastConvertTypeInternal(const Shape& shape,
XlaOp operand) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kBitcastConvert,
{operand});
}
XlaOp XlaBuilder::StochasticConvertType(XlaOp operand, XlaOp random,
PrimitiveType new_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* random_shape, GetShapePtr(random));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferStochasticConvertShape(
*operand_shape, *random_shape, new_element_type));
return AddOpWithShape(HloOpcode::kStochasticConvert, shape,
{operand, random});
});
}
XlaOp XlaBuilder::Clamp(XlaOp min, XlaOp operand, XlaOp max) {
return TernaryOp(HloOpcode::kClamp, min, operand, max);
}
XlaOp XlaBuilder::Map(absl::Span<const XlaOp> operands,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions,
absl::Span<const XlaOp> static_operands) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!static_operands.empty()) {
return Unimplemented("static_operands is not supported in Map");
}
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferMapShape(
operand_shape_ptrs, called_program_shape, dimensions));
*instr.mutable_shape() = shape.ToProto();
Shape output_shape(instr.shape());
const int64_t output_rank = output_shape.rank();
AddCalledComputation(computation, &instr);
std::vector<XlaOp> new_operands(operands.begin(), operands.end());
for (XlaOp& new_operand : new_operands) {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(new_operand));
const int64_t rank = shape->rank();
if (rank != output_rank) {
TF_ASSIGN_OR_RETURN(new_operand,
InDimBroadcast(output_shape, new_operand, {}));
TF_ASSIGN_OR_RETURN(shape, GetShapePtr(new_operand));
}
if (!ShapeUtil::SameDimensions(output_shape, *shape)) {
TF_ASSIGN_OR_RETURN(new_operand,
AddBroadcastSequence(output_shape, new_operand));
}
}
return AddInstruction(std::move(instr), HloOpcode::kMap, new_operands);
});
}
XlaOp XlaBuilder::RngOp(RandomDistribution distribution,
absl::Span<const XlaOp> parameters,
const Shape& shape) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
switch (distribution) {
case RandomDistribution::RNG_NORMAL:
case RandomDistribution::RNG_UNIFORM:
if (parameters.size() != 2) {
return InvalidArgument(
"RNG distribution (%s) expects 2 parameters, but got %ld",
RandomDistribution_Name(distribution), parameters.size());
}
break;
default:
LOG(FATAL) << "unhandled distribution " << distribution;
}
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
return RngOpInternal(distribution, parameters, shape);
});
}
absl::StatusOr<XlaOp> XlaBuilder::RngOpInternal(
RandomDistribution distribution, absl::Span<const XlaOp> parameters,
const Shape& shape) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_distribution(distribution);
return AddInstruction(std::move(instr), HloOpcode::kRng, parameters);
}
XlaOp XlaBuilder::RngNormal(XlaOp mu, XlaOp sigma, const Shape& shape) {
return RngOp(RandomDistribution::RNG_NORMAL, {mu, sigma}, shape);
}
XlaOp XlaBuilder::RngUniform(XlaOp a, XlaOp b, const Shape& shape) {
return RngOp(RandomDistribution::RNG_UNIFORM, {a, b}, shape);
}
XlaOp XlaBuilder::RngBitGenerator(RandomAlgorithm algorithm,
XlaOp initial_state, const Shape& shape) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
TF_ASSIGN_OR_RETURN(Shape state_shape, GetShape(initial_state));
Shape output_shape = shape;
output_shape.set_element_type(PRIMITIVE_TYPE_INVALID);
if (primitive_util::IsArrayType(shape.element_type())) {
output_shape.set_element_type(
primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(shape.element_type())));
}
if (!primitive_util::IsUnsignedIntegralType(output_shape.element_type())) {
return InvalidArgument("Unsupported shape for RngBitGenerator: %s",
PrimitiveType_Name(shape.element_type()));
}
return RngBitGeneratorInternal(
ShapeUtil::MakeTupleShapeWithPtrs({&state_shape, &output_shape}),
algorithm, initial_state);
});
}
absl::StatusOr<XlaOp> XlaBuilder::RngBitGeneratorInternal(
const Shape& full_result_shape, RandomAlgorithm algorithm,
XlaOp initial_state) {
HloInstructionProto instr;
*instr.mutable_shape() = full_result_shape.ToProto();
instr.set_rng_algorithm(algorithm);
return AddInstruction(std::move(instr), HloOpcode::kRngBitGenerator,
{initial_state});
}
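// Note on RngBitGenerator: the requested element type is rewritten to the
// unsigned integral type of the same bit width, so asking for f32[16] data
// yields a (state, u32[16]) tuple; non-array element types are rejected.
// Callers typically bitcast or convert the raw bits afterwards.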
XlaOp XlaBuilder::While(const XlaComputation& condition,
const XlaComputation& body, XlaOp init) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const auto& body_program_shape, body.GetProgramShape());
TF_ASSIGN_OR_RETURN(const auto& condition_program_shape,
condition.GetProgramShape());
TF_ASSIGN_OR_RETURN(const Shape* init_shape, GetShapePtr(init));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferWhileShape(
condition_program_shape,
body_program_shape, *init_shape));
return WhileInternal(shape, condition, body, init);
});
}
absl::StatusOr<XlaOp> XlaBuilder::WhileInternal(const Shape& shape,
const XlaComputation& condition,
const XlaComputation& body,
XlaOp init) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
AddCalledComputation(body, &instr);
AddCalledComputation(condition, &instr);
return AddInstruction(std::move(instr), HloOpcode::kWhile, {init});
}
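// While-loop sketch (illustrative only, not part of this file): a counting
// loop supplies an s32 init value, a condition computation on that state
// returning pred[] (e.g. Lt(param, limit)), and a body computation returning
// the next state (e.g. Add(param, one)); While(cond, body, init) then yields
// the final state, with the result shape inferred from the three pieces.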
XlaOp XlaBuilder::Gather(XlaOp input, XlaOp start_indices,
const GatherDimensionNumbers& dimension_numbers,
absl::Span<const int64_t> slice_sizes,
bool indices_are_sorted) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* input_shape, GetShapePtr(input));
TF_ASSIGN_OR_RETURN(const Shape* start_indices_shape,
GetShapePtr(start_indices));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferGatherShape(
*input_shape, *start_indices_shape,
dimension_numbers, slice_sizes));
return GatherInternal(shape, input, start_indices, dimension_numbers,
slice_sizes, indices_are_sorted);
});
}
absl::StatusOr<XlaOp> XlaBuilder::GatherInternal(
const Shape& shape, XlaOp input, XlaOp start_indices,
const GatherDimensionNumbers& dimension_numbers,
absl::Span<const int64_t> slice_sizes, bool indices_are_sorted) {
HloInstructionProto instr;
instr.set_indices_are_sorted(indices_are_sorted);
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_gather_dimension_numbers() = dimension_numbers;
for (int64_t bound : slice_sizes) {
instr.add_gather_slice_sizes(bound);
}
return AddInstruction(std::move(instr), HloOpcode::kGather,
{input, start_indices});
}
XlaOp XlaBuilder::Scatter(XlaOp input, XlaOp scatter_indices, XlaOp updates,
const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers,
bool indices_are_sorted, bool unique_indices) {
return Scatter(absl::MakeConstSpan(&input, 1), scatter_indices,
absl::MakeConstSpan(&updates, 1), update_computation,
dimension_numbers, indices_are_sorted, unique_indices);
}
XlaOp XlaBuilder::Scatter(absl::Span<const XlaOp> inputs, XlaOp scatter_indices,
absl::Span<const XlaOp> updates,
const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers,
bool indices_are_sorted, bool unique_indices) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (inputs.empty()) {
return InvalidArgument("Scatter inputs cannot be empty.");
}
if (inputs.size() != updates.size()) {
return InvalidArgument(
"Scatter should have same number of inputs and updates: %d vs %d.",
inputs.size(), updates.size());
}
absl::InlinedVector<const Shape*, 3> operand_shapes;
operand_shapes.reserve(inputs.size() + 1 + updates.size());
for (const XlaOp& input : inputs) {
TF_ASSIGN_OR_RETURN(const Shape* input_shape, GetShapePtr(input));
operand_shapes.push_back(input_shape);
}
TF_ASSIGN_OR_RETURN(const Shape* scatter_indices_shape,
GetShapePtr(scatter_indices));
operand_shapes.push_back(scatter_indices_shape);
for (const XlaOp& update : updates) {
TF_ASSIGN_OR_RETURN(const Shape* update_shape, GetShapePtr(update));
operand_shapes.push_back(update_shape);
}
TF_ASSIGN_OR_RETURN(const ProgramShape& to_apply_shape,
update_computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferScatterShape(
operand_shapes, to_apply_shape, dimension_numbers));
return ScatterInternal(shape, inputs, scatter_indices, updates,
update_computation, dimension_numbers,
indices_are_sorted, unique_indices);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ScatterInternal(
const Shape& shape, absl::Span<const XlaOp> inputs, XlaOp scatter_indices,
absl::Span<const XlaOp> updates, const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers, bool indices_are_sorted,
bool unique_indices) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
instr.set_indices_are_sorted(indices_are_sorted);
instr.set_unique_indices(unique_indices);
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_scatter_dimension_numbers() = dimension_numbers;
AddCalledComputation(update_computation, &instr);
absl::InlinedVector<XlaOp, 3> operands;
operands.reserve(inputs.size() + 1 + updates.size());
absl::c_copy(inputs, std::back_inserter(operands));
operands.push_back(scatter_indices);
absl::c_copy(updates, std::back_inserter(operands));
return AddInstruction(std::move(instr), HloOpcode::kScatter, operands);
});
}
XlaOp XlaBuilder::Conditional(XlaOp predicate, XlaOp true_operand,
const XlaComputation& true_computation,
XlaOp false_operand,
const XlaComputation& false_computation) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(predicate));
if (!ShapeUtil::IsScalar(*shape) || shape->element_type() != PRED) {
return InvalidArgument(
"Argument to predicated-Conditional is not a scalar of PRED type "
"(%s).",
ShapeUtil::HumanString(*shape));
}
return ConditionalImpl(predicate, {&true_computation, &false_computation},
{true_operand, false_operand});
});
}
XlaOp XlaBuilder::Conditional(
XlaOp branch_index,
absl::Span<const XlaComputation* const> branch_computations,
absl::Span<const XlaOp> branch_operands) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(branch_index));
if (!ShapeUtil::IsScalar(*shape) || shape->element_type() != S32) {
return InvalidArgument(
"Argument to indexed-Conditional is not a scalar of S32 type (%s).",
ShapeUtil::HumanString(*shape));
}
return ConditionalImpl(branch_index, branch_computations, branch_operands);
});
}
XlaOp XlaBuilder::AllReduceImpl(XlaOp operand,
const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& layout,
const std::optional<bool> use_global_device_ids,
bool async) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> operand_shapes;
std::vector<XlaOp> operands;
if (operand_shape->IsTuple()) {
if (operand_shape->tuple_shapes_size() == 0) {
return Unimplemented("0 element tuple AllReduce is not supported");
}
for (int i = 0; i < operand_shape->tuple_shapes_size(); ++i) {
if (operand_shape->tuple_shapes(i).element_type() !=
operand_shape->tuple_shapes(0).element_type()) {
return Unimplemented(
"All the shapes of a tuple input of AllReduce must have the same "
"element type");
}
operand_shapes.push_back(&operand_shape->tuple_shapes(i));
operands.push_back(GetTupleElement(operand, i));
}
} else {
operand_shapes.push_back(operand_shape);
operands.push_back(operand);
}
TF_ASSIGN_OR_RETURN(Shape inferred_shape,
ShapeInference::InferAllReduceShape(operand_shapes));
if (layout) {
if (!LayoutUtil::HasLayout(*layout)) {
return InvalidArgument("shape_with_layout must have the layout set: %s",
ShapeUtil::HumanString(*layout));
}
if (!ShapeUtil::Compatible(*layout, *operand_shape)) {
return InvalidArgument(
"Provided shape_with_layout must be compatible with the "
"operand shape: %s vs %s",
ShapeUtil::HumanString(*layout),
ShapeUtil::HumanString(*operand_shape));
}
instr.set_constrain_layout(true);
if (operand_shape->IsTuple() && !inferred_shape.IsTuple()) {
TF_RET_CHECK(layout->tuple_shapes_size() == 1);
*instr.mutable_shape() = layout->tuple_shapes(0).ToProto();
} else {
*instr.mutable_shape() = layout->ToProto();
}
} else {
*instr.mutable_shape() = inferred_shape.ToProto();
}
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
if (use_global_device_ids.has_value()) {
instr.set_use_global_device_ids(*use_global_device_ids);
}
AddCalledComputation(computation, &instr);
TF_ASSIGN_OR_RETURN(auto all_reduce,
AddInstruction(std::move(instr),
async ? HloOpcode::kAllReduceStart
: HloOpcode::kAllReduce,
operands));
if (operand_shape->IsTuple() && !inferred_shape.IsTuple()) {
TF_RET_CHECK(operand_shapes.size() == 1);
TF_RET_CHECK(ShapeUtil::Compatible(*operand_shapes[0], inferred_shape));
return Tuple({all_reduce});
}
return all_reduce;
});
}
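// Note on AllReduceImpl: a tuple operand is flattened with GetTupleElement
// into per-leaf operands (all leaves must share an element type), reduced as
// a variadic all-reduce, and, when the inferred result is a lone array,
// re-wrapped with Tuple so the caller still observes a tuple-shaped result.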
XlaOp XlaBuilder::AllGatherImpl(const XlaOp operand,
int64_t all_gather_dimension,
int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids,
bool async) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> operand_shapes;
std::vector<XlaOp> operands;
if (operand_shape->IsTuple()) {
if (operand_shape->tuple_shapes_size() == 0) {
return Unimplemented("0 element tuple AllGather is not supported");
}
for (int i = 0; i < operand_shape->tuple_shapes_size(); ++i) {
operand_shapes.push_back(&operand_shape->tuple_shapes(i));
operands.push_back(GetTupleElement(operand, i));
}
} else {
operand_shapes.push_back(operand_shape);
operands.push_back(operand);
}
TF_ASSIGN_OR_RETURN(Shape inferred_shape,
ShapeInference::InferAllGatherShape(
operand_shapes, all_gather_dimension, shard_count));
if (layout) {
*inferred_shape.mutable_layout() = *layout;
instr.set_constrain_layout(true);
}
*instr.mutable_shape() = inferred_shape.ToProto();
instr.add_dimensions(all_gather_dimension);
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
if (use_global_device_ids.has_value()) {
instr.set_use_global_device_ids(use_global_device_ids.value());
}
TF_ASSIGN_OR_RETURN(auto all_gather,
AddInstruction(std::move(instr),
async ? HloOpcode::kAllGatherStart
: HloOpcode::kAllGather,
operands));
return all_gather;
});
}
XlaOp XlaBuilder::ConditionalImpl(
XlaOp branch_index,
absl::Span<const XlaComputation* const> branch_computations,
absl::Span<const XlaOp> branch_operands) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* branch_index_shape,
GetShapePtr(branch_index));
std::vector<Shape> branch_operand_shapes(branch_operands.size());
std::vector<ProgramShape> branch_computation_shapes(
branch_computations.size());
for (int j = 0, end = branch_operands.size(); j < end; ++j) {
TF_ASSIGN_OR_RETURN(branch_operand_shapes[j],
GetShape(branch_operands[j]));
TF_ASSIGN_OR_RETURN(branch_computation_shapes[j],
branch_computations[j]->GetProgramShape());
}
TF_ASSIGN_OR_RETURN(const Shape shape,
ShapeInference::InferConditionalShape(
*branch_index_shape, branch_computation_shapes,
branch_operand_shapes));
*instr.mutable_shape() = shape.ToProto();
for (const XlaComputation* branch_computation : branch_computations) {
AddCalledComputation(*branch_computation, &instr);
}
std::vector<XlaOp> operands(1, branch_index);
for (const XlaOp branch_operand : branch_operands) {
operands.emplace_back(branch_operand);
}
return AddInstruction(std::move(instr), HloOpcode::kConditional,
absl::MakeSpan(operands));
});
}
absl::Status XlaBuilder::CheckOpBuilder(XlaOp op) const {
if (this != op.builder()) {
return InvalidArgument(
"XlaOp with handle %d is built by builder '%s', but is trying to use "
"it in builder '%s'",
op.handle(), op.builder()->name(), name());
}
return absl::OkStatus();
}
XlaOp XlaBuilder::Reduce(XlaOp operand, XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return Reduce(absl::Span<const XlaOp>({operand}),
absl::Span<const XlaOp>({init_value}), computation,
dimensions_to_reduce);
}
XlaOp XlaBuilder::Reduce(absl::Span<const XlaOp> operands,
absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
std::vector<XlaOp> all_operands;
all_operands.insert(all_operands.end(), operands.begin(), operands.end());
all_operands.insert(all_operands.end(), init_values.begin(),
init_values.end());
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes,
GetOperandShapes(all_operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferReduceShape(
operand_shape_ptrs, dimensions_to_reduce, called_program_shape));
return ReduceInternal(shape, all_operands, computation,
dimensions_to_reduce);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ReduceInternal(
const Shape& shape, absl::Span<const XlaOp> all_operands,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t dim : dimensions_to_reduce) {
instr.add_dimensions(dim);
}
AddCalledComputation(computation, &instr);
return AddInstruction(std::move(instr), HloOpcode::kReduce, all_operands);
});
}
XlaOp XlaBuilder::ReduceAll(XlaOp operand, XlaOp init_value,
const XlaComputation& computation) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<int64_t> all_dimnos(operand_shape->rank());
std::iota(all_dimnos.begin(), all_dimnos.end(), 0);
return Reduce(operand, init_value, computation, all_dimnos);
});
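// Worked example for ReduceAll (comment only): for an f32[4,5] operand the
// iota above produces dimensions {0, 1}, so the reduction collapses every
// dimension and yields a scalar f32[].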
}
XlaOp XlaBuilder::ReduceWindow(XlaOp operand, XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
Padding padding) {
return ReduceWindow(absl::MakeSpan(&operand, 1),
absl::MakeSpan(&init_value, 1), computation,
window_dimensions, window_strides, padding);
}
XlaOp XlaBuilder::ReduceWindow(absl::Span<const XlaOp> operands,
absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
Padding padding) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
const Shape* operand_shape = nullptr;
for (const auto& operand : operands) {
TF_ASSIGN_OR_RETURN(operand_shape, GetShapePtr(operand));
TF_RETURN_IF_ERROR(ValidatePaddingValues(
operand_shape->dimensions(), window_dimensions, window_strides));
}
CHECK(operand_shape != nullptr);
std::vector<std::pair<int64_t, int64_t>> padding_values =
MakePadding(operand_shape->dimensions(), window_dimensions,
window_strides, padding);
TF_ASSIGN_OR_RETURN(auto window,
ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides, padding_values,
{},
{}));
PaddingType padding_type = PADDING_INVALID;
for (int64_t i = 0; i < operand_shape->rank(); ++i) {
if (operand_shape->is_dynamic_dimension(i) &&
!window_util::IsTrivialWindowDimension(window.dimensions(i)) &&
padding == Padding::kSame) {
padding_type = PADDING_SAME;
}
}
if (padding_type == PADDING_SAME) {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
ReduceWindowInternal(operands, init_values, computation,
window_dimensions, window_strides, {}, {},
padding_values));
instr.set_custom_call_target("DynamicReduceWindowSamePadding");
std::vector<XlaOp> args;
args.insert(args.end(), operands.begin(), operands.end());
args.insert(args.end(), init_values.begin(), init_values.end());
return AddInstruction(std::move(instr), HloOpcode::kCustomCall, args);
}
return ReduceWindowWithGeneralPadding(
operands, init_values, computation, window_dimensions, window_strides,
{}, {}, padding_values);
});
}
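// Note on ReduceWindow: when some windowed operand dimension is dynamic, its
// window dimension is non-trivial, and Padding::kSame was requested, the op
// is emitted as a "DynamicReduceWindowSamePadding" custom call rather than a
// plain kReduceWindow, leaving same-padding to be resolved at run time.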
XlaOp XlaBuilder::ReduceWindowWithGeneralPadding(
absl::Span<const XlaOp> operands, absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const int64_t> base_dilations,
absl::Span<const int64_t> window_dilations,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
std::vector<const Shape*> operand_shapes, init_shapes;
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (operands.size() == 1) {
const auto& operand = operands[0];
const auto& init_value = init_values[0];
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
operand_shapes.push_back(operand_shape);
TF_ASSIGN_OR_RETURN(const Shape* init_shape, GetShapePtr(init_value));
init_shapes.push_back(init_shape);
TF_ASSIGN_OR_RETURN(const ProgramShape& to_apply_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(auto window,
ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides, padding,
base_dilations,
window_dilations));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferReduceWindowShape(
absl::MakeSpan(operand_shapes),
absl::MakeSpan(init_shapes), window,
to_apply_shape));
return ReduceWindowInternal(shape, operands[0], init_values[0],
computation, window);
}
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
ReduceWindowInternal(operands, init_values, computation,
window_dimensions, window_strides, base_dilations,
window_dilations, padding));
std::vector<XlaOp> args;
args.insert(args.end(), operands.begin(), operands.end());
args.insert(args.end(), init_values.begin(), init_values.end());
return AddInstruction(std::move(instr), HloOpcode::kReduceWindow, args);
});
}
absl::StatusOr<HloInstructionProto> XlaBuilder::ReduceWindowInternal(
absl::Span<const XlaOp> operands, absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const int64_t> base_dilations,
absl::Span<const int64_t> window_dilations,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
std::vector<const Shape*> operand_shapes, init_shapes;
for (int i = 0; i < operands.size(); ++i) {
const auto& operand = operands[i];
const auto& init_value = init_values[i];
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
operand_shapes.push_back(operand_shape);
TF_ASSIGN_OR_RETURN(const Shape* init_shape, GetShapePtr(init_value));
init_shapes.push_back(init_shape);
}
TF_ASSIGN_OR_RETURN(const ProgramShape& to_apply_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(auto window,
ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides, padding,
base_dilations,
window_dilations));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferReduceWindowShape(
absl::MakeSpan(operand_shapes),
absl::MakeSpan(init_shapes), window, to_apply_shape));
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_window() = std::move(window);
AddCalledComputation(computation, &instr);
return instr;
}
absl::StatusOr<XlaOp> XlaBuilder::ReduceWindowInternal(
const Shape& shape, XlaOp operand, XlaOp init_value,
const XlaComputation& computation, Window window) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_window() = std::move(window);
AddCalledComputation(computation, &instr);
return AddInstruction(std::move(instr), HloOpcode::kReduceWindow,
{operand, init_value});
}
XlaOp XlaBuilder::BatchNormTraining(XlaOp operand, XlaOp scale, XlaOp offset,
float epsilon, int64_t feature_index) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* scale_shape, GetShapePtr(scale));
TF_ASSIGN_OR_RETURN(const Shape* offset_shape, GetShapePtr(offset));
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferBatchNormTrainingShape(
*operand_shape, *scale_shape, *offset_shape, feature_index));
*instr.mutable_shape() = shape.ToProto();
instr.set_epsilon(epsilon);
instr.set_feature_index(feature_index);
return AddInstruction(std::move(instr), HloOpcode::kBatchNormTraining,
{operand, scale, offset});
});
}
XlaOp XlaBuilder::BatchNormInference(XlaOp operand, XlaOp scale, XlaOp offset,
XlaOp mean, XlaOp variance, float epsilon,
int64_t feature_index) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* scale_shape, GetShapePtr(scale));
TF_ASSIGN_OR_RETURN(const Shape* offset_shape, GetShapePtr(offset));
TF_ASSIGN_OR_RETURN(const Shape* mean_shape, GetShapePtr(mean));
TF_ASSIGN_OR_RETURN(const Shape* variance_shape, GetShapePtr(variance));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferBatchNormInferenceShape(
*operand_shape, *scale_shape, *offset_shape,
*mean_shape, *variance_shape, feature_index));
*instr.mutable_shape() = shape.ToProto();
instr.set_epsilon(epsilon);
instr.set_feature_index(feature_index);
return AddInstruction(std::move(instr), HloOpcode::kBatchNormInference,
{operand, scale, offset, mean, variance});
});
}
XlaOp XlaBuilder::BatchNormGrad(XlaOp operand, XlaOp scale, XlaOp batch_mean,
XlaOp batch_var, XlaOp grad_output,
float epsilon, int64_t feature_index) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* scale_shape, GetShapePtr(scale));
TF_ASSIGN_OR_RETURN(const Shape* batch_mean_shape, GetShapePtr(batch_mean));
TF_ASSIGN_OR_RETURN(const Shape* batch_var_shape, GetShapePtr(batch_var));
TF_ASSIGN_OR_RETURN(const Shape* grad_output_shape,
GetShapePtr(grad_output));
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferBatchNormGradShape(
*operand_shape, *scale_shape, *batch_mean_shape,
*batch_var_shape, *grad_output_shape, feature_index));
*instr.mutable_shape() = shape.ToProto();
instr.set_epsilon(epsilon);
instr.set_feature_index(feature_index);
return AddInstruction(std::move(instr), HloOpcode::kBatchNormGrad,
{operand, scale, batch_mean, batch_var, grad_output});
});
}
XlaOp XlaBuilder::AllGather(XlaOp operand, int64_t all_gather_dimension,
int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return AllGatherImpl(operand, all_gather_dimension, shard_count,
replica_groups, channel_id, layout,
                       use_global_device_ids, /*async=*/false);
}
XlaOp XlaBuilder::CrossReplicaSum(
XlaOp operand, absl::Span<const ReplicaGroup> replica_groups) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
const Shape* element_shape;
if (shape->IsTuple()) {
if (shape->tuple_shapes_size() == 0) {
return Unimplemented(
"0 element tuple CrossReplicaSum is not supported");
}
element_shape = &shape->tuple_shapes(0);
} else {
element_shape = shape;
}
const Shape scalar_shape =
ShapeUtil::MakeShape(element_shape->element_type(), {});
auto b = CreateSubBuilder("sum");
auto x = b->Parameter(0, scalar_shape, "x");
auto y = b->Parameter(1, scalar_shape, "y");
if (scalar_shape.element_type() == PRED) {
Or(x, y);
} else {
Add(x, y);
}
TF_ASSIGN_OR_RETURN(auto computation, b->Build());
    return AllReduce(operand, computation, replica_groups,
                     /*channel_id=*/std::nullopt);
});
}
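// CrossReplicaSum (above) is sugar over AllReduce: it synthesizes a scalar
// binary computation in a sub-builder (Add, or Or for PRED operands) and
// reduces with it across replicas. Sketch:
//
//   XlaOp partial = ...;                     // per-replica value
//   XlaOp total = CrossReplicaSum(partial);  // summed over all replicas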
XlaOp XlaBuilder::AllReduce(XlaOp operand, const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& shape_with_layout,
const std::optional<bool> use_global_device_ids) {
return AllReduceImpl(operand, computation, replica_groups, channel_id,
shape_with_layout, use_global_device_ids,
                       /*async=*/false);
}
XlaOp XlaBuilder::ReduceScatter(
XlaOp operand, const XlaComputation& computation, int64_t scatter_dimension,
int64_t shard_count, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> operand_shapes;
std::vector<XlaOp> operands;
if (operand_shape->IsTuple()) {
if (operand_shape->tuple_shapes_size() == 0) {
return Unimplemented("0 element tuple ReduceScatter is not supported");
}
for (int i = 0; i < operand_shape->tuple_shapes_size(); ++i) {
if (operand_shape->tuple_shapes(i).element_type() !=
operand_shape->tuple_shapes(0).element_type()) {
return Unimplemented(
"All the shapes of a tuple input of ReduceScatter must have "
"the same element type");
}
operand_shapes.push_back(&operand_shape->tuple_shapes(i));
operands.push_back(GetTupleElement(operand, i));
}
} else {
operand_shapes.push_back(operand_shape);
operands.push_back(operand);
}
TF_ASSIGN_OR_RETURN(Shape inferred_shape,
ShapeInference::InferReduceScatterShape(
operand_shapes, scatter_dimension, shard_count));
if (layout) {
*inferred_shape.mutable_layout() = *layout;
instr.set_constrain_layout(true);
}
*instr.mutable_shape() = inferred_shape.ToProto();
AddCalledComputation(computation, &instr);
instr.add_dimensions(scatter_dimension);
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
if (use_global_device_ids.has_value()) {
instr.set_use_global_device_ids(use_global_device_ids.value());
}
TF_ASSIGN_OR_RETURN(
auto reduce_scatter,
AddInstruction(std::move(instr), HloOpcode::kReduceScatter, operands));
return reduce_scatter;
});
}
XlaOp XlaBuilder::AllToAll(XlaOp operand, int64_t split_dimension,
int64_t concat_dimension, int64_t split_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
if (layout.has_value()) {
return AllToAllTuple(operand, split_dimension, concat_dimension,
split_count, replica_groups, layout, channel_id);
}
return AllToAllArray(operand, split_dimension, concat_dimension, split_count,
replica_groups, channel_id);
}
XlaOp XlaBuilder::AllToAllArray(
XlaOp operand, int64_t split_dimension, int64_t concat_dimension,
int64_t split_count, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(
const Shape all_to_all_shape,
ShapeInference::InferAllToAllShape(*operand_shape, split_dimension,
concat_dimension, split_count));
HloInstructionProto instr;
*instr.mutable_shape() = operand_shape->ToProto();
if (replica_groups.empty()) {
auto* group = instr.add_replica_groups();
for (int64_t i = 0; i < split_count; ++i) {
group->add_replica_ids(i);
}
} else {
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
}
instr.add_dimensions(split_dimension);
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
TF_ASSIGN_OR_RETURN(
XlaOp all_to_all,
AddInstruction(std::move(instr), HloOpcode::kAllToAll, {operand}));
if (split_dimension == concat_dimension) {
return all_to_all;
}
DimensionVector sizes;
const bool is_unbounded = operand_shape->is_unbounded_dynamic();
std::vector<XlaOp> dynamic_sizes;
auto GetR1DimensionSizeOrConstant = [&](XlaOp operand,
int64_t dimension) -> XlaOp {
if (operand_shape->is_unbounded_dynamic_dimension(dimension)) {
return Reshape(GetDimensionSize(operand, dimension), {1});
}
return ConstantR1<int32_t>(
this, {static_cast<int32_t>(operand_shape->dimensions(dimension))});
};
XlaOp r1_split_count =
ConstantR1<int32_t>(this, {static_cast<int32_t>(split_count)});
for (int64_t i = 0; i < operand_shape->rank(); ++i) {
if (i != split_dimension) {
sizes.push_back(operand_shape->dimensions(i));
if (is_unbounded) {
dynamic_sizes.push_back(GetR1DimensionSizeOrConstant(operand, i));
}
continue;
}
sizes.push_back(split_count);
sizes.push_back(operand_shape->is_unbounded_dynamic_dimension(i)
? Shape::kUnboundedSize
: operand_shape->dimensions(i) / split_count);
if (is_unbounded) {
dynamic_sizes.push_back(r1_split_count);
dynamic_sizes.push_back(
operand_shape->is_unbounded_dynamic_dimension(i)
? Div(GetR1DimensionSizeOrConstant(operand, i), r1_split_count)
: ConstantR1<int32_t>(this,
{static_cast<int32_t>(sizes.back())}));
}
}
if (is_unbounded) {
std::vector<bool> dynamic_dimensions;
std::transform(
sizes.begin(), sizes.end(), std::back_inserter(dynamic_dimensions),
[](int64_t size) { return size == Shape::kUnboundedSize; });
TF_ASSIGN_OR_RETURN(
const Shape shape,
ShapeUtil::MakeValidatedShape(all_to_all_shape.element_type(), sizes,
dynamic_dimensions));
all_to_all =
MhloDynamicReshape(all_to_all, ConcatInDim(dynamic_sizes, 0), shape);
} else {
all_to_all = Reshape(all_to_all, sizes);
}
std::vector<int64_t> permutation;
const auto rank = operand_shape->rank();
permutation.reserve(rank + 1);
for (int64_t i = 0; i < rank; ++i) {
int64_t dim_after_reshape = i >= split_dimension ? i + 1 : i;
if (i == concat_dimension) {
permutation.push_back(split_dimension);
}
permutation.push_back(dim_after_reshape);
}
all_to_all = Transpose(all_to_all, permutation);
if (is_unbounded) {
std::vector<XlaOp> new_dimensions;
new_dimensions.reserve(operand_shape->rank());
for (int64_t i = 0; i < operand_shape->rank(); ++i) {
new_dimensions.push_back(GetR1DimensionSizeOrConstant(operand, i));
}
new_dimensions[split_dimension] =
Div(new_dimensions[split_dimension], r1_split_count);
new_dimensions[concat_dimension] =
Mul(new_dimensions[concat_dimension], r1_split_count);
return MhloDynamicReshape(all_to_all, ConcatInDim(new_dimensions, 0),
all_to_all_shape);
}
return Reshape(all_to_all_shape, all_to_all);
});
}
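// Worked shape example for the AllToAllArray decomposition above
// (illustrative): operand f32[8,16], split_dimension=1, concat_dimension=0,
// split_count=4:
//   1. kAllToAll keeps the operand shape:              f32[8,16]
//   2. Reshape splits the split dimension:             f32[8,4,4]
//   3. Transpose with permutation {1,0,2} moves the
//      chunk-count dimension next to the concat dim:   f32[4,8,4]
//   4. Final reshape merges it into the concat dim:    f32[32,4]
// The is_unbounded branch performs the same steps with MhloDynamicReshape,
// materializing each output dimension as an R1 size value at run time.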
XlaOp XlaBuilder::AllToAllTuple(
absl::Span<const XlaOp> operands,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(auto operand_shapes, this->GetOperandShapes(operands));
std::vector<const Shape*> operand_shape_ptrs;
operand_shape_ptrs.reserve(operand_shapes.size());
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferAllToAllTupleShape(
operand_shape_ptrs));
if (layout) {
TF_RET_CHECK(shape.IsTuple() && !ShapeUtil::IsNestedTuple(shape));
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const int64_t layout_minor_to_major_size =
layout->minor_to_major().size();
if (layout_minor_to_major_size != shape.tuple_shapes(i).rank()) {
return InvalidArgument(
"Provided layout must be compatible with the operands' shape. "
"The layout is %s, but operand %d has shape %s.",
layout->ToString(), i,
ShapeUtil::HumanString(shape.tuple_shapes(i)));
}
*(shape.mutable_tuple_shapes(i)->mutable_layout()) = *layout;
}
instr.set_constrain_layout(true);
}
*instr.mutable_shape() = shape.ToProto();
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
return AddInstruction(std::move(instr), HloOpcode::kAllToAll, operands);
});
}
XlaOp XlaBuilder::AllToAllTuple(
XlaOp operand, int64_t split_dimension, int64_t concat_dimension,
int64_t split_count, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (operand_shape->is_unbounded_dynamic() ||
split_dimension == Shape::kUnboundedSize ||
concat_dimension == Shape::kUnboundedSize ||
split_count == Shape::kUnboundedSize) {
return InvalidArgument(
"AllToAllTuple does not support unbounded dynamic shapes");
}
TF_RETURN_IF_ERROR(
ShapeInference::InferAllToAllShape(*operand_shape, split_dimension,
concat_dimension, split_count)
.status());
std::vector<XlaOp> slices;
slices.reserve(split_count);
const int64_t block_size =
operand_shape->dimensions(split_dimension) / split_count;
for (int i = 0; i < split_count; i++) {
slices.push_back(SliceInDim(operand, i * block_size,
(i + 1) * block_size,
                                  /*stride=*/1, split_dimension));
}
XlaOp all_to_all =
this->AllToAllTuple(slices, replica_groups, layout, channel_id);
std::vector<XlaOp> received;
received.reserve(split_count);
for (int i = 0; i < split_count; i++) {
received.push_back(this->GetTupleElement(all_to_all, i));
}
return this->ConcatInDim(received, concat_dimension);
});
}
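// This array-shaped AllToAllTuple overload (above) lowers onto the tuple
// form: the operand is cut into split_count equal slices along
// split_dimension, exchanged as a tuple all-to-all, and the received pieces
// are concatenated along concat_dimension. Example: f32[6,4] with
// split_dimension=0, concat_dimension=1, split_count=3 yields three f32[2,4]
// slices and a f32[2,12] result.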
XlaOp XlaBuilder::CollectiveBroadcast(
XlaOp operand, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id) {
return CollectiveBroadcastImpl(operand, replica_groups, channel_id);
}
XlaOp XlaBuilder::CollectiveBroadcastImpl(
XlaOp operand, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferCollectiveBroadcastShape({operand_shape}));
*instr.mutable_shape() = shape.ToProto();
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
return AddInstruction(std::move(instr), HloOpcode::kCollectiveBroadcast,
{operand});
});
}
XlaOp XlaBuilder::CollectivePermute(
XlaOp operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<ChannelHandle>& channel_id) {
return CollectivePermuteImpl(operand, source_target_pairs, channel_id,
                               /*async=*/false);
}
XlaOp XlaBuilder::CollectivePermuteImpl(
XlaOp operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<ChannelHandle>& channel_id, bool async) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferCollectivePermuteShape({operand_shape}));
*instr.mutable_shape() = shape.ToProto();
for (const auto& pair : source_target_pairs) {
auto* proto_pair = instr.add_source_target_pairs();
proto_pair->set_source(pair.first);
proto_pair->set_target(pair.second);
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
return AddInstruction(std::move(instr),
async ? HloOpcode::kCollectivePermuteStart
: HloOpcode::kCollectivePermute,
{operand});
});
}
XlaOp XlaBuilder::ReplicaId() {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeShape(U32, {}).ToProto();
return AddInstruction(std::move(instr), HloOpcode::kReplicaId, {});
});
}
XlaOp XlaBuilder::SelectAndScatter(XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
Padding padding, XlaOp source,
XlaOp init_value,
const XlaComputation& scatter) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<std::pair<int64_t, int64_t>> padding_values =
MakePadding(operand_shape->dimensions(), window_dimensions,
window_strides, padding);
TF_ASSIGN_OR_RETURN(auto window,
ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides, padding_values,
                            /*base_dilations=*/{},
                            /*window_dilations=*/{}));
PaddingType padding_type = PADDING_INVALID;
for (int64_t i = 0; i < operand_shape->rank(); ++i) {
if (operand_shape->is_dynamic_dimension(i) &&
!window_util::IsTrivialWindowDimension(window.dimensions(i)) &&
padding == Padding::kSame) {
padding_type = PADDING_SAME;
}
}
if (padding_type == PADDING_SAME) {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
SelectAndScatterInternal(operand, select, window_dimensions,
window_strides, padding_values, source,
init_value, scatter));
instr.set_custom_call_target("DynamicSelectAndScatterSamePadding");
return AddInstruction(std::move(instr), HloOpcode::kCustomCall,
{operand, source, init_value});
}
return SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding_values,
source, init_value, scatter);
});
}
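// SelectAndScatter (above) is the backward companion of ReduceWindow; for
// example, max-pool gradients scatter each source value onto the window
// element chosen by the `select` computation. As with ReduceWindow, dynamic
// dimensions under kSame padding are routed through a
// "DynamicSelectAndScatterSamePadding" custom call.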
absl::StatusOr<HloInstructionProto> XlaBuilder::SelectAndScatterInternal(
XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding, XlaOp source,
XlaOp init_value, const XlaComputation& scatter) {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* source_shape, GetShapePtr(source));
TF_ASSIGN_OR_RETURN(const Shape* init_shape, GetShapePtr(init_value));
TF_ASSIGN_OR_RETURN(const ProgramShape& select_shape,
select.GetProgramShape());
TF_ASSIGN_OR_RETURN(const ProgramShape& scatter_shape,
scatter.GetProgramShape());
TF_ASSIGN_OR_RETURN(*instr.mutable_window(),
ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides, padding,
                          /*base_dilations=*/{}, /*window_dilations=*/{}));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferSelectAndScatterShape(
*operand_shape, select_shape, instr.window(),
*source_shape, *init_shape, scatter_shape));
*instr.mutable_shape() = shape.ToProto();
AddCalledComputation(select, &instr);
AddCalledComputation(scatter, &instr);
return instr;
}
XlaOp XlaBuilder::SelectAndScatterWithGeneralPadding(
XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding, XlaOp source,
XlaOp init_value, const XlaComputation& scatter) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(HloInstructionProto instr,
SelectAndScatterInternal(
operand, select, window_dimensions, window_strides,
padding, source, init_value, scatter));
return AddInstruction(std::move(instr), HloOpcode::kSelectAndScatter,
{operand, source, init_value});
});
}
XlaOp XlaBuilder::ReducePrecision(XlaOp operand, const int exponent_bits,
const int mantissa_bits) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferReducePrecisionShape(
*operand_shape, exponent_bits, mantissa_bits));
return ReducePrecisionInternal(shape, operand, exponent_bits,
mantissa_bits);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ReducePrecisionInternal(
const Shape& shape, XlaOp operand, const int exponent_bits,
const int mantissa_bits) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_exponent_bits(exponent_bits);
instr.set_mantissa_bits(mantissa_bits);
return AddInstruction(std::move(instr), HloOpcode::kReducePrecision,
{operand});
}
void XlaBuilder::Send(XlaOp operand, const ChannelHandle& handle) {
ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto token_instr;
*token_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
TF_ASSIGN_OR_RETURN(XlaOp token, AddInstruction(std::move(token_instr),
HloOpcode::kAfterAll, {}));
return SendWithToken(operand, token, handle);
});
}
XlaOp XlaBuilder::SendWithToken(XlaOp operand, XlaOp token,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (handle.type() != ChannelHandle::DEVICE_TO_DEVICE) {
return InvalidArgument("Send must use a device-to-device channel");
}
XlaOp send_op = internal::XlaBuilderFriend::BuildSend(this, operand, token,
handle, false);
return internal::XlaBuilderFriend::BuildSendDone(this, send_op, handle,
false);
});
}
XlaOp XlaBuilder::Recv(const Shape& shape, const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto token_instr;
*token_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
TF_ASSIGN_OR_RETURN(XlaOp token, AddInstruction(std::move(token_instr),
HloOpcode::kAfterAll, {}));
XlaOp recv = RecvWithToken(token, shape, handle);
HloInstructionProto recv_data;
*recv_data.mutable_shape() = shape.ToProto();
recv_data.set_tuple_index(0);
return AddInstruction(std::move(recv_data), HloOpcode::kGetTupleElement,
{recv});
});
}
XlaOp XlaBuilder::RecvWithToken(XlaOp token, const Shape& shape,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (handle.type() != ChannelHandle::DEVICE_TO_DEVICE) {
return InvalidArgument("Recv must use a device-to-device channel");
}
XlaOp recv_op = internal::XlaBuilderFriend::BuildRecv(this, token, shape,
handle, false);
return internal::XlaBuilderFriend::BuildRecvDone(this, recv_op, shape,
handle, false);
});
}
XlaOp XlaBuilder::SendToHost(XlaOp operand, XlaOp token,
const Shape& shape_with_layout,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!LayoutUtil::HasLayout(shape_with_layout)) {
return InvalidArgument("Shape passed to SendToHost must have a layout");
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (!ShapeUtil::Compatible(*operand_shape, shape_with_layout)) {
return InvalidArgument(
"SendToHost shape %s must be compatible with operand shape %s",
ShapeUtil::HumanStringWithLayout(shape_with_layout),
ShapeUtil::HumanStringWithLayout(*operand_shape));
}
if (!operand_shape->IsArray()) {
return InvalidArgument("SendToHost only supports array shapes, shape: %s",
ShapeUtil::HumanString(*operand_shape));
}
if (handle.type() != ChannelHandle::DEVICE_TO_HOST) {
return InvalidArgument("SendToHost must use a device-to-host channel");
}
HloInstructionProto send_instr;
*send_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({shape_with_layout,
ShapeUtil::MakeShape(U32, {}),
ShapeUtil::MakeTokenShape()})
.ToProto();
send_instr.set_channel_id(handle.handle());
send_instr.set_is_host_transfer(true);
TF_ASSIGN_OR_RETURN(XlaOp send,
AddInstruction(std::move(send_instr), HloOpcode::kSend,
{operand, token}));
HloInstructionProto send_done_instr;
*send_done_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
send_done_instr.set_channel_id(handle.handle());
send_done_instr.set_is_host_transfer(true);
TF_ASSIGN_OR_RETURN(XlaOp send_done,
AddInstruction(std::move(send_done_instr),
HloOpcode::kSendDone, {send}));
return send_done;
});
}
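// Minimal SendToHost sketch (hypothetical handle values): the channel must be
// DEVICE_TO_HOST and the transferred shape must carry a layout.
//
//   ChannelHandle h;
//   h.set_handle(1);
//   h.set_type(ChannelHandle::DEVICE_TO_HOST);
//   XlaOp token = CreateToken(&b);
//   XlaOp done = SendToHost(operand, token, shape_with_layout, h);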
XlaOp XlaBuilder::RecvFromHost(XlaOp token, const Shape& shape,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("Shape passed to RecvFromHost must have a layout");
}
if (!shape.IsArray()) {
return InvalidArgument(
"RecvFromHost only supports array shapes, shape: %s",
ShapeUtil::HumanString(shape));
}
if (handle.type() != ChannelHandle::HOST_TO_DEVICE) {
return InvalidArgument("RecvFromHost must use a host-to-device channel");
}
HloInstructionProto recv_instr;
*recv_instr.mutable_shape() =
ShapeUtil::MakeTupleShape(
{shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()})
.ToProto();
recv_instr.set_channel_id(handle.handle());
recv_instr.set_is_host_transfer(true);
TF_ASSIGN_OR_RETURN(XlaOp recv, AddInstruction(std::move(recv_instr),
HloOpcode::kRecv, {token}));
HloInstructionProto recv_done_instr;
*recv_done_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()})
.ToProto();
recv_done_instr.set_channel_id(handle.handle());
recv_done_instr.set_is_host_transfer(true);
return AddInstruction(std::move(recv_done_instr), HloOpcode::kRecvDone,
{recv});
});
}
XlaOp XlaBuilder::GetDimensionSize(XlaOp operand, int64_t dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferGetDimensionSizeShape(
*operand_shape, dimension));
if (operand_shape->is_static_dimension(dimension)) {
return ConstantR0<int32_t>(this, operand_shape->dimensions(dimension));
}
*instr.mutable_shape() = shape.ToProto();
instr.add_dimensions(dimension);
return AddInstruction(std::move(instr), HloOpcode::kGetDimensionSize,
{operand});
});
}
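// Note the folding in GetDimensionSize above: a static dimension becomes a
// plain s32 constant instead of a kGetDimensionSize instruction.
//
//   XlaOp size = GetDimensionSize(operand, 0);
//   // static dim 0  -> ConstantR0<int32_t>(dimension size)
//   // dynamic dim 0 -> kGetDimensionSize producing s32[]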
XlaOp XlaBuilder::RemoveDynamicDimension(XlaOp operand, int64_t dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
Shape shape = *operand_shape;
shape.set_dynamic_dimension(dimension, false);
XlaOp static_size =
ConstantR0<int32_t>(this, operand_shape->dimensions(dimension));
return SetDimensionSizeInternal(shape, operand, static_size, dimension);
});
}
XlaOp XlaBuilder::SetDimensionSize(XlaOp operand, XlaOp val,
int64_t dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* val_shape, GetShapePtr(val));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferSetDimensionSizeShape(
*operand_shape, *val_shape, dimension));
return SetDimensionSizeInternal(shape, operand, val, dimension);
});
}
absl::StatusOr<XlaOp> XlaBuilder::SetDimensionSizeInternal(const Shape& shape,
XlaOp operand,
XlaOp val,
int64_t dimension) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.add_dimensions(dimension);
return AddInstruction(std::move(instr), HloOpcode::kSetDimensionSize,
{operand, val});
}
absl::StatusOr<bool> XlaBuilder::IsConstant(XlaOp operand) const {
TF_RETURN_IF_ERROR(first_error_);
TF_RETURN_IF_ERROR(LookUpInstruction(operand).status());
bool is_constant = true;
absl::flat_hash_set<int64_t> visited;
IsConstantVisitor(operand.handle(), 0, &visited, &is_constant);
return is_constant;
}
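// IsConstant (above) reports whether `operand` can be evaluated without
// binding any parameter: e.g. Add(ConstantR0<int>(&b, 1),
// ConstantR0<int>(&b, 2)) is constant, while anything transitively fed by a
// Parameter is not.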
absl::StatusOr<XlaComputation> XlaBuilder::BuildConstantSubGraph(
XlaOp root_op, bool dynamic_dimension_is_minus_one) {
TF_ASSIGN_OR_RETURN(bool is_constant, IsConstant(root_op));
if (!is_constant) {
auto op_status = LookUpInstruction(root_op);
std::string op_string =
op_status.ok() ? op_status.value()->name() : "<unknown operation>";
return InvalidArgument(
"Operand to BuildConstantSubGraph depends on a parameter.\n\n"
" op requested for constant subgraph: %s\n\n"
"This is an internal error that typically happens when the XLA user "
"(e.g. TensorFlow) is attempting to determine a value that must be a "
"compile-time constant (e.g. an array dimension) but it is not capable "
"of being evaluated at XLA compile time.\n\n"
"Please file a usability bug with the framework being used (e.g. "
"TensorFlow).",
op_string);
}
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
LookUpInstruction(root_op));
if (VLOG_IS_ON(4)) {
VLOG(4) << "Build constant subgraph for:\n" << OpToString(root_op);
}
HloComputationProto entry;
SetProtoIdAndName(&entry, StrCat(name_, "_compute_constant"), kNameSeparator,
GetNextId());
ProgramShapeProto* program_shape = entry.mutable_program_shape();
*program_shape->mutable_result() = root->shape();
std::set<int64_t> related_ops;
absl::flat_hash_map<int64_t, int64_t> substitutions;
absl::flat_hash_set<int64_t> related_calls;
std::queue<int64_t> worklist;
worklist.push(root->id());
related_ops.insert(root->id());
while (!worklist.empty()) {
int64_t handle = worklist.front();
worklist.pop();
TF_ASSIGN_OR_RETURN(const HloInstructionProto* instr_proto,
LookUpInstructionByHandle(handle));
auto default_behavior = [&related_ops, &worklist, &related_calls,
instr_proto]() {
for (int64_t id : instr_proto->operand_ids()) {
if (related_ops.insert(id).second) {
worklist.push(id);
}
}
for (int64_t called_id : instr_proto->called_computation_ids()) {
related_calls.insert(called_id);
}
};
if (instr_proto->opcode() ==
HloOpcodeString(HloOpcode::kGetDimensionSize) ||
InstrIsSetBound(instr_proto)) {
int32_t constant_value = -1;
HloInstructionProto const_instr;
if (instr_proto->opcode() ==
HloOpcodeString(HloOpcode::kGetDimensionSize)) {
int64_t dimension = instr_proto->dimensions(0);
int64_t operand_handle = instr_proto->operand_ids(0);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
LookUpInstructionByHandle(operand_handle));
if (!(operand_proto->shape().is_dynamic_dimension(dimension) &&
dynamic_dimension_is_minus_one)) {
constant_value = static_cast<int32_t>(
operand_proto->shape().dimensions(dimension));
}
Literal literal = LiteralUtil::CreateR0(constant_value);
*const_instr.mutable_literal() = literal.ToProto();
*const_instr.mutable_shape() = literal.shape().ToProto();
} else {
if (instr_proto->literal().shape().element_type() == TUPLE) {
*const_instr.mutable_literal() =
instr_proto->literal().tuple_literals(0);
} else {
*const_instr.mutable_literal() = instr_proto->literal();
}
*const_instr.mutable_shape() = instr_proto->shape();
}
*const_instr.mutable_opcode() =
std::string(HloOpcodeString(HloOpcode::kConstant));
const_instr.set_id(handle);
*const_instr.mutable_name() =
GetFullName(const_instr.opcode(), kNameSeparator, const_instr.id());
      *entry.add_instructions() = const_instr;
} else if (instr_proto->opcode() ==
HloOpcodeString(HloOpcode::kGetTupleElement)) {
TF_ASSIGN_OR_RETURN(
const HloInstructionProto* maybe_tuple_instr,
LookUpInstructionByHandle(instr_proto->operand_ids(0)));
if (maybe_tuple_instr->opcode() == HloOpcodeString(HloOpcode::kTuple)) {
int64_t id = maybe_tuple_instr->operand_ids(instr_proto->tuple_index());
if (related_ops.insert(id).second) {
worklist.push(id);
}
substitutions[handle] = id;
} else {
default_behavior();
}
} else {
default_behavior();
}
}
int64_t root_id = root->id();
auto it = substitutions.find(root_id);
while (it != substitutions.end()) {
root_id = it->second;
it = substitutions.find(root_id);
}
entry.set_root_id(root_id);
for (int64_t id : related_ops) {
if (substitutions.find(id) != substitutions.end()) {
continue;
}
TF_ASSIGN_OR_RETURN(const HloInstructionProto* instr_src,
LookUpInstructionByHandle(id));
if (instr_src->opcode() == HloOpcodeString(HloOpcode::kGetDimensionSize) ||
InstrIsSetBound(instr_src)) {
continue;
}
HloInstructionProto* instr = entry.add_instructions();
*instr = *instr_src;
instr->clear_operand_ids();
for (int64_t operand_id : instr_src->operand_ids()) {
auto it = substitutions.find(operand_id);
while (it != substitutions.end()) {
operand_id = it->second;
it = substitutions.find(operand_id);
}
instr->add_operand_ids(operand_id);
}
const std::string& new_name =
StrCat(instr->name(), ".", entry.id(), ".", instr->id());
instr->set_name(new_name);
}
XlaComputation computation(entry.id());
HloModuleProto* module = computation.mutable_proto();
module->set_name(entry.name());
module->set_id(entry.id());
module->set_entry_computation_name(entry.name());
module->set_entry_computation_id(entry.id());
*module->mutable_host_program_shape() = *program_shape;
for (auto& e : embedded_) {
if (related_calls.find(e.second.id()) != related_calls.end()) {
*module->add_computations() = e.second;
}
}
*module->add_computations() = std::move(entry);
if (VLOG_IS_ON(4)) {
VLOG(4) << "Constant computation:\n" << module->DebugString();
}
return std::move(computation);
}
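// Summary of the extraction in BuildConstantSubGraph above: kGetDimensionSize
// and set-bound instructions are frozen into kConstant nodes (a dynamic
// dimension becomes -1 when dynamic_dimension_is_minus_one is set),
// GetTupleElement-of-Tuple pairs are collapsed through the `substitutions`
// map, and only computations reachable from the root are copied into the new
// module.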
std::unique_ptr<XlaBuilder> XlaBuilder::CreateSubBuilder(
const std::string& computation_name) {
auto sub_builder = std::make_unique<XlaBuilder>(computation_name);
sub_builder->parent_builder_ = this;
sub_builder->die_immediately_on_error_ = this->die_immediately_on_error_;
return sub_builder;
}
ConvolutionDimensionNumbers
XlaBuilder::CreateDefaultConvDimensionNumbers(int num_spatial_dims) {
ConvolutionDimensionNumbers dimension_numbers;
dimension_numbers.set_input_batch_dimension(kConvBatchDimension);
dimension_numbers.set_input_feature_dimension(kConvFeatureDimension);
dimension_numbers.set_output_batch_dimension(kConvBatchDimension);
dimension_numbers.set_output_feature_dimension(kConvFeatureDimension);
dimension_numbers.set_kernel_output_feature_dimension(
kConvKernelOutputDimension);
dimension_numbers.set_kernel_input_feature_dimension(
kConvKernelInputDimension);
for (int i = 0; i < num_spatial_dims; ++i) {
dimension_numbers.add_input_spatial_dimensions(i + 2);
dimension_numbers.add_kernel_spatial_dimensions(i + 2);
dimension_numbers.add_output_spatial_dimensions(i + 2);
}
return dimension_numbers;
}
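// With the kConv* constants used above, the defaults correspond to NCHW
// activations and OIHW kernels when num_spatial_dims == 2:
//   input/output: {batch=0, feature=1, spatial={2, 3}}
//   kernel:       {output_feature=0, input_feature=1, spatial={2, 3}}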
absl::Status XlaBuilder::Validate(
const ConvolutionDimensionNumbers& dnum) {
if (dnum.input_spatial_dimensions_size() < 2) {
return FailedPrecondition("input spacial dimension < 2: %d",
dnum.input_spatial_dimensions_size());
}
if (dnum.kernel_spatial_dimensions_size() < 2) {
return FailedPrecondition("kernel spacial dimension < 2: %d",
dnum.kernel_spatial_dimensions_size());
}
if (dnum.output_spatial_dimensions_size() < 2) {
return FailedPrecondition("output spacial dimension < 2: %d",
dnum.output_spatial_dimensions_size());
}
if (std::set<int64_t>(
{dnum.input_batch_dimension(), dnum.input_feature_dimension(),
dnum.input_spatial_dimensions(0), dnum.input_spatial_dimensions(1)})
.size() != 4) {
return FailedPrecondition(
"dimension numbers for the input are not unique: (%d, %d, %d, "
"%d)",
dnum.input_batch_dimension(), dnum.input_feature_dimension(),
dnum.input_spatial_dimensions(0), dnum.input_spatial_dimensions(1));
}
if (std::set<int64_t>({dnum.kernel_output_feature_dimension(),
dnum.kernel_input_feature_dimension(),
dnum.kernel_spatial_dimensions(0),
dnum.kernel_spatial_dimensions(1)})
.size() != 4) {
return FailedPrecondition(
"dimension numbers for the weight are not unique: (%d, %d, %d, "
"%d)",
dnum.kernel_output_feature_dimension(),
dnum.kernel_input_feature_dimension(),
dnum.kernel_spatial_dimensions(0), dnum.kernel_spatial_dimensions(1));
}
if (std::set<int64_t>({dnum.output_batch_dimension(),
dnum.output_feature_dimension(),
dnum.output_spatial_dimensions(0),
dnum.output_spatial_dimensions(1)})
.size() != 4) {
return FailedPrecondition(
"dimension numbers for the output are not unique: (%d, %d, %d, "
"%d)",
dnum.output_batch_dimension(), dnum.output_feature_dimension(),
dnum.output_spatial_dimensions(0), dnum.output_spatial_dimensions(1));
}
return absl::OkStatus();
}
absl::StatusOr<XlaOp> XlaBuilder::AddInstruction(
HloInstructionProto&& instr, HloOpcode opcode,
absl::Span<const XlaOp> operands) {
TF_RETURN_IF_ERROR(first_error_);
const int64_t handle = GetNextId();
instr.set_id(handle);
*instr.mutable_opcode() = std::string(HloOpcodeString(opcode));
if (instr.name().empty()) {
instr.set_name(instr.opcode());
}
for (const auto& operand : operands) {
if (operand.builder_ == nullptr) {
return InvalidArgument("invalid XlaOp with handle %d", operand.handle());
}
if (operand.builder_ != this) {
return InvalidArgument("Do not add XlaOp from builder %s to builder %s",
operand.builder_->name(), this->name());
}
instr.add_operand_ids(operand.handle());
}
if (one_shot_metadata_.has_value()) {
*instr.mutable_metadata() = one_shot_metadata_.value();
one_shot_metadata_.reset();
} else {
*instr.mutable_metadata() = metadata_;
}
if (sharding_) {
TF_RETURN_IF_ERROR(NormalizeAndAssignSharing(&instr, *sharding_));
}
*instr.mutable_frontend_attributes() = frontend_attributes_;
handle_to_index_[handle] = instructions_.size();
instructions_.push_back(std::move(instr));
instruction_shapes_.push_back(
std::make_unique<Shape>(instructions_.back().shape()));
XlaOp op(handle, this);
return op;
}
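// Metadata precedence in AddInstruction above: a value installed via
// SetOneShotOpMetadata applies to exactly one instruction and is then
// cleared; otherwise the builder-wide metadata from SetOpMetadata is
// attached. Sketch:
//
//   b.SetOneShotOpMetadata(md);  // consumed by the next instruction only
//   XlaOp x = Add(p0, p1);       // carries md
//   XlaOp y = Add(x, p1);        // falls back to builder-wide metadata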
absl::StatusOr<XlaOp> XlaBuilder::AddOpWithShape(
HloOpcode opcode, const Shape& shape, absl::Span<const XlaOp> operands) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), opcode, operands);
}
void XlaBuilder::AddCalledComputation(const XlaComputation& computation,
HloInstructionProto* instr) {
absl::flat_hash_map<int64_t, int64_t> remapped_ids;
std::vector<HloComputationProto> imported_computations;
imported_computations.reserve(computation.proto().computations_size());
for (const HloComputationProto& e : computation.proto().computations()) {
HloComputationProto new_computation(e);
int64_t computation_id = GetNextId();
remapped_ids[new_computation.id()] = computation_id;
SetProtoIdAndName(&new_computation,
GetBaseName(new_computation.name(), kNameSeparator),
kNameSeparator, computation_id);
for (auto& instruction : *new_computation.mutable_instructions()) {
int64_t instruction_id = GetNextId();
remapped_ids[instruction.id()] = instruction_id;
SetProtoIdAndName(&instruction,
GetBaseName(instruction.name(), kNameSeparator),
kNameSeparator, instruction_id);
}
new_computation.set_root_id(remapped_ids.at(new_computation.root_id()));
imported_computations.push_back(std::move(new_computation));
}
instr->add_called_computation_ids(
remapped_ids.at(computation.proto().entry_computation_id()));
for (auto& imported_computation : imported_computations) {
for (auto& instruction : *imported_computation.mutable_instructions()) {
for (auto& operand_id : *instruction.mutable_operand_ids()) {
operand_id = remapped_ids.at(operand_id);
}
for (auto& control_predecessor_id :
*instruction.mutable_control_predecessor_ids()) {
control_predecessor_id = remapped_ids.at(control_predecessor_id);
}
for (auto& called_computation_id :
*instruction.mutable_called_computation_ids()) {
called_computation_id = remapped_ids.at(called_computation_id);
}
}
int64_t computation_id = imported_computation.id();
for (int64_t i = 0; i < imported_computation.instructions_size(); ++i) {
ImportedInstruction imported_instruction;
imported_instruction.computation_id = computation_id;
imported_instruction.instruction_index = i;
handle_to_imported_index_.insert(
{imported_computation.instructions(i).id(), imported_instruction});
}
embedded_.insert({computation_id, std::move(imported_computation)});
}
}
absl::StatusOr<const HloInstructionProto*> XlaBuilder::LookUpInstruction(
const XlaOp op) const {
TF_RETURN_IF_ERROR(first_error_);
return LookUpInstructionInternal<const HloInstructionProto*>(op);
}
absl::StatusOr<const HloInstructionProto*>
XlaBuilder::LookUpInstructionByHandle(int64_t handle) const {
return LookUpInstructionByHandleInternal<const HloInstructionProto*>(handle);
}
absl::StatusOr<HloInstructionProto*> XlaBuilder::LookUpMutableInstruction(
const XlaOp op) {
TF_RETURN_IF_ERROR(first_error_);
return LookUpInstructionInternal<HloInstructionProto*>(op);
}
absl::StatusOr<HloInstructionProto*>
XlaBuilder::LookUpMutableInstructionByHandle(int64_t handle) {
return LookUpInstructionByHandleInternal<HloInstructionProto*>(handle);
}
XlaOp Parameter(XlaBuilder* builder, int64_t parameter_number,
const Shape& shape, const std::string& name) {
std::vector<bool> empty_bools;
return Parameter(builder, parameter_number, shape, name, empty_bools);
}
XlaOp Parameter(XlaBuilder* builder, int64_t parameter_number,
const Shape& shape, const std::string& name,
const std::vector<bool>& replicated_at_leaf_buffers) {
return builder->Parameter(parameter_number, shape, name,
replicated_at_leaf_buffers);
}
XlaOp ConstantLiteral(XlaBuilder* builder, const LiteralSlice& literal) {
return builder->ConstantLiteral(literal);
}
XlaOp Broadcast(const XlaOp operand,
absl::Span<const int64_t> broadcast_sizes) {
return operand.builder()->Broadcast(operand, broadcast_sizes);
}
XlaOp BroadcastInDim(const XlaOp operand,
absl::Span<const int64_t> out_dim_size,
absl::Span<const int64_t> broadcast_dimensions) {
return operand.builder()->BroadcastInDim(operand, out_dim_size,
broadcast_dimensions);
}
XlaOp MhloDynamicReshape(const XlaOp operand, const XlaOp output_shape,
const Shape& shape) {
return operand.builder()->MhloDynamicReshape(operand, output_shape, shape);
}
XlaOp MhloDynamicBroadcastInDim(const XlaOp operand,
const XlaOp output_dimensions,
absl::Span<const int64_t> broadcast_dimensions,
const Shape& output_shape) {
return operand.builder()->MhloDynamicBroadcastInDim(
operand, output_dimensions, broadcast_dimensions, output_shape);
}
XlaOp Copy(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kCopy, operand);
}
XlaOp Pad(const XlaOp operand, const XlaOp padding_value,
const PaddingConfig& padding_config) {
return operand.builder()->Pad(operand, padding_value, padding_config);
}
XlaOp PadInDim(XlaOp operand, XlaOp padding_value, int64_t dimno,
int64_t pad_lo, int64_t pad_hi) {
return operand.builder()->PadInDim(operand, padding_value, dimno, pad_lo,
pad_hi);
}
XlaOp Reshape(const XlaOp operand, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> new_sizes) {
return operand.builder()->Reshape(operand, dimensions, new_sizes);
}
XlaOp Reshape(const XlaOp operand, absl::Span<const int64_t> new_sizes) {
return operand.builder()->Reshape(operand, new_sizes);
}
XlaOp Reshape(const Shape& shape, XlaOp operand) {
return operand.builder()->Reshape(shape, operand);
}
XlaOp DynamicReshape(XlaOp operand, absl::Span<const XlaOp> dim_sizes,
absl::Span<const int64_t> new_size_bounds,
const std::vector<bool>& dims_are_dynamic) {
return operand.builder()->DynamicReshape(operand, dim_sizes, new_size_bounds,
dims_are_dynamic);
}
XlaOp ReshapeWithInferredDimension(XlaOp operand,
absl::Span<const int64_t> new_sizes,
int64_t inferred_dimension) {
return operand.builder()->Reshape(operand, new_sizes, inferred_dimension);
}
XlaOp Collapse(const XlaOp operand, absl::Span<const int64_t> dimensions) {
return operand.builder()->Collapse(operand, dimensions);
}
XlaOp Slice(const XlaOp operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices,
absl::Span<const int64_t> strides) {
return operand.builder()->Slice(operand, start_indices, limit_indices,
strides);
}
XlaOp SliceInDim(const XlaOp operand, int64_t start_index, int64_t limit_index,
int64_t stride, int64_t dimno) {
return operand.builder()->SliceInDim(operand, start_index, limit_index,
stride, dimno);
}
XlaOp DynamicSlice(const XlaOp operand, absl::Span<const XlaOp> start_indices,
absl::Span<const int64_t> slice_sizes) {
return operand.builder()->DynamicSlice(operand, start_indices, slice_sizes);
}
XlaOp DynamicUpdateSlice(const XlaOp operand, const XlaOp update,
absl::Span<const XlaOp> start_indices) {
return operand.builder()->DynamicUpdateSlice(operand, update, start_indices);
}
XlaOp ConcatInDim(XlaBuilder* builder, absl::Span<const XlaOp> operands,
int64_t dimension) {
return builder->ConcatInDim(operands, dimension);
}
XlaOp Select(const XlaOp pred, const XlaOp on_true, const XlaOp on_false) {
return pred.builder()->Select(pred, on_true, on_false);
}
XlaOp Tuple(XlaBuilder* builder, absl::Span<const XlaOp> elements) {
return builder->Tuple(elements);
}
XlaOp GetTupleElement(const XlaOp tuple_data, int64_t index) {
return tuple_data.builder()->GetTupleElement(tuple_data, index);
}
XlaOp Eq(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kEq);
}
static XlaOp CompareTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions,
ComparisonDirection comparison_direction) {
auto b = lhs.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto operand_shape, b->GetShape(lhs));
auto operand_element_type = operand_shape.element_type();
auto compare_type =
primitive_util::IsFloatingPointType(operand_element_type)
? Comparison::Type::kFloatTotalOrder
: Comparison::DefaultComparisonType(operand_element_type);
return Compare(lhs, rhs, broadcast_dimensions, comparison_direction,
compare_type);
});
}
XlaOp EqTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kEq);
}
XlaOp Ne(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kNe);
}
XlaOp NeTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kNe);
}
XlaOp Ge(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kGe);
}
XlaOp GeTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kGe);
}
XlaOp Gt(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kGt);
}
XlaOp GtTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kGt);
}
XlaOp Le(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kLe);
}
XlaOp LeTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kLe);
}
XlaOp Lt(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kLt);
}
XlaOp LtTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kLt);
}
XlaOp Compare(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions,
ComparisonDirection direction) {
return lhs.builder()->BinaryOp(HloOpcode::kCompare, lhs, rhs,
broadcast_dimensions, direction);
}
XlaOp Compare(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions,
ComparisonDirection direction, Comparison::Type compare_type) {
return lhs.builder()->BinaryOp(HloOpcode::kCompare, lhs, rhs,
broadcast_dimensions, direction, compare_type);
}
XlaOp Compare(const XlaOp lhs, const XlaOp rhs, ComparisonDirection direction) {
return Compare(lhs, rhs, {}, direction);
}
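// The direction-specific wrappers above (Eq/Ne/Ge/Gt/Le/Lt and their
// *TotalOrder variants) all funnel into these Compare overloads: e.g.
// Eq(lhs, rhs) is Compare(lhs, rhs, {}, ComparisonDirection::kEq), while the
// *TotalOrder variants additionally select kFloatTotalOrder comparison for
// floating-point element types.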
XlaOp Dot(const XlaOp lhs, const XlaOp rhs,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->Dot(lhs, rhs, precision_config, preferred_element_type);
}
XlaOp DotGeneral(const XlaOp lhs, const XlaOp rhs,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->DotGeneral(lhs, rhs, dimension_numbers,
precision_config, preferred_element_type);
}
XlaOp SparseDot(const XlaOp lhs, const XlaOp rhs,
absl::Span<const XlaOp> sparse_meta,
absl::Span<const SparsityDescriptor> sparsity,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->SparseDot(lhs, rhs, sparse_meta, sparsity,
dimension_numbers, precision_config,
preferred_element_type);
}
XlaOp Conv(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides, Padding padding,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->Conv(lhs, rhs, window_strides, padding,
feature_group_count, batch_group_count,
precision_config, preferred_element_type);
}
XlaOp ConvWithGeneralPadding(
const XlaOp lhs, const XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->ConvWithGeneralPadding(
lhs, rhs, window_strides, padding, feature_group_count, batch_group_count,
precision_config, preferred_element_type);
}
XlaOp ConvWithGeneralDimensions(
const XlaOp lhs, const XlaOp rhs, absl::Span<const int64_t> window_strides,
Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->ConvWithGeneralDimensions(
lhs, rhs, window_strides, padding, dimension_numbers, feature_group_count,
batch_group_count, precision_config, preferred_element_type);
}
XlaOp ConvGeneral(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->ConvGeneral(
lhs, rhs, window_strides, padding, dimension_numbers, feature_group_count,
batch_group_count, precision_config, preferred_element_type);
}
XlaOp ConvGeneralDilated(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type,
std::optional<std::vector<bool>> window_reversal) {
return lhs.builder()->ConvGeneralDilated(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config, preferred_element_type, window_reversal);
}
XlaOp DynamicConvInputGrad(
XlaOp input_sizes, const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->DynamicConvInputGrad(
input_sizes, lhs, rhs, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type);
}
XlaOp DynamicConvKernelGrad(
XlaOp activations, XlaOp gradients,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return activations.builder()->DynamicConvKernelGrad(
activations, gradients, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type);
}
XlaOp DynamicConvForward(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->DynamicConvForward(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type);
}
XlaOp Fft(const XlaOp operand, FftType fft_type,
absl::Span<const int64_t> fft_length) {
return operand.builder()->Fft(operand, fft_type, fft_length);
}
XlaOp TriangularSolve(XlaOp a, XlaOp b, bool left_side, bool lower,
bool unit_diagonal,
TriangularSolveOptions::Transpose transpose_a) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* a_shape, builder->GetShapePtr(a));
TF_ASSIGN_OR_RETURN(const Shape* b_shape, builder->GetShapePtr(b));
TriangularSolveOptions options;
options.set_left_side(left_side);
options.set_lower(lower);
options.set_unit_diagonal(unit_diagonal);
options.set_transpose_a(transpose_a);
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferTriangularSolveShape(
*a_shape, *b_shape, options));
return builder->TriangularSolveInternal(shape, a, b, std::move(options));
});
}
XlaOp Cholesky(XlaOp a, bool lower) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* a_shape, builder->GetShapePtr(a));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferCholeskyShape(*a_shape));
return builder->CholeskyInternal(shape, a, lower);
});
}
XlaOp Infeed(XlaBuilder* builder, const Shape& shape,
const std::string& config) {
return builder->Infeed(shape, config);
}
void Outfeed(const XlaOp operand, const Shape& shape_with_layout,
const std::string& outfeed_config) {
return operand.builder()->Outfeed(operand, shape_with_layout, outfeed_config);
}
XlaOp Call(XlaBuilder* builder, const XlaComputation& computation,
absl::Span<const XlaOp> operands) {
return builder->Call(computation, operands);
}
XlaOp CompositeCall(XlaBuilder* builder, const XlaComputation& computation,
absl::Span<const XlaOp> operands, const std::string& name,
std::optional<absl::string_view> attributes,
std::optional<int64_t> version) {
return builder->CompositeCall(computation, operands, name, attributes,
version);
}
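// The CustomCall overloads below each pin down a subset of the optional
// arguments (operand layouts, an attached computation, or a convolution
// window plus dimension numbers) and pass std::nullopt for the rest.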
XlaOp CustomCall(
XlaBuilder* builder, const std::string& call_target_name,
absl::Span<const XlaOp> operands, const Shape& shape,
const std::string& opaque, bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, CustomCallSchedule schedule,
CustomCallApiVersion api_version) {
  return builder->CustomCall(call_target_name, operands, shape, opaque,
                             /*operand_shapes_with_layout=*/std::nullopt,
                             has_side_effect, output_operand_aliasing, literal,
                             /*window=*/std::nullopt, /*dnums=*/std::nullopt,
                             schedule, api_version);
}
XlaOp CustomCallWithComputation(
XlaBuilder* builder, const std::string& call_target_name,
absl::Span<const XlaOp> operands, const XlaComputation& computation,
const Shape& shape, const std::string& opaque, bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, CustomCallSchedule schedule,
CustomCallApiVersion api_version) {
  return builder->CustomCall(
      call_target_name, operands, computation, shape, opaque,
      /*operand_shapes_with_layout=*/std::nullopt, has_side_effect,
      output_operand_aliasing, literal, schedule, api_version);
}
XlaOp CustomCallWithLayout(
XlaBuilder* builder, const std::string& call_target_name,
absl::Span<const XlaOp> operands, const Shape& shape,
absl::Span<const Shape> operand_shapes_with_layout,
const std::string& opaque, bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, CustomCallSchedule schedule,
CustomCallApiVersion api_version) {
  return builder->CustomCall(
      call_target_name, operands, shape, opaque, operand_shapes_with_layout,
      has_side_effect, output_operand_aliasing, literal,
      /*window=*/std::nullopt, /*dnums=*/std::nullopt, schedule, api_version);
}
XlaOp CustomCallWithConvDnums(
XlaBuilder* builder, const std::string& call_target_name,
absl::Span<const XlaOp> operands, const Shape& shape,
absl::Span<const Shape> operand_shapes_with_layout,
const std::string& opaque, bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, Window window, ConvolutionDimensionNumbers dnums,
CustomCallSchedule schedule, CustomCallApiVersion api_version) {
std::optional<absl::Span<const Shape>> maybe_operand_shapes;
if (!operand_shapes_with_layout.empty()) {
maybe_operand_shapes = operand_shapes_with_layout;
}
return builder->CustomCall(call_target_name, operands, shape, opaque,
maybe_operand_shapes, has_side_effect,
output_operand_aliasing, literal, window, dnums,
schedule, api_version);
}
XlaOp OptimizationBarrier(XlaOp operand) {
return operand.builder()->OptimizationBarrier(operand);
}
XlaOp Complex(const XlaOp real, const XlaOp imag,
absl::Span<const int64_t> broadcast_dimensions) {
return real.builder()->BinaryOp(HloOpcode::kComplex, real, imag,
broadcast_dimensions);
}
XlaOp Conj(const XlaOp operand) {
return Complex(Real(operand), Neg(Imag(operand)));
}
XlaOp Add(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kAdd, lhs, rhs,
broadcast_dimensions);
}
XlaOp Sub(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kSubtract, lhs, rhs,
broadcast_dimensions);
}
XlaOp Mul(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kMultiply, lhs, rhs,
broadcast_dimensions);
}
XlaOp Div(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kDivide, lhs, rhs,
broadcast_dimensions);
}
XlaOp Rem(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kRemainder, lhs, rhs,
broadcast_dimensions);
}
XlaOp Max(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kMaximum, lhs, rhs,
broadcast_dimensions);
}
XlaOp Min(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kMinimum, lhs, rhs,
broadcast_dimensions);
}
XlaOp And(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kAnd, lhs, rhs,
broadcast_dimensions);
}
XlaOp Or(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kOr, lhs, rhs,
broadcast_dimensions);
}
XlaOp Xor(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kXor, lhs, rhs,
broadcast_dimensions);
}
XlaOp Not(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kNot, operand);
}
XlaOp PopulationCount(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kPopulationCount, operand);
}
XlaOp ShiftLeft(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kShiftLeft, lhs, rhs,
broadcast_dimensions);
}
XlaOp ShiftRightArithmetic(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kShiftRightArithmetic, lhs, rhs,
broadcast_dimensions);
}
XlaOp ShiftRightLogical(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kShiftRightLogical, lhs, rhs,
broadcast_dimensions);
}
XlaOp Reduce(const XlaOp operand, const XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return operand.builder()->Reduce(operand, init_value, computation,
dimensions_to_reduce);
}
XlaOp Reduce(XlaBuilder* builder, absl::Span<const XlaOp> operands,
absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return builder->Reduce(operands, init_values, computation,
dimensions_to_reduce);
}
XlaOp ReduceAll(const XlaOp operand, const XlaOp init_value,
const XlaComputation& computation) {
return operand.builder()->ReduceAll(operand, init_value, computation);
}
XlaOp ReduceWindow(const XlaOp operand, const XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides, Padding padding) {
return operand.builder()->ReduceWindow(operand, init_value, computation,
window_dimensions, window_strides,
padding);
}
XlaOp ReduceWindow(absl::Span<const XlaOp> operands,
absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides, Padding padding) {
CHECK(!operands.empty());
return operands[0].builder()->ReduceWindow(operands, init_values, computation,
window_dimensions, window_strides,
padding);
}
XlaOp ReduceWindowWithGeneralPadding(
const XlaOp operand, const XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const int64_t> base_dilations,
absl::Span<const int64_t> window_dilations,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
return operand.builder()->ReduceWindowWithGeneralPadding(
absl::MakeSpan(&operand, 1), absl::MakeSpan(&init_value, 1), computation,
window_dimensions, window_strides, base_dilations, window_dilations,
padding);
}
XlaOp ReduceWindowWithGeneralPadding(
absl::Span<const XlaOp> operands, absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const int64_t> base_dilations,
absl::Span<const int64_t> window_dilations,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
CHECK(!operands.empty());
return operands[0].builder()->ReduceWindowWithGeneralPadding(
operands, init_values, computation, window_dimensions, window_strides,
base_dilations, window_dilations, padding);
}
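// Collective-op wrappers. The *Tuple variants accept a span of operands and
// must be called with at least one op (enforced by the CHECKs below), since
// the XlaBuilder is recovered from operands[0].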
XlaOp AllGather(const XlaOp operand, int64_t all_gather_dimension,
int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return operand.builder()->AllGather(operand, all_gather_dimension,
shard_count, replica_groups, channel_id,
layout, use_global_device_ids);
}
XlaOp AllGatherTuple(absl::Span<const XlaOp> operands,
int64_t all_gather_dimension, int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
CHECK(!operands.empty());
return operands[0].builder()->AllGather(
operands[0].builder()->Tuple(operands), all_gather_dimension, shard_count,
replica_groups, channel_id, layout, use_global_device_ids);
}
XlaOp CrossReplicaSum(const XlaOp operand,
absl::Span<const ReplicaGroup> replica_groups) {
return operand.builder()->CrossReplicaSum(operand, replica_groups);
}
XlaOp AllReduce(const XlaOp operand, const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& shape_with_layout,
const std::optional<bool> use_global_device_ids) {
return operand.builder()->AllReduce(operand, computation, replica_groups,
channel_id, shape_with_layout,
use_global_device_ids);
}
XlaOp AllReduceTuple(absl::Span<const XlaOp> operands,
const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& shape_with_layout,
const std::optional<bool> use_global_device_ids) {
CHECK(!operands.empty());
return operands[0].builder()->AllReduce(
operands[0].builder()->Tuple(operands), computation, replica_groups,
channel_id, shape_with_layout, use_global_device_ids);
}
XlaOp ReduceScatter(const XlaOp operand, const XlaComputation& computation,
int64_t scatter_dimension, int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return operand.builder()->ReduceScatter(
operand, computation, scatter_dimension, shard_count, replica_groups,
channel_id, layout, use_global_device_ids);
}
XlaOp AllToAll(const XlaOp operand, int64_t split_dimension,
int64_t concat_dimension, int64_t split_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
return operand.builder()->AllToAll(operand, split_dimension, concat_dimension,
split_count, replica_groups, layout,
channel_id);
}
XlaOp AllToAllTuple(absl::Span<const XlaOp> operands,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
CHECK(!operands.empty());
return operands[0].builder()->AllToAllTuple(operands, replica_groups, layout,
channel_id);
}
XlaOp AllToAllTuple(const XlaOp operand, int64_t split_dimension,
int64_t concat_dimension, int64_t split_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
return operand.builder()->AllToAllTuple(operand, split_dimension,
concat_dimension, split_count,
replica_groups, layout, channel_id);
}
XlaOp CollectiveBroadcast(const XlaOp operand,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id) {
return operand.builder()->CollectiveBroadcast(operand, replica_groups,
channel_id);
}
XlaOp CollectivePermute(
const XlaOp operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<ChannelHandle>& channel_id) {
return operand.builder()->CollectivePermute(operand, source_target_pairs,
channel_id);
}
XlaOp ReplicaId(XlaBuilder* builder) { return builder->ReplicaId(); }
XlaOp SelectAndScatter(const XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
Padding padding, const XlaOp source,
const XlaOp init_value, const XlaComputation& scatter) {
return operand.builder()->SelectAndScatter(operand, select, window_dimensions,
window_strides, padding, source,
init_value, scatter);
}
XlaOp SelectAndScatterWithGeneralPadding(
const XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding, const XlaOp source,
const XlaOp init_value, const XlaComputation& scatter) {
return operand.builder()->SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source,
init_value, scatter);
}
XlaOp Abs(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kAbs, operand);
}
XlaOp Atan2(const XlaOp y, const XlaOp x,
absl::Span<const int64_t> broadcast_dimensions) {
return y.builder()->BinaryOp(HloOpcode::kAtan2, y, x, broadcast_dimensions);
}
XlaOp Exp(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kExp, operand);
}
XlaOp Expm1(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kExpm1, operand);
}
XlaOp Floor(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kFloor, operand);
}
XlaOp Ceil(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kCeil, operand);
}
XlaOp Round(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kRoundNearestAfz, operand);
}
XlaOp RoundNearestEven(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kRoundNearestEven, operand);
}
XlaOp Log(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kLog, operand);
}
XlaOp Log1p(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kLog1p, operand);
}
XlaOp Erf(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kErf, operand);
}
XlaOp Logistic(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kLogistic, operand);
}
XlaOp Sign(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kSign, operand);
}
XlaOp Clz(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kClz, operand);
}
XlaOp Cos(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kCos, operand);
}
XlaOp Sin(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kSin, operand);
}
XlaOp Tan(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kTan, operand);
}
XlaOp Tanh(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kTanh, operand);
}
XlaOp Real(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kReal, operand);
}
XlaOp Imag(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kImag, operand);
}
XlaOp Sqrt(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kSqrt, operand);
}
XlaOp Cbrt(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kCbrt, operand);
}
XlaOp Rsqrt(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kRsqrt, operand);
}
XlaOp Pow(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kPower, lhs, rhs,
broadcast_dimensions);
}
XlaOp IsFinite(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kIsFinite, operand);
}
XlaOp ConvertElementType(const XlaOp operand, PrimitiveType new_element_type) {
return operand.builder()->ConvertElementType(operand, new_element_type);
}
XlaOp BitcastConvertType(const XlaOp operand, PrimitiveType new_element_type) {
return operand.builder()->BitcastConvertType(operand, new_element_type);
}
XlaOp StochasticConvertType(const XlaOp operand, const XlaOp random,
PrimitiveType new_element_type) {
return operand.builder()->StochasticConvertType(operand, random,
new_element_type);
}
XlaOp Neg(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kNegate, operand);
}
XlaOp Transpose(const XlaOp operand, absl::Span<const int64_t> permutation) {
return operand.builder()->Transpose(operand, permutation);
}
XlaOp Rev(const XlaOp operand, absl::Span<const int64_t> dimensions) {
return operand.builder()->Rev(operand, dimensions);
}
XlaOp Sort(absl::Span<const XlaOp> operands, const XlaComputation& comparator,
int64_t dimension, bool is_stable) {
return operands[0].builder()->Sort(operands, comparator, dimension,
is_stable);
}
XlaOp TopK(XlaOp operand, int64_t k, bool largest) {
return operand.builder()->TopK(operand, k, largest);
}
XlaOp Clamp(const XlaOp min, const XlaOp operand, const XlaOp max) {
return min.builder()->Clamp(min, operand, max);
}
XlaOp Map(XlaBuilder* builder, absl::Span<const XlaOp> operands,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions,
absl::Span<const XlaOp> static_operands) {
return builder->Map(operands, computation, dimensions, static_operands);
}
XlaOp RngNormal(const XlaOp mu, const XlaOp sigma, const Shape& shape) {
return mu.builder()->RngNormal(mu, sigma, shape);
}
XlaOp RngUniform(const XlaOp a, const XlaOp b, const Shape& shape) {
return a.builder()->RngUniform(a, b, shape);
}
XlaOp RngBitGenerator(RandomAlgorithm algorithm, const XlaOp initial_state,
const Shape& shape) {
return initial_state.builder()->RngBitGenerator(algorithm, initial_state,
shape);
}
XlaOp While(const XlaComputation& condition, const XlaComputation& body,
const XlaOp init) {
return init.builder()->While(condition, body, init);
}
XlaOp Conditional(const XlaOp predicate, const XlaOp true_operand,
const XlaComputation& true_computation,
const XlaOp false_operand,
const XlaComputation& false_computation) {
return predicate.builder()->Conditional(predicate, true_operand,
true_computation, false_operand,
false_computation);
}
XlaOp Conditional(const XlaOp branch_index,
absl::Span<const XlaComputation* const> branch_computations,
absl::Span<const XlaOp> branch_operands) {
return branch_index.builder()->Conditional(branch_index, branch_computations,
branch_operands);
}
XlaOp ReducePrecision(const XlaOp operand, const int exponent_bits,
const int mantissa_bits) {
return operand.builder()->ReducePrecision(operand, exponent_bits,
mantissa_bits);
}
XlaOp Gather(const XlaOp input, const XlaOp start_indices,
const GatherDimensionNumbers& dimension_numbers,
absl::Span<const int64_t> slice_sizes, bool indices_are_sorted) {
return input.builder()->Gather(input, start_indices, dimension_numbers,
slice_sizes, indices_are_sorted);
}
XlaOp Scatter(const XlaOp input, const XlaOp scatter_indices,
const XlaOp updates, const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers,
bool indices_are_sorted, bool unique_indices) {
return input.builder()->Scatter(input, scatter_indices, updates,
update_computation, dimension_numbers,
indices_are_sorted, unique_indices);
}
XlaOp Scatter(absl::Span<const XlaOp> inputs, XlaOp scatter_indices,
absl::Span<const XlaOp> updates,
const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers,
bool indices_are_sorted, bool unique_indices) {
return scatter_indices.builder()->Scatter(
inputs, scatter_indices, updates, update_computation, dimension_numbers,
indices_are_sorted, unique_indices);
}
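// Cross-computation communication primitives: Send produces no value, the
// *WithToken variants thread an explicit token to order side effects, and
// SendToHost/RecvFromHost move data between device and host.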
void Send(const XlaOp operand, const ChannelHandle& handle) {
return operand.builder()->Send(operand, handle);
}
XlaOp Recv(XlaBuilder* builder, const Shape& shape,
const ChannelHandle& handle) {
return builder->Recv(shape, handle);
}
XlaOp SendWithToken(const XlaOp operand, const XlaOp token,
const ChannelHandle& handle) {
return operand.builder()->SendWithToken(operand, token, handle);
}
XlaOp RecvWithToken(const XlaOp token, const Shape& shape,
const ChannelHandle& handle) {
return token.builder()->RecvWithToken(token, shape, handle);
}
XlaOp SendToHost(const XlaOp operand, const XlaOp token,
const Shape& shape_with_layout, const ChannelHandle& handle) {
return operand.builder()->SendToHost(operand, token, shape_with_layout,
handle);
}
XlaOp RecvFromHost(const XlaOp token, const Shape& shape,
const ChannelHandle& handle) {
return token.builder()->RecvFromHost(token, shape, handle);
}
XlaOp InfeedWithToken(const XlaOp token, const Shape& shape,
const std::string& config) {
return token.builder()->InfeedWithToken(token, shape, config);
}
XlaOp OutfeedWithToken(const XlaOp operand, const XlaOp token,
const Shape& shape_with_layout,
const std::string& outfeed_config) {
return operand.builder()->OutfeedWithToken(operand, token, shape_with_layout,
outfeed_config);
}
XlaOp CreateToken(XlaBuilder* builder) { return builder->CreateToken(); }
XlaOp AfterAll(XlaBuilder* builder, absl::Span<const XlaOp> tokens) {
return builder->AfterAll(tokens);
}
XlaOp BatchNormTraining(const XlaOp operand, const XlaOp scale,
const XlaOp offset, float epsilon,
int64_t feature_index) {
return operand.builder()->BatchNormTraining(operand, scale, offset, epsilon,
feature_index);
}
XlaOp BatchNormInference(const XlaOp operand, const XlaOp scale,
const XlaOp offset, const XlaOp mean,
const XlaOp variance, float epsilon,
int64_t feature_index) {
return operand.builder()->BatchNormInference(
operand, scale, offset, mean, variance, epsilon, feature_index);
}
XlaOp BatchNormGrad(const XlaOp operand, const XlaOp scale,
const XlaOp batch_mean, const XlaOp batch_var,
const XlaOp grad_output, float epsilon,
int64_t feature_index) {
return operand.builder()->BatchNormGrad(operand, scale, batch_mean, batch_var,
grad_output, epsilon, feature_index);
}
XlaOp Iota(XlaBuilder* builder, PrimitiveType type, int64_t size) {
return builder->Iota(type, size);
}
XlaOp Iota(XlaBuilder* builder, const Shape& shape, int64_t iota_dimension) {
return builder->Iota(shape, iota_dimension);
}
XlaOp GetDimensionSize(const XlaOp operand, int64_t dimension) {
return operand.builder()->GetDimensionSize(operand, dimension);
}
XlaOp SetDimensionSize(const XlaOp operand, const XlaOp val,
int64_t dimension) {
return operand.builder()->SetDimensionSize(operand, val, dimension);
}
XlaOp RemoveDynamicDimension(const XlaOp operand, int64_t dimension) {
return operand.builder()->RemoveDynamicDimension(operand, dimension);
}
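// Builds the "manual" counterpart of `original` used by the SPMD full/shard
// shape conversions below. When `single_dim` is negative or `original` is
// not a tiled (OTHER) sharding, the result is fully MANUAL; otherwise the
// partitions along `single_dim` are moved into a trailing subgroup dimension
// marked MANUAL while the other dimensions stay tiled as before.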
OpSharding GetManualSharding(const OpSharding& original, int64_t single_dim) {
OpSharding manual;
if (single_dim < 0 || original.type() != OpSharding::OTHER) {
manual.set_type(OpSharding::MANUAL);
return manual;
}
manual.set_type(OpSharding::OTHER);
std::vector<int64_t> new_tile_shape(
original.tile_assignment_dimensions().begin(),
original.tile_assignment_dimensions().end());
new_tile_shape.push_back(new_tile_shape[single_dim]);
new_tile_shape[single_dim] = 1;
Array<int64_t> new_tile(new_tile_shape);
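  // Fill the reshaped tile assignment: each destination index maps back to
  // the source device by substituting the trailing (subgroup) coordinate for
  // the coordinate along `single_dim`.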
new_tile.Each([&](absl::Span<const int64_t> indices, int64_t* v) {
int64_t src_index = 0;
for (int64_t i = 0; i < indices.size() - 1; ++i) {
if (i > 0) {
src_index *= new_tile_shape[i];
}
int64_t index = indices[i];
if (i == single_dim) {
index = indices.back();
}
src_index += index;
}
*v = original.tile_assignment_devices(src_index);
});
for (int64_t dim : new_tile_shape) {
manual.add_tile_assignment_dimensions(dim);
}
for (int64_t device : new_tile) {
manual.add_tile_assignment_devices(device);
}
if (original.replicate_on_last_tile_dim()) {
manual.add_last_tile_dims(OpSharding::REPLICATED);
}
for (int64_t type : original.last_tile_dims()) {
manual.add_last_tile_dims(static_cast<OpSharding::Type>(type));
}
manual.add_last_tile_dims(OpSharding::MANUAL);
return manual;
}
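// Annotates `input` with `manual_sharding` via a "Sharding" custom call,
// then emits an "SPMDFullToShardShape" custom call under the derived manual
// sharding; each partitioned output dimension is shrunk to
// CeilOfRatio(full_size, partition_count).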
absl::StatusOr<XlaOp> ConvertSpmdFullToShardShape(
XlaBuilder* builder, XlaOp input, int single_dim,
const OpSharding& manual_sharding,
absl::Span<const int64_t> unspecified_dims) {
TF_ASSIGN_OR_RETURN(const Shape input_shape, builder->GetShape(input));
Shape output_shape = input_shape;
const int64_t rank = output_shape.rank();
if (manual_sharding.type() == OpSharding::OTHER) {
for (int64_t i = 0; i < rank; ++i) {
if (single_dim >= 0 && i != single_dim) {
continue;
}
const int64_t partitions_i =
manual_sharding.tile_assignment_dimensions(i);
if (partitions_i == 1) continue;
const int64_t dim_size =
CeilOfRatio(output_shape.dimensions(i), partitions_i);
output_shape.set_dimensions(i, dim_size);
}
}
XlaOp input_annotation;
{
XlaScopedShardingAssignment assign_sharding(builder, manual_sharding);
input_annotation = CustomCall(
builder, "Sharding", {input}, input_shape,
sharding_op_util::EncodeAttributes(unspecified_dims));
}
{
OpSharding manual = GetManualSharding(manual_sharding, single_dim);
XlaScopedShardingAssignment assign_sharding(builder, manual);
    return CustomCall(builder, /*call_target_name=*/"SPMDFullToShardShape",
                      {input_annotation}, output_shape,
                      /*opaque=*/
                      sharding_op_util::EncodeAttributes(unspecified_dims));
}
}
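// Inverse of ConvertSpmdFullToShardShape: annotates the shard-shaped `input`
// under the derived manual sharding, then emits an "SPMDShardToFullShape"
// custom call that restores the caller-provided full `output_shape`. A
// minimal usage sketch (illustrative only; `b`, `full`, `sharding`, and
// `full_shape` are hypothetical):
//   TF_ASSIGN_OR_RETURN(XlaOp shard, ConvertSpmdFullToShardShape(
//       &b, full, /*single_dim=*/-1, sharding, /*unspecified_dims=*/{}));
//   // ... manually partitioned computation on `shard` ...
//   TF_ASSIGN_OR_RETURN(XlaOp restored, ConvertSpmdShardToFullShape(
//       &b, shard, full_shape, /*single_dim=*/-1, sharding, {}));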
absl::StatusOr<XlaOp> ConvertSpmdShardToFullShape(
XlaBuilder* builder, XlaOp input, const Shape& output_shape, int single_dim,
const OpSharding& manual_sharding,
absl::Span<const int64_t> unspecified_dims) {
TF_ASSIGN_OR_RETURN(const Shape input_shape, builder->GetShape(input));
XlaOp input_annotation;
{
OpSharding manual = GetManualSharding(manual_sharding, single_dim);
XlaScopedShardingAssignment assign_sharding(builder, manual);
input_annotation = CustomCall(
builder, "Sharding", {input}, input_shape,
sharding_op_util::EncodeAttributes(unspecified_dims));
}
{
XlaScopedShardingAssignment assign_sharding(builder, manual_sharding);
    return CustomCall(builder, /*call_target_name=*/"SPMDShardToFullShape",
                      {input_annotation}, output_shape,
                      /*opaque=*/
                      sharding_op_util::EncodeAttributes(unspecified_dims));
}
}
}  // namespace xla

#include "xla/hlo/builder/xla_builder.h"
#include <algorithm>
#include <array>
#include <complex>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/builder/padding.h"
#include "xla/hlo/builder/sharding_builder.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::_;
using ::testing::HasSubstr;
using ::testing::Test;
using ::tsl::testing::StatusIs;
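// Returns the root instruction of the module's entry computation.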
HloInstruction* GetRoot(HloModule& module) {
return module.entry_computation()->root_instruction();
}
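// Builds the XlaBuilder's computation and round-trips it through its proto
// to produce an HloModule, using debug options from the current flags.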
absl::StatusOr<std::unique_ptr<HloModule>> BuildHloModule(XlaBuilder& b) {
  TF_ASSIGN_OR_RETURN(XlaComputation computation,
                      b.Build(/*remove_dynamic_dimensions=*/false));
const HloModuleProto& proto = computation.proto();
TF_ASSIGN_OR_RETURN(const auto& config,
HloModule::CreateModuleConfigFromProto(
proto, GetDebugOptionsFromFlags()));
return HloModule::CreateFromProto(proto, config);
}
absl::StatusOr<std::unique_ptr<HloModule>> BuildHloModule(XlaBuilder& b,
XlaOp root) {
  TF_ASSIGN_OR_RETURN(XlaComputation computation,
                      b.Build(root, /*remove_dynamic_dimensions=*/false));
const HloModuleProto& proto = computation.proto();
TF_ASSIGN_OR_RETURN(const auto& config,
HloModule::CreateModuleConfigFromProto(
proto, GetDebugOptionsFromFlags()));
return HloModule::CreateFromProto(proto, config);
}
std::string TestName() {
return ::testing::UnitTest::GetInstance()->current_test_info()->name();
}
TEST(XlaBuilderTest, OnePlusTwo) {
XlaBuilder b(TestName());
Add(ConstantR0<float>(&b, 1.0), ConstantR0<float>(&b, 2.0));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Constant(), m::Constant())));
}
TEST(XlaBuilderTest, UnaryOperatorsBuildExpectedHLO) {
auto test_unary_operator = [&](std::function<XlaOp(XlaOp)> op,
auto matches_pattern) {
XlaBuilder b(TestName());
op(ConstantR0<int32_t>(&b, 1));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), matches_pattern);
};
test_unary_operator([](XlaOp x) { return -x; },
GmockMatch(m::Negate(m::Constant())));
test_unary_operator([](XlaOp x) { return ~x; },
GmockMatch(m::Not(m::Constant())));
}
TEST(XlaBuilderTest, BinaryOperatorsBuildExpectedHLO) {
auto test_binary_operator = [&](std::function<XlaOp(XlaOp, XlaOp)> op,
auto matches_pattern) {
XlaBuilder b(TestName());
op(ConstantR0<int32_t>(&b, 1), ConstantR0<int32_t>(&b, 2));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), matches_pattern);
};
test_binary_operator([](XlaOp x, XlaOp y) { return x + y; },
GmockMatch(m::Add(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x - y; },
GmockMatch(m::Subtract(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x * y; },
GmockMatch(m::Multiply(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x / y; },
GmockMatch(m::Divide(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x & y; },
GmockMatch(m::And(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x | y; },
GmockMatch(m::Or(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x ^ y; },
GmockMatch(m::Xor(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x << y; },
GmockMatch(m::ShiftLeft(m::Constant(), m::Constant())));
test_binary_operator(
[](XlaOp x, XlaOp y) { return x >> y; },
GmockMatch(m::ShiftRightArithmetic(m::Constant(), m::Constant())));
auto test_unsigned_binary_operator =
[&](std::function<XlaOp(XlaOp, XlaOp)> op, auto matches_pattern) {
XlaBuilder b(TestName());
op(ConstantR0<uint32_t>(&b, 1), ConstantR0<uint32_t>(&b, 2));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), matches_pattern);
};
test_unsigned_binary_operator(
[](XlaOp x, XlaOp y) { return x >> y; },
GmockMatch(m::ShiftRightLogical(m::Constant(), m::Constant())));
}
TEST(XlaBuilderTest, VariadicAnd) {
XlaBuilder b(TestName());
const Shape s = ShapeUtil::MakeShape(PRED, {});
And(Parameter(&b, 0, s, "p0"), Parameter(&b, 1, s, "p1"),
Parameter(&b, 2, s, "p2"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
::testing::AnyOf(
GmockMatch(m::And(m::Parameter(0),
m::And(m::Parameter(1), m::Parameter(2)))),
GmockMatch(m::And(m::And(m::Parameter(0), m::Parameter(1)),
m::Parameter(2)))));
}
TEST(XlaBuilderTest, VariadicOr) {
XlaBuilder b(TestName());
const Shape s = ShapeUtil::MakeShape(PRED, {});
Or(Parameter(&b, 0, s, "p0"), Parameter(&b, 1, s, "p1"),
Parameter(&b, 2, s, "p2"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
::testing::AnyOf(
GmockMatch(m::Or(m::Parameter(0),
m::Or(m::Parameter(1), m::Parameter(2)))),
GmockMatch(m::Or(m::Or(m::Parameter(0), m::Parameter(1)),
m::Parameter(2)))));
}
TEST(XlaBuilderTest, ShiftRightOperatorOnNonIntegerProducesError) {
XlaBuilder b(TestName());
ConstantR0<float>(&b, 1) >> ConstantR0<float>(&b, 2);
auto statusor = b.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(
statusor.status().message(),
HasSubstr("Argument to >> operator does not have an integral type"));
}
TEST(XlaBuilderTest, ParamPlusConstantHasScalarBroadcast) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {3, 5}), "x");
Add(x, ConstantR0<float>(&b, 1.0));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Parameter(), m::Broadcast(m::Constant()))));
}
TEST(XlaBuilderTest, ParamPlusConstantHasScalarBroadcastReversed) {
XlaBuilder b(TestName());
const XlaOp x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {3, 5}), "x");
Add(ConstantR0<float>(&b, 1.0), x);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Broadcast(m::Constant()), m::Parameter())));
}
TEST(XlaBuilderTest, ParamPlusParamHasBroadcast) {
XlaBuilder b(TestName());
const auto& x_shape = ShapeUtil::MakeShape(S32, {2, 4, 6});
const auto& y_shape = ShapeUtil::MakeShape(S32, {2, 4});
auto x = Parameter(&b, 0, x_shape, "x");
auto y = Parameter(&b, 1, y_shape, "y");
auto add = Add(x, y, {0, 1});
TF_ASSERT_OK_AND_ASSIGN(const auto add_shape, b.GetShape(add));
EXPECT_TRUE(ShapeUtil::Equal(add_shape, x_shape));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(
GetRoot(*module),
GmockMatch(m::Add(m::Parameter(0), m::Broadcast(m::Parameter(1)))));
}
TEST(XlaBuilderTest, XPlusX) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {1, 3, 5, 7}), "x");
Add(x, x);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Parameter(0), m::Parameter(0))));
}
TEST(XlaBuilderTest, TestBinaryOpImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[1]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[2, 2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2,2]"));
  Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
      /*broadcast_dimensions=*/{1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, TestBinaryOpImplicitBroadcastBounded) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[1]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[<=2, <=2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[<=2, <=2]"));
  Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
      /*broadcast_dimensions=*/{1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, ShapeInferenceError) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(U32, {2, 4, 6}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(U32, {2, 4}), "y");
Add(x, y);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("Shapes must be equal rank"));
}
TEST(XlaBuilderTest, DynamicDimensionReshapeToR0) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {1}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}), "dyn_dim");
auto dx = SetDimensionSize(x, y, 0);
Reshape(dx, {});
auto statusor = BuildHloModule(b);
ASSERT_TRUE(statusor.ok());
}
TEST(XlaBuilderTest, ParameterAlreadyRegistered) {
XlaBuilder b_call("add");
Parameter(&b_call, 0, ShapeUtil::MakeShape(PRED, {}), "x");
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "x");
auto y = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "y");
Add(x, y);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("parameter 0 already registered"));
}
TEST(XlaBuilderTest, Call) {
XlaBuilder b_call("the_only_to_apply");
auto p0 = Parameter(&b_call, 0, ShapeUtil::MakeShape(F32, {}), "p0");
auto p1 = Parameter(&b_call, 1, ShapeUtil::MakeShape(F32, {}), "p1");
Add(p0, p1);
TF_ASSERT_OK_AND_ASSIGN(const auto call, b_call.Build());
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {}), "y");
auto one = ConstantR0<float>(&b, 1);
auto two = ConstantR0<float>(&b, 2);
Add(Call(&b, call, {x, y}), Call(&b, call, {one, two}));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Call(m::Parameter(), m::Parameter()),
m::Call(m::Constant(), m::Constant()))));
}
TEST(XlaBuilderTest, CompositeCall) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands),
                /*name=*/"foo.bar",
                /*attributes=*/"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
                /*version=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Call(m::Parameter(), m::Parameter())));
}
TEST(XlaBuilderTest, CompositeCallFrontendAttributesStayLocal) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands),
                /*name=*/"foo.bar",
                /*attributes=*/"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
                /*version=*/1);
Add(operands[0], operands[1]);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_TRUE(GetRoot(*module)->frontend_attributes().map().empty());
}
TEST(XlaBuilderTest, CompositeCallMissingName) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands), /*name=*/"",
                /*attributes=*/"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
                /*version=*/1);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("A composite call op must have frontend attributes "
"with key composite.name whose value is non-empty"));
}
TEST(XlaBuilderTest, CompositeCallMissingAttribute) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands), /*name=*/"foo.bar",
                /*attributes=*/"", /*version=*/1);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(
statusor.status().message(),
HasSubstr(
"A composite call op must have frontend attributes with key "
"composite.attributes whose value is default: {} or non-empty"));
}
TEST(XlaBuilderTest, CompositeCallNonNegativeVersion) {
XlaBuilder b(TestName());
FrontendAttributes frontend_attributes = b.frontend_attributes();
frontend_attributes.mutable_map()->insert({"foo", "bar"});
b.SetFrontendAttributes(frontend_attributes);
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands),
                /*name=*/"foo.bar",
                /*attributes=*/"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
                /*version=*/-1);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("A composite call op must have frontend attributes "
"with a composite.version whose value is a "
"non-negative integer but got: -1"));
}
TEST(XlaBuilderTest, CompositeCallOptionalVersionAndAttribute) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
CompositeCall(&b, computation, absl::MakeSpan(operands), "foo.bar");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
ASSERT_THAT(GetRoot(*module),
GmockMatch(m::Call(m::Parameter(), m::Parameter())));
ASSERT_TRUE(GetRoot(*module)->frontend_attributes().map().contains(
"composite.attributes"));
EXPECT_EQ(
GetRoot(*module)->frontend_attributes().map().at("composite.attributes"),
"{}");
EXPECT_EQ(
GetRoot(*module)->frontend_attributes().map().at("composite.version"),
"0");
}
TEST(XlaBuilderTest, CompositeCallWithExtraFrontendAttributes) {
XlaBuilder b(TestName());
FrontendAttributes frontend_attributes = b.frontend_attributes();
frontend_attributes.mutable_map()->insert({"foo", "bar"});
b.SetFrontendAttributes(frontend_attributes);
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands),
                /*name=*/"foo.bar",
                /*attributes=*/"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
                /*version=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Call(m::Parameter(), m::Parameter())));
ASSERT_TRUE(GetRoot(*module)->frontend_attributes().map().contains("foo"));
EXPECT_EQ(GetRoot(*module)->frontend_attributes().map().at("foo"), "bar");
}
TEST(XlaBuilderTest, BinopHasDegenerateBroadcast) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {1, 2, 3}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {1, 2, 1}), "y");
Add(x, y);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Parameter(0),
m::Broadcast(m::Reshape(m::Parameter(1))))));
}
TEST(XlaBuilderTest, BinopHasInDimAndDegenerateBroadcast) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {2, 1, 4}), "y");
Add(x, y, {0, 1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Broadcast(m::Parameter(0)),
m::Broadcast(m::Reshape(m::Parameter(1))))));
}
TEST(XlaBuilderTest, BroadcastInDim) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3}), "x");
  BroadcastInDim(x, /*out_dim_size=*/{2, 4, 3},
                 /*broadcast_dimensions=*/{0, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Broadcast()));
}
TEST(XlaBuilderTest, BroadcastInDimWithDegeneratedDim) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 1, 4}), "x");
  BroadcastInDim(x, /*out_dim_size=*/{2, 3, 4},
                 /*broadcast_dimensions=*/{0, 1, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Broadcast(m::Reshape(m::Broadcast()))));
}
TEST(XlaBuilderTest, BroadcastInDimWithNegativeSize) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 1, 4}), "x");
  BroadcastInDim(x, /*out_dim_size=*/{-3, 3, 4},
                 /*broadcast_dimensions=*/{0, 1, 2});
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("invalid shape"));
}
TEST(XlaBuilderTest, OperandFromWrongBuilder) {
XlaBuilder b1("b1");
auto p0 = Parameter(&b1, 0, ShapeUtil::MakeShape(F32, {}), "p0");
XlaBuilder builder("main");
auto p = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "p");
Add(p, p0);
auto statusor = builder.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(
statusor.status().message(),
HasSubstr(
"built by builder 'b1', but is trying to use it in builder 'main'"));
}
TEST(XlaBuilderTest, ReshapeDefaultOrder) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3, 5, 7}), "x");
Reshape(x, {6, 35});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Reshape(m::Parameter())));
}
TEST(XlaBuilderTest, ReshapeHasTranspose) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3, 5, 7}), "x");
  Reshape(x, /*dimensions=*/{3, 2, 1, 0}, /*new_sizes=*/{6, 35});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Reshape(m::Transpose(m::Parameter()))));
}
TEST(XlaBuilderTest, Transpose) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
Transpose(x, {1, 0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Transpose(m::Parameter())));
}
TEST(XlaBuilderTest, AllGatherR1) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4}), "x");
  AllGather(x, /*all_gather_dimension=*/0, /*shard_count=*/4);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kAllGather);
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {16})));
}
TEST(XlaBuilderTest, AllGatherR2) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16}), "x");
  AllGather(x, /*all_gather_dimension=*/1, /*shard_count=*/4);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kAllGather);
EXPECT_TRUE(
ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {4, 64})));
}
TEST(XlaBuilderTest, AllGatherWithTuple) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4}), "x");
auto x2 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {16, 4}), "x2");
  AllGather(Tuple(&b, {x, x2}), /*all_gather_dimension=*/0,
            /*shard_count=*/4);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kAllGather);
EXPECT_TRUE(ShapeUtil::Equal(
root->shape(),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {16}),
ShapeUtil::MakeShape(F32, {64, 4})})));
}
TEST(XlaBuilderTest, AllGatherTuple) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {128, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {128, 8}), "p1");
  AllGatherTuple({p0, p1}, /*all_gather_dimension=*/1, /*shard_count=*/4);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
auto tuple_shape =
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {128, 16}),
ShapeUtil::MakeShape(F32, {128, 32})});
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAllGather)
.WithShapeEqualTo(&tuple_shape)));
}
TEST(XlaBuilderTest, ReduceScatter) {
XlaBuilder b(TestName());
XlaComputation to_apply;
{
auto sub_builder = b.CreateSubBuilder("add");
auto arg0 =
Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32), "x");
auto arg1 =
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16}), "x");
ReplicaGroup group;
group.add_replica_ids(0);
group.add_replica_ids(1);
  ReduceScatter(x, to_apply, /*scatter_dimension=*/1, /*shard_count=*/2,
                /*replica_groups=*/{group});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kReduceScatter);
EXPECT_TRUE(
ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {4, 8})));
}
TEST(XlaBuilderTest, ReduceScatterWithTuple) {
XlaBuilder b(TestName());
XlaComputation to_apply;
{
auto sub_builder = b.CreateSubBuilder("add");
auto arg0 =
Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32), "x");
auto arg1 =
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16}), "x");
auto x2 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {16, 4}), "x2");
ReplicaGroup group;
group.add_replica_ids(0);
group.add_replica_ids(1);
  ReduceScatter(Tuple(&b, {x, x2}), to_apply, /*scatter_dimension=*/1,
                /*shard_count=*/2,
                /*replica_groups=*/{group});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kReduceScatter);
EXPECT_TRUE(ShapeUtil::Equal(
root->shape(),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4, 8}),
ShapeUtil::MakeShape(F32, {16, 2})})));
}
TEST(XlaBuilderTest, AllToAll) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16}), "x");
  AllToAll(x, /*split_dimension=*/1, /*concat_dimension=*/0,
           /*split_count=*/2);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
EXPECT_EQ(root->operand(0)->operand(0)->operand(0)->opcode(),
HloOpcode::kAllToAll);
EXPECT_TRUE(
ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {8, 8})));
}
TEST(XlaBuilderTest, AllToAllSpecial) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16, 8}), "x");
  AllToAll(x, /*split_dimension=*/0, /*concat_dimension=*/0,
           /*split_count=*/2);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kAllToAll);
EXPECT_TRUE(
ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {4, 16, 8})));
}
TEST(XlaBuilderTest, AllToAllTuple) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {2, 4}), "p1");
ReplicaGroup replica_group;
replica_group.add_replica_ids(0);
replica_group.add_replica_ids(1);
AllToAllTuple({p0, p1}, {replica_group}, LayoutUtil::MakeAscendingLayout(2));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
  auto expected_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, /*dimensions=*/{2, 4},
                                          /*minor_to_major=*/{0, 1});
auto tuple_shape =
ShapeUtil::MakeTupleShape({expected_shape, expected_shape});
auto is_replica_group_pred = [](const HloInstruction* instr) {
return instr->replica_groups().size() == 1 &&
absl::c_equal(instr->replica_groups()[0].replica_ids(),
std::vector<int64_t>{0, 1});
};
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAllToAll)
.WithShapeEqualTo(&tuple_shape)
.WithPredicate(is_replica_group_pred)));
}
TEST(XlaBuilderTest, AllReduceTuple) {
XlaBuilder b(TestName());
auto shape0 = ShapeUtil::MakeShape(F32, {});
auto shape1 = ShapeUtil::MakeShape(F32, {1, 2});
auto p0 = Parameter(&b, 0, shape0, "p0");
auto p1 = Parameter(&b, 1, shape1, "p1");
XlaBuilder bsum(TestName());
auto f32Scalar = ShapeUtil::MakeShape(F32, {});
Add(Parameter(&bsum, 0, f32Scalar, "x"), Parameter(&bsum, 1, f32Scalar, "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
AllReduceTuple({p0, p1}, sum);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
auto tuple_shape = ShapeUtil::MakeTupleShape({shape0, shape1});
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAllReduce)
.WithShapeEqualTo(&tuple_shape)));
}
TEST(XlaBuilderTest, CollectiveBroadcast) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
ReplicaGroup replica_group;
replica_group.add_replica_ids(0);
replica_group.add_replica_ids(1);
CollectiveBroadcast(x, {replica_group});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_EQ(GetRoot(*module)->opcode(), HloOpcode::kCollectiveBroadcast);
}
TEST(XlaBuilderTest, CollectivePermute) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
CollectivePermute(x, {{0, 1}, {1, 2}, {2, 3}});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_EQ(GetRoot(*module)->opcode(), HloOpcode::kCollectivePermute);
}
TEST(XlaBuilderTest, GetDimensionSize) {
XlaBuilder b(TestName());
auto x =
Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}, {false, true}), "x");
GetDimensionSize(x, 1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_EQ(GetRoot(*module)->opcode(), HloOpcode::kGetDimensionSize);
}
TEST(XlaBuilderTest, GetDimensionSizeConstant) {
XlaBuilder b(TestName());
auto x =
Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}, {false, true}), "x");
GetDimensionSize(x, 0);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_EQ(GetRoot(*module)->opcode(), HloOpcode::kConstant);
}
TEST(XlaBuilderTest, ReportError) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
Add(b.ReportError(InvalidArgument("a test error")), x);
auto statusor = b.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("a test error"));
}
TEST(XlaBuilderTest, ReportErrorOrReturnHandlesNonErrors) {
XlaBuilder b(TestName());
absl::StatusOr<XlaOp> op(ConstantR0<float>(&b, 1.0));
Add(b.ReportErrorOrReturn(op), ConstantR0<float>(&b, 2.0));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Constant(), m::Constant())));
}
TEST(XlaBuilderTest, ReportErrorOrReturnHandlesErrors) {
XlaBuilder b(TestName());
absl::StatusOr<XlaOp> op(InvalidArgument("a test error"));
Add(b.ReportErrorOrReturn(op), ConstantR0<float>(&b, 2.0));
auto statusor = b.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("a test error"));
}
TEST(XlaBuilderTest, BuildWithSpecificRoot) {
XlaBuilder b(TestName());
const XlaOp constant = ConstantR0<float>(&b, 1.0);
Add(constant, ConstantR0<float>(&b, 2.0));
TF_ASSERT_OK_AND_ASSIGN(const auto module,
BuildHloModule(b, constant));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Constant()));
}
TEST(XlaBuilderTest, BuildWithSpecificRootAndMultipleParameters) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {42, 123});
const XlaOp x = Parameter(&b, 0, shape, "x");
const XlaOp y = Parameter(&b, 1, shape, "y");
const XlaOp z = Parameter(&b, 2, shape, "z");
Add(x, Sub(y, z));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, x));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Parameter()));
EXPECT_EQ(module->entry_computation()->num_parameters(), 3);
EXPECT_EQ(module->entry_computation()->instruction_count(), 5);
}
TEST(XlaBuilderTest, BuildWithSpecificRootWithWrongBuilder) {
XlaBuilder b(TestName());
XlaBuilder other_b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {42, 123});
Parameter(&b, 0, shape, "param");
const XlaOp other_param = Parameter(&other_b, 0, shape, "other_param");
absl::Status status = b.Build(other_param).status();
ASSERT_IS_NOT_OK(status);
EXPECT_THAT(
status.message(),
::testing::HasSubstr("root operation is not in this computation"));
}
TEST(XlaBuilderTest, ProtoMatches) {
std::vector<XlaComputation> computations;
const int n = 2;
computations.reserve(n);
for (int i = 0; i < n; ++i) {
XlaBuilder b_call("the_only_to_apply");
auto p0 = Parameter(&b_call, 0, ShapeUtil::MakeShape(F32, {}), "p0");
auto p1 = Parameter(&b_call, 1, ShapeUtil::MakeShape(F32, {}), "p1");
Add(p0, Add(p1, p0));
TF_ASSERT_OK_AND_ASSIGN(const auto call, b_call.Build());
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {}), "y");
auto one = ConstantR0<float>(&b, 1);
auto two = ConstantR0<float>(&b, 2);
Add(Call(&b, call, {x, y}), Call(&b, call, {one, two}));
computations.push_back(b.Build().value());
}
auto c0_string = computations[0].proto().SerializeAsString();
auto c1_string = computations[1].proto().SerializeAsString();
EXPECT_EQ(c0_string, c1_string);
}
TEST(XlaBuilderTest, DynamicParameter) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {6}, {true})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
Parameter(&b, 1, ShapeUtil::MakeShape(U32, {}), "p1");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, p0));
const Shape& param_shape = module->entry_computation()
->parameter_instruction(0)
->shape()
.tuple_shapes(1);
EXPECT_TRUE(param_shape.is_dynamic_dimension(0));
}
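// SetDimensionSize pairs a dimension with a runtime size value and marks it
// dynamic in the result shape; RemoveDynamicDimension is expected to clear
// that marking again (next three tests).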
TEST(XlaBuilderTest, SetDimensionSize) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}), "p1");
auto set_dim_size = SetDimensionSize(p0, p1, 0);
TF_ASSERT_OK_AND_ASSIGN(const auto module,
BuildHloModule(b, set_dim_size));
const Shape& root_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(root_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, RemoveDynamicDimension) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}), "p1");
auto set_dim_size = SetDimensionSize(p0, p1, 0);
auto remove_dim_size = RemoveDynamicDimension(set_dim_size, 0);
TF_ASSERT_OK_AND_ASSIGN(const auto module,
BuildHloModule(b, remove_dim_size));
const Shape& root_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_FALSE(root_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, RemoveDynamicDimensionMultiDims) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10, 10}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}), "p1");
auto set_dim_size = SetDimensionSize(p0, p1, 0);
set_dim_size = SetDimensionSize(set_dim_size, p1, 1);
auto remove_dim_size = RemoveDynamicDimension(set_dim_size, 0);
remove_dim_size = RemoveDynamicDimension(remove_dim_size, 1);
TF_ASSERT_OK_AND_ASSIGN(const auto module,
BuildHloModule(b, remove_dim_size));
const Shape& root_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_FALSE(root_shape.is_dynamic_dimension(0));
EXPECT_FALSE(root_shape.is_dynamic_dimension(1));
}
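// The Dynamic* tests below check that dynamic-dimension bits propagate
// through ops: operands with dynamic dimensions should yield results whose
// corresponding dimensions are dynamic as well.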
TEST(XlaBuilderTest, DynamicUnary) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}, {true}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte = GetTupleElement(p0, 0);
Neg(gte);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(result_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, DynamicBinary) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}, {true}),
ShapeUtil::MakeShape(F32, {5}, {true}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Add(gte0, gte1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(result_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, DynamicBinaryHasBroadcast) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5, 4}, {true, false}),
ShapeUtil::MakeShape(F32, {5}, {true}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Add(gte0, gte1, /*broadcast_dimensions=*/{0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicBroadcast) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5, 4}, {true, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte = GetTupleElement(p0, 0);
BroadcastInDim(gte, /*out_dim_size=*/{3, 5, 4},
/*broadcast_dimensions=*/{1, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {false, true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicBinaryHasDegenerateBroadcast) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {10}, {true}),
ShapeUtil::MakeShape(F32, {1, 15}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Add(gte0, gte1, /*broadcast_dimensions=*/{0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicSelectOnlyPredDynamic) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {10}, {true}),
ShapeUtil::MakeShape(F32, {10}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Select(gte0, gte1, gte1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true}))
<< result_shape;
}
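// A Select with a scalar predicate is expected to lower to a Conditional
// whose two branches return the respective operands.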
TEST(XlaBuilderTest, SelectIntoConditional) {
XlaBuilder b(TestName());
const Shape selector_shape = ShapeUtil::MakeShape(PRED, {});
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(F32, {})});
const XlaOp p0 = Parameter(&b, 0, selector_shape, "p0");
const XlaOp p1 = Parameter(&b, 1, tuple_param_shape, "p1");
const XlaOp p2 = Parameter(&b, 2, tuple_param_shape, "p2");
Select(p0, p1, p2);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Conditional(m::Parameter(0), m::Parameter(1),
m::Parameter(2))));
EXPECT_THAT(module->entry_computation()
->root_instruction()
->branch_computation(0)
->root_instruction(),
GmockMatch(m::Parameter(0)));
EXPECT_THAT(module->entry_computation()
->root_instruction()
->branch_computation(1)
->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST(XlaBuilderTest, DynamicPad) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5, 4}, {true, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto pad_val = ConstantR0<float>(&b, -1);
auto gte = GetTupleElement(p0, 0);
PaddingConfig padding_config;
for (int i = 0; i < 2; i++) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(0);
dimension->set_edge_padding_high(0);
dimension->set_interior_padding(0);
}
Pad(gte, pad_val, padding_config);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicConvolution) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {1, 2, 2, 128}, {true, false, false, false}),
ShapeUtil::MakeShape(F32, {2, 2, 128, 8}, {false, false, true, false}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto input = GetTupleElement(p0, 0);
auto filter = GetTupleElement(p0, 1);
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
ConvWithGeneralDimensions(input, filter, /*window_strides=*/{1, 1},
Padding::kValid, dnums, /*feature_group_count=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(),
{true, false, false, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicDot) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 3, 4}, {true, true, false}),
ShapeUtil::MakeShape(F32, {2, 4, 5}, {true, false, false}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto lhs = GetTupleElement(p0, 0);
auto rhs = GetTupleElement(p0, 1);
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(2);
dnums.add_rhs_contracting_dimensions(1);
dnums.add_lhs_batch_dimensions(0);
dnums.add_rhs_batch_dimensions(0);
DotGeneral(lhs, rhs, dnums);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {true, true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicReduce) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5, 4, 3}, {false, true, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto init = ConstantR0<float>(&b, 0);
auto gte = GetTupleElement(p0, 0);
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
Reduce(gte, init, sum, /*dimensions_to_reduce=*/{0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicReduceWindow) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 4, 8}, {true, false, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto init = ConstantR0<float>(&b, 0.f);
auto gte = GetTupleElement(p0, 0);
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
ReduceWindow(gte, init, sum, /*window_dimensions=*/{1, 2, 4},
/*window_strides=*/{1, 1, 1}, Padding::kValid);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
VLOG(2) << module->entry_computation()->root_instruction()->ToString()
<< "\n";
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {true, false, false}))
<< result_shape;
}
TEST(XlaBuilderTest, VariadicDynamicReduceWindow) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 4, 8}, {true, false, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto p1 = Parameter(&b, 1, tuple_param_shape, "p1");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p1, 0);
std::vector<XlaOp> input_operands = {gte0, gte1};
XlaBuilder bsum(TestName());
auto p2 = Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x0");
auto p3 = Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "x1");
auto p4 = Parameter(&bsum, 2, ShapeUtil::MakeShape(F32, {}), "y0");
auto p5 = Parameter(&bsum, 3, ShapeUtil::MakeShape(F32, {}), "y1");
std::vector<XlaOp> output_operands = {Add(p2, p4), Add(p3, p5)};
Tuple(&bsum, absl::MakeSpan(output_operands));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
auto init = ConstantR0<float>(&b, 0.f);
ReduceWindow(input_operands, {init, init}, sum,
/*window_dimensions=*/{1, 2, 4},
/*window_strides=*/{1, 1, 1}, Padding::kValid);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
VLOG(2) << module->entry_computation()->root_instruction()->ToString()
<< "\n";
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.tuple_shapes(0).dynamic_dimensions(),
{true, false, false}))
<< result_shape.tuple_shapes(0);
EXPECT_TRUE(ContainersEqual(result_shape.tuple_shapes(1).dynamic_dimensions(),
{true, false, false}))
<< result_shape.tuple_shapes(1);
}
TEST(XlaBuilderTest, DynamicSelectAndScatter) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 4, 8}, {true, false, false}),
ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto init = ConstantR0<float>(&b, 0.f);
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
XlaBuilder bge(TestName());
Ge(Parameter(&bge, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bge, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto ge, bge.Build());
auto gte0 = GetTupleElement(p0, 0);
auto source = GetTupleElement(p0, 1);
SelectAndScatter(gte0, ge, /*window_dimensions=*/{1, 2, 4},
/*window_strides=*/{1, 2, 4}, Padding::kValid, source,
init, sum);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {true, false, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicReshape) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
{false, false, true, true, false}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte = GetTupleElement(p0, 0);
Reshape(gte, /*new_sizes=*/{6, 4, 5, 2, 3});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(result_shape.is_dynamic_dimension(1));
EXPECT_TRUE(result_shape.is_dynamic_dimension(2));
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(),
{false, true, true, false, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicSelect) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {4, 5, 6}, {false, true, false}),
ShapeUtil::MakeShape(F32, {4, 5, 6}, {false, true, false}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto pred = Parameter(&b, 1, ShapeUtil::MakeShape(PRED, {}), "pred");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Select(pred, gte0, gte1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(result_shape.is_dynamic_dimension(1));
EXPECT_FALSE(result_shape.is_dynamic_dimension(2));
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {false, true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicSelectNotCompatible) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {4, 5, 6}, {false, true, false}),
ShapeUtil::MakeShape(F32, {4, 5, 6}, {false, false, true}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto pred = Parameter(&b, 1, ShapeUtil::MakeShape(PRED, {}), "pred");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Select(pred, gte0, gte1);
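// Mismatched per-dimension dynamism between the branches is tolerated at
// build time; presumably it is reconciled later in compilation.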
absl::Status status = BuildHloModule(b).status();
ASSERT_IS_OK(status);
}
TEST(XlaBuilderTest, DynamicTranspose) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 5}, {true, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte = GetTupleElement(p0, 0);
Transpose(gte, /*permutation=*/{1, 0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {false, true}))
<< result_shape;
}
TEST(XlaBuilderTest, DotWithPreferredElementType) {
XlaBuilder b(TestName());
const Shape p0_shape = ShapeUtil::MakeShape(U8, {2, 3});
const Shape p1_shape = ShapeUtil::MakeShape(U16, {3, 2});
auto p0 = Parameter(&b, 0, p0_shape, "p0");
auto p1 = Parameter(&b, 1, p1_shape, "p1");
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(1);
dnums.add_rhs_contracting_dimensions(0);
DotGeneral(p0, p1, dnums, /*precision_config=*/nullptr,
/*preferred_element_type=*/U32);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
ASSERT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(U32, {2, 2}), result_shape));
}
TEST(XlaBuilderTest, FftWithFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[5, <=10]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[5, <=10]"));
Fft(Parameter(&b, 0, operand, "operand"), FftType::FFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, FftWithIFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[5, <=10]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[5, <=10]"));
Fft(Parameter(&b, 0, operand, "operand"), FftType::IFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, FftWithRFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f64[10, <=5]"));
const std::vector<int64_t> fft_length = {5};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c128[10, <=3]"));
Fft(Parameter(&b, 0, operand, "operand"), FftType::RFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, FftWithIRFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c128[10, <=3]"));
const std::vector<int64_t> fft_length = {5};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f64[10, <=5]"));
Fft(Parameter(&b, 0, operand, "operand"), FftType::IRFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
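// 2:4 structured sparsity on the lhs: f32[10, 16] holds the kept elements of
// a logical f32[10, 32] operand, and u16[10, 2] packs the 2-bit indices of
// the kept elements, so the contraction lines up with rhs f32[32, 20].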
TEST(XlaBuilderTest, SparseDot) {
XlaBuilder b(TestName());
auto lhs = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10, 16}), "lhs");
auto rhs = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {32, 20}), "rhs");
auto meta = Parameter(&b, 2, ShapeUtil::MakeShape(U16, {10, 2}), "meta");
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(1);
dnums.add_rhs_contracting_dimensions(0);
SparsityDescriptor sparsity_descriptor;
sparsity_descriptor.set_type(SparsityType::SPARSITY_STRUCTURED_N_M);
sparsity_descriptor.set_n(2);
sparsity_descriptor.set_m(4);
sparsity_descriptor.set_index(0);
sparsity_descriptor.set_dimension(1);
std::vector<SparsityDescriptor> sparsity = {sparsity_descriptor};
std::vector<XlaOp> sparse_meta = {meta};
SparseDot(lhs, rhs, sparse_meta, sparsity, dnums);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[10, 20]"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, ConvolutionWithPreferredElementType) {
XlaBuilder b(TestName());
const Shape p0_shape = ShapeUtil::MakeShape(S16, {1, 2, 2, 128});
const Shape p1_shape = ShapeUtil::MakeShape(S8, {2, 2, 128, 8});
auto p0 = Parameter(&b, 0, p0_shape, "p0");
auto p1 = Parameter(&b, 1, p1_shape, "p1");
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
ConvWithGeneralDimensions(p0, p1, /*window_strides=*/{1, 1}, Padding::kValid,
dnums, /*feature_group_count=*/1,
/*batch_group_count=*/1,
/*precision_config=*/nullptr,
/*preferred_element_type=*/S32);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
ASSERT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(S32, {1, 1, 1, 8}), result_shape));
}
TEST(XlaBuilderTest, AfterAllWithNonTokenOperands) {
XlaBuilder b(TestName());
AfterAll(&b, {CreateToken(&b), ConstantR0<float>(&b, 1.0)});
absl::Status status = b.Build().status();
ASSERT_IS_NOT_OK(status);
EXPECT_THAT(status.message(),
::testing::HasSubstr("All operands to AfterAll must be tokens"));
}
TEST(XlaBuilderTest, AfterAllWithNoInputs) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("token[]"));
AfterAll(&b, {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, CheckInputOutputAlias) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {8, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {8, 4}), "p1");
auto add = Add(p0, p1);
auto sub = Sub(p0, p1);
auto root = Tuple(&b, {add, sub});
b.SetUpAlias({1}, 0, {});
b.SetUpAlias({0}, 1, {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, root));
const HloInputOutputAliasConfig& config = module->input_output_alias_config();
EXPECT_TRUE(config.ParameterHasAlias(0, {}));
EXPECT_TRUE(config.ParameterHasAlias(1, {}));
auto alias_p0 = config.GetAliasedOutput(0, {});
ASSERT_TRUE(alias_p0.has_value());
EXPECT_EQ(*alias_p0, ShapeIndex({1}));
auto alias_p1 = config.GetAliasedOutput(1, {});
ASSERT_TRUE(alias_p1.has_value());
EXPECT_EQ(*alias_p1, ShapeIndex({0}));
}
TEST(XlaBuilderTest, CheckBufferDonor) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {8, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {8, 4}), "p1");
auto add = Add(p0, p1);
auto sub = Sub(p0, p1);
auto root = Tuple(&b, {add, sub});
b.AddBufferDonor(0, {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, root));
const HloBufferDonorConfig& config = module->buffer_donor_config();
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(1, {}));
}
TEST(XlaBuilderTest, ConstantLiteral) {
XlaBuilder b(TestName());
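// On x86, temporarily set the denormals-are-zero (DAZ) bit in MXCSR to check
// that building a constant containing a subnormal value (1.401298e-45f is
// the smallest positive denormal float) is unaffected by the FP environment.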
#if defined(__x86_64__) && defined(_MM_DENORMALS_ZERO_ON)
int old_csr = _mm_getcsr();
_mm_setcsr(old_csr | _MM_DENORMALS_ZERO_ON);
#endif
ConstantR1<float>(&b, {0.0f, 1.401298e-45f});
#if defined(__x86_64__) && defined(_MM_DENORMALS_ZERO_ON)
_mm_setcsr(old_csr);
#endif
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
ASSERT_THAT(root, GmockMatch(m::Constant()));
}
TEST(XlaBuilderTest, InvalidInputOutputAliasBufferDonor) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {8, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {8, 4}), "p1");
auto add = Add(p0, p1);
auto sub = Sub(p0, p1);
auto root = Tuple(&b, {add, sub});
b.SetUpAlias({1}, 0, {});
b.AddBufferDonor(0, {});
auto statusor = BuildHloModule(b, root);
EXPECT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("is already aliased with one output, thus it cannot be "
"added as a buffer donor for any output."));
}
TEST(XlaBuilderTest, ValidInputOutputAliasBufferDonor) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {8, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {8, 4}), "p1");
auto add = Add(p0, p1);
auto sub = Sub(p0, p1);
auto root = Tuple(&b, {add, sub});
b.SetUpAlias({1}, 0, {});
b.AddBufferDonor(1, {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, root));
const HloInputOutputAliasConfig& io_alias_config =
module->input_output_alias_config();
const HloBufferDonorConfig& buffer_donor_config =
module->buffer_donor_config();
EXPECT_TRUE(io_alias_config.ParameterHasAlias(0, {}));
EXPECT_FALSE(io_alias_config.ParameterHasAlias(1, {}));
EXPECT_FALSE(buffer_donor_config.ParameterIsBufferDonor(0, {}));
EXPECT_TRUE(buffer_donor_config.ParameterIsBufferDonor(1, {}));
auto alias_p0 = io_alias_config.GetAliasedOutput(0, {});
ASSERT_TRUE(alias_p0.has_value());
EXPECT_EQ(*alias_p0, ShapeIndex({1}));
}
void ExpectAttributesMatch(const FrontendAttributes& attr,
const FrontendAttributes& ref) {
EXPECT_EQ(ref.map_size(), attr.map_size());
for (const auto& reference : ref.map()) {
auto other = attr.map().find(reference.first);
EXPECT_NE(other, attr.map().end());
EXPECT_EQ(other->second, reference.second);
}
}
void ExpectInstructionsAttributesMatch(
const HloModule& module, const std::vector<FrontendAttributes>& expected) {
ASSERT_EQ(module.computation_count(), 1);
auto expected_it = expected.begin();
for (auto inst : module.entry_computation()->instructions()) {
ASSERT_NE(expected_it, expected.end());
ExpectAttributesMatch(inst->frontend_attributes(), *expected_it);
expected_it++;
}
EXPECT_EQ(expected_it, expected.end());
}
TEST(XlaBuilderTest, SimpleSetFrontendAttributes) {
XlaBuilder b(TestName());
FrontendAttributes attributes;
ConstantR0(&b, 0);
(*attributes.mutable_map())["attr_a"] = "a";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
b.ClearFrontendAttributes();
ConstantR0(&b, 0);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
std::vector<FrontendAttributes> expected{FrontendAttributes(), attributes,
FrontendAttributes()};
ExpectInstructionsAttributesMatch(*module, expected);
}
TEST(XlaBuilderTest, ComplexSetFrontendAttributes) {
XlaBuilder b(TestName());
ConstantR0(&b, 0);
std::vector<FrontendAttributes> expected{FrontendAttributes()};
{
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
expected.push_back(attributes);
}
{
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_b"] = "b";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
expected.push_back(attributes);
}
{
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_b"] = "b";
(*attributes.mutable_map())["attr_c"] = "c";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
expected.push_back(attributes);
}
b.ClearFrontendAttributes();
ConstantR0(&b, 0);
expected.push_back(FrontendAttributes());
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
ExpectInstructionsAttributesMatch(*module, expected);
}
TEST(XlaBuilderTest, AddFrontendAttribute) {
XlaBuilder b(TestName());
ConstantR0(&b, 0);
std::vector<FrontendAttributes> expected{FrontendAttributes()};
{
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
expected.push_back(attributes);
}
{
auto op = ConstantR0(&b, 0);
EXPECT_IS_OK(b.SetInstructionFrontendAttribute(op, "attr_c", "c"));
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a";
(*attributes.mutable_map())["attr_c"] = "c";
expected.push_back(attributes);
}
{
auto op = ConstantR0(&b, 0);
EXPECT_IS_OK(b.SetInstructionFrontendAttribute(op, "attr_a", "a2"));
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a2";
expected.push_back(attributes);
}
{
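// No instruction-level attribute is set here, so the op keeps only the
// builder-wide "attr_a".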
auto op = ConstantR0(&b, 0);
(void)op;
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a";
expected.push_back(attributes);
}
b.ClearFrontendAttributes();
ConstantR0(&b, 0);
expected.push_back(FrontendAttributes());
{
auto op = ConstantR0(&b, 0);
EXPECT_IS_OK(b.SetInstructionFrontendAttribute(op, "attr_d", "d"));
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_d"] = "d";
expected.push_back(attributes);
}
ConstantR0(&b, 0);
expected.push_back(FrontendAttributes());
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
ExpectInstructionsAttributesMatch(*module, expected);
}
TEST(XlaBuilderTest, SetAndGetSharding) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {1024});
OpSharding op_sharding_1 = sharding_builder::Replicate();
OpSharding op_sharding_2 = sharding_builder::Tile1D(shape, 4);
TF_ASSERT_OK_AND_ASSIGN(HloSharding hlo_sharding_1,
HloSharding::FromProto(op_sharding_1));
TF_ASSERT_OK_AND_ASSIGN(HloSharding hlo_sharding_2,
HloSharding::FromProto(op_sharding_2));
b.SetSharding(op_sharding_1);
XlaOp p0 = Parameter(&b, 0, shape, "p0");
TF_ASSERT_OK_AND_ASSIGN(auto p0_sharding, b.GetOpSharding(p0));
EXPECT_TRUE(p0_sharding.has_value());
EXPECT_EQ(HloSharding::FromProto(p0_sharding.value()).value(),
hlo_sharding_1);
EXPECT_TRUE(b.SetInstructionSharding(p0, std::nullopt).ok());
TF_ASSERT_OK_AND_ASSIGN(p0_sharding, b.GetOpSharding(p0));
EXPECT_FALSE(p0_sharding.has_value());
EXPECT_TRUE(b.SetInstructionSharding(p0, op_sharding_2).ok());
TF_ASSERT_OK_AND_ASSIGN(p0_sharding, b.GetOpSharding(p0));
EXPECT_TRUE(p0_sharding.has_value());
EXPECT_EQ(HloSharding::FromProto(p0_sharding.value()).value(),
hlo_sharding_2);
EXPECT_EQ(HloSharding::FromProto(b.sharding().value()).value(),
hlo_sharding_1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_TRUE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
EXPECT_EQ(module->entry_computation()->parameter_instruction(0)->sharding(),
hlo_sharding_2);
}
TEST(XlaBuilderTest, ComparisonType) {
XlaBuilder b(TestName());
(void)Le(ConstantR0<int32_t>(&b, 1), ConstantR0<int32_t>(&b, 2));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
ASSERT_THAT(root, GmockMatch(m::Compare(m::Constant(), m::Constant())));
EXPECT_EQ(Comparison::Type::kSigned,
DynCast<HloCompareInstruction>(root)->type());
}
TEST(XlaBuilderTest, StableLookUpInstructionByHandle) {
XlaBuilder b(TestName());
internal::XlaBuilderFriend builder_friend;
const XlaOp le = Le(ConstantR0<int32_t>(&b, 1), ConstantR0<int32_t>(&b, 2));
HloInstructionProto* first_op = builder_friend.GetInstruction(le);
for (int i = 0; i < 100; ++i) {
(void)Le(ConstantR0<int32_t>(&b, 1), ConstantR0<int32_t>(&b, 2));
}
HloInstructionProto* first_op_now = builder_friend.GetInstruction(le);
EXPECT_EQ(first_op, first_op_now);
}
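// Abs of a complex constant yields a real-typed (F32) result, which value
// inference should be able to analyze as a constant.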
TEST(XlaBuilderTest, ComplexAbsConstant) {
XlaBuilder b(TestName());
const XlaOp out =
Abs(ConstantR0<std::complex<float>>(&b, std::complex<float>{-1, -1}));
ValueInference value_inference(&b);
absl::StatusOr<OptionalLiteral> analyzed =
value_inference.AnalyzeConstant(out, kUpperBound);
EXPECT_IS_OK(analyzed.status());
EXPECT_EQ(analyzed->GetValue().value().shape().element_type(),
PrimitiveType::F32);
}
TEST(XlaBuilderTest, OutfeedDummyTupleSharding) {
XlaBuilder b(TestName());
const XlaOp value = ConstantR1<int32_t>(&b, {0});
const Shape shape = ShapeUtil::MakeShapeWithDenseLayout(
S32, /*dimensions=*/{1}, /*minor_to_major=*/{0});
Outfeed(value, shape, "");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_FALSE(module->entry_computation()->root_instruction()->has_sharding());
}
TEST(XlaBuilderTest, OutfeedTokenSharding) {
XlaBuilder b(TestName());
const XlaOp value = ConstantR1<int32_t>(&b, {0});
const Shape shape = ShapeUtil::MakeShapeWithDenseLayout(
S32, /*dimensions=*/{1}, /*minor_to_major=*/{0});
b.SetSharding(sharding_builder::Replicate());
Outfeed(value, shape, "");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
auto it = std::find_if(module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end(),
HloPredicateIsOp<HloOpcode::kOutfeed>);
EXPECT_NE(it, module->entry_computation()->instructions().end());
auto* outfeed = *it;
EXPECT_TRUE(outfeed->has_sharding());
EXPECT_TRUE(outfeed->sharding().IsTuple());
EXPECT_EQ(outfeed->sharding().tuple_elements().size(), 2);
EXPECT_TRUE(outfeed->operand(1)->has_sharding());
EXPECT_EQ(outfeed->sharding().tuple_elements().back(),
HloSharding::FromProto(sharding_builder::AssignDevice(0)).value());
EXPECT_EQ(outfeed->operand(1)->sharding(),
HloSharding::FromProto(sharding_builder::AssignDevice(0)).value());
}
TEST(XlaBuilderTest, NormalizeTupleSharding) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {6})});
b.SetSharding(sharding_builder::Replicate());
Parameter(&b, 0, tuple_param_shape, "p0");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_TRUE(root->has_sharding());
EXPECT_TRUE(root->sharding().IsTuple());
EXPECT_EQ(GetRoot(*module)->sharding().tuple_elements().size(), 2);
}
TEST(XlaBuilderTest, InvalidSharding) {
XlaBuilder b(TestName());
const Shape shape2d = ShapeUtil::MakeShape(F32, {6, 8});
const Shape shape1d = ShapeUtil::MakeShape(F32, {5});
b.SetSharding(sharding_builder::Tile1D(shape1d, 4));
Parameter(&b, 0, shape2d, "p0");
auto statusor = b.Build();
EXPECT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("Number of tile assignment dimensions (excluding "
"subgroups) is different than the input rank"));
}
TEST(XlaBuilderTest, TopKDimensions) {
XlaBuilder b(TestName());
const int64_t k = 1;
const bool largest = true;
TopK(Parameter(&b, 0, ShapeUtil::MakeShape(F32, {6, 8}), "p0"), k, largest);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kTopK);
EXPECT_TRUE(root->shape().IsTuple());
EXPECT_EQ(root->shape().tuple_shapes_size(), 2);
EXPECT_EQ(root->shape().tuple_shapes(0).rank(), 2);
EXPECT_EQ(root->shape().tuple_shapes(1).rank(), 2);
EXPECT_EQ(root->shape().tuple_shapes(0).dimensions(0), 6);
EXPECT_EQ(root->shape().tuple_shapes(0).dimensions(1), k);
EXPECT_EQ(root->shape().tuple_shapes(1).dimensions(0), 6);
EXPECT_EQ(root->shape().tuple_shapes(1).dimensions(1), k);
}
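// The Mhlo* tests below cover ops that round-trip through an MHLO encoding;
// the module text is expected to carry the mhlo.* marker, and invalid
// operands should be rejected at build time.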
TEST(XlaBuilderTest, MhloDynamicBroadcastInDimExportSuccess) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[1, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[1, 2, 3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[1, 2, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
/*broadcast_dimensions=*/{1, 2}, output_shape);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(module->ToString(), HasSubstr("mhlo.dynamic_broadcast_in_dim"));
EXPECT_THAT(module->ToString(), HasSubstr("broadcast_dimensions=[1,2]"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest,
MhloDynamicBroadcastInDimNonBroadcastDimSizeGreaterThanOne) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[2, 2, 3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2, 2, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
/*broadcast_dimensions=*/{1, 2}, output_shape);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(module->ToString(), HasSubstr("mhlo.dynamic_broadcast_in_dim"));
EXPECT_THAT(module->ToString(), HasSubstr("broadcast_dimensions=[1,2]"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, MhloDynamicBroadcastInDimDynamicResultSize) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[1, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[1, 2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[1, 2, ?]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
/*broadcast_dimensions=*/{1, 2}, output_shape);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(module->ToString(), HasSubstr("mhlo.dynamic_broadcast_in_dim"));
EXPECT_THAT(module->ToString(), HasSubstr("broadcast_dimensions=[1,2]"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest,
MhloDynamicBroadcastInDimInvalidOutputDimensionsElementType) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("f32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[2, 3, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
/*broadcast_dimensions=*/{1, 2}, output_shape);
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr("output_dimensions must be an integer type f32[3]")));
}
TEST(XlaBuilderTest, MhloDynamicBroadcastInDimInvalidOutputDimensionsRank) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions,
ParseShape("s32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[2, 3, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
/*broadcast_dimensions=*/{1, 2}, output_shape);
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr("output_dimensions must be rank 1 but got rank 2")));
}
TEST(XlaBuilderTest, MhloDynamicBroadcastInDimIncompatibleBroadcastSize) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[2, 3, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
/*broadcast_dimensions=*/{1, 2}, output_shape);
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_, HasSubstr("size of operand dimension 0 (2) is not compatible "
"with size of result dimension 1 (3)")));
}
TEST(XlaBuilderTest, MhloDynamicReshapeExportSuccess) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("s32[2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 15]"));
MhloDynamicReshape(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_shape, "output_shape"),
shape);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(module->ToString(), HasSubstr("mhlo.dynamic_reshape"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, MhloDynamicReshapeIncompatibleElementType) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("s32[2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("s32[?, 15]"));
MhloDynamicReshape(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_shape, "output_shape"),
shape);
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("Element type of operand f32[?,15] and "
"output s32[?,15] must match")));
}
TEST(XlaBuilderTest, MhloDynamicReshapeElementCountMismatch) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("s32[2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[4, 15]"));
MhloDynamicReshape(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_shape, "output_shape"),
shape);
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("MhloDynamicReshape has mismatched "
"element counts: from=45 (f32[3,15]) "
"to=60 (f32[4,15])")));
}
TEST(XlaBuilderTest, MhloDynamicReshapeRankMismatch) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 15]"));
MhloDynamicReshape(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_shape, "output_shape"),
shape);
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_, HasSubstr("output_shape dimension size=3 (s32[3]) and rank "
"of shape=2 (f32[?,15]) must match")));
}
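// Unbounded dynamism tests: in parsed shapes, "?" denotes an unbounded
// dynamic dimension and "<=N" a bounded one. The parameterized fixtures below
// run unary/binary ops over such shapes and compare the inferred result shape
// or the expected error.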
struct UnaryOpTestCase {
std::string operand;
std::string expected;
std::function<XlaOp(XlaOp)> unary_op;
};
struct BinaryOpTestCase {
std::string lhs;
std::string rhs;
absl::Span<const int64_t> broadcast_dimensions;
std::string expected;
std::function<XlaOp(XlaOp, XlaOp, absl::Span<const int64_t>)> binary_op;
std::optional<std::string_view> error_message;
};
constexpr absl::string_view kBroadcastDimensionMismatch =
"Broadcast dimension 0 mismatch: 2 != -9223372036854775808; f32[2] and "
"f32[?,10].";
std::array<const int64_t, 0> empty_array = {};
std::array<const int64_t, 1> zero_array = {0};
class XlaBuilderUnboundedUnaryOpTest
: public ::testing::TestWithParam<UnaryOpTestCase> {};
class XlaBuilderUnboundedBinaryOpTest
: public ::testing::TestWithParam<BinaryOpTestCase> {};
TEST_P(XlaBuilderUnboundedUnaryOpTest, UnboundedUnaryOpTest) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape(GetParam().operand));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape(GetParam().expected));
GetParam().unary_op(Parameter(&b, 0, operand, "operand"));
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<xla::HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST_P(XlaBuilderUnboundedBinaryOpTest, UnboundedBinaryOpTest) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape(GetParam().lhs));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape(GetParam().rhs));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape(GetParam().expected));
GetParam().binary_op(Parameter(&b, 0, lhs, "lhs"),
Parameter(&b, 1, rhs, "rhs"),
GetParam().broadcast_dimensions);
if (const auto result = BuildHloModule(b); result.ok()) {
ASSERT_NE(*result, nullptr);
EXPECT_THAT(GetRoot(**result),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
} else {
ASSERT_TRUE(GetParam().error_message.has_value());
EXPECT_THAT(result, StatusIs(_, HasSubstr(*GetParam().error_message)));
}
}
TEST(XlaBuilderTest, UnboundedAddScalarBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
empty_array);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAddDegenerateBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[1, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
/*broadcast_dimensions=*/{0, 1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAddUnsupportedImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
zero_array);
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr(kBroadcastDimensionMismatch)));
}
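// AllToAll splits split_dimension into split_count pieces and concatenates
// them along concat_dimension: f32[?, 15] with split_dimension=0,
// concat_dimension=1, split_count=3 infers f32[?, 45].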
TEST(XlaBuilderTest, UnboundedAllGather) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
AllGather(Parameter(&b, 0, operand, "operand"), /*all_gather_dimension=*/0,
/*shard_count=*/2,
/*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllReduce) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, operand, "arg0"),
Parameter(sub_builder.get(), 1, operand, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(computation, sub_builder->Build());
}
AllReduce(Parameter(&b, 0, operand, "operand"), computation,
/*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllDynamicSplitDimension) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 45]"));
AllToAll(Parameter(&b, 0, operand, "operand"),
/*split_dimension=*/0,
/*concat_dimension=*/1,
/*split_count=*/3,
/*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllDynamicConcatDimension) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 5]"));
AllToAll(Parameter(&b, 0, operand, "operand"),
/*split_dimension=*/1,
/*concat_dimension=*/0,
/*split_count=*/3,
/*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllDynamicSplitAndConcatDimensionEqual) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 15]"));
AllToAll(Parameter(&b, 0, operand, "operand"),
/*split_dimension=*/0,
/*concat_dimension=*/0,
/*split_count=*/3,
/*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllFullyDynamic) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?]"));
AllToAll(Parameter(&b, 0, operand, "operand"),
/*split_dimension=*/0,
/*concat_dimension=*/1,
/*split_count=*/3,
/*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllTupleVariadicUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]{1,0}"));
b.ReportErrorOrReturn(
AllToAllTuple({Parameter(&b, 0, operand, "operand0"),
Parameter(&b, 1, operand, "operand1")},
/*replica_groups=*/{}));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr(
"AllToAllTuple does not support unbounded dynamic shapes")));
}
TEST(XlaBuilderTest, UnboundedAllToAllTupleUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]{1,0}"));
b.ReportErrorOrReturn(
AllToAllTuple(Parameter(&b, 0, operand, "operand"),
/*split_dimension=*/0,
/*concat_dimension=*/1,
/*split_count=*/3,
/*replica_groups=*/{}));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr(
"AllToAllTuple does not support unbounded dynamic shapes")));
}
TEST(XlaBuilderTest, BoundedAllToAllTupleUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, <=15]{1,0}"));
b.ReportErrorOrReturn(
AllToAllTuple(Parameter(&b, 0, operand, "operand"),
/*split_dimension=*/0,
/*concat_dimension=*/1,
/*split_count=*/3,
/*replica_groups=*/{}));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr("AllToAll does not support bounded dynamic shapes")));
}
TEST(XlaBuilderTest, BoundedAllToAllUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, <=15]{1,0}"));
b.ReportErrorOrReturn(
AllToAll(Parameter(&b, 0, operand, "operand"),
/*split_dimension=*/0,
/*concat_dimension=*/1,
/*split_count=*/3,
/*replica_groups=*/{}));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr("AllToAll does not support bounded dynamic shapes")));
}
TEST(XlaBuilderTest, UnboundedAnd) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("s32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("s32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("s32[?, ?, 2, 2, <=2, <=2, ?]"));
And(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
empty_array);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBatchNormGrad) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape grad_operand, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape scale, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape mean, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape variance, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape grad_scale, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape grad_offset, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape grad_output, ParseShape("f32[5, ?, 7]"));
const Shape expected =
ShapeUtil::MakeTupleShape({grad_operand, grad_scale, grad_offset});
BatchNormGrad(
Parameter(&b, 0, operand, "operand"), Parameter(&b, 1, scale, "scale"),
Parameter(&b, 2, mean, "mean"), Parameter(&b, 3, variance, "variance"),
Parameter(&b, 4, grad_output, "grad_output"), /*epsilon=*/1.0,
/*feature_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBatchNormInference) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape scale, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape offset, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape mean, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape variance, ParseShape("f32[5]"));
BatchNormInference(
Parameter(&b, 0, operand, "operand"), Parameter(&b, 1, scale, "scale"),
Parameter(&b, 2, offset, "offset"), Parameter(&b, 3, mean, "mean"),
Parameter(&b, 4, variance, "variance"), /*epsilon=*/1.0,
/*feature_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBatchNormTraining) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape scale, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape offset, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape batch_mean, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape batch_var, ParseShape("f32[?]"));
const Shape expected =
ShapeUtil::MakeTupleShape({output, batch_mean, batch_var});
BatchNormTraining(Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, scale, "scale"),
Parameter(&b, 2, offset, "offset"), /*epsilon=*/1.0,
/*feature_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBitcastConvert) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f16[?, 10, 2]"));
BitcastConvertType(Parameter(&b, 0, operand, "operand"), PrimitiveType::F16);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBroadcastUnsupportedOperand) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[<=3, ?]"));
Broadcast(Parameter(&b, 0, operand, "operand"), /*broadcast_sizes=*/{1});
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("is_unbounded_dynamic")));
}
TEST(XlaBuilderTest, UnboundedBroadcastUnsupportedBroadcastSize) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[1]"));
Broadcast(Parameter(&b, 0, operand, "operand"),
/*broadcast_sizes=*/{Shape::kUnboundedSize});
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_, HasSubstr("Non-broadcast dimensions must not be dynamic.")));
}
TEST(XlaBuilderTest, UnboundedBroadcastInDim) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[<=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[<=2, 3, 4]"));
BroadcastInDim(Parameter(&b, 0, operand, "operand"),
/*out_dim_size=*/{2, 3, 4},
/*broadcast_dimensions=*/{0, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBroadcastInDimUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[<=3, ?]"));
BroadcastInDim(Parameter(&b, 0, operand, "operand"),
/*out_dim_size=*/{2, 3, Shape::kUnboundedSize},
/*broadcast_dimensions=*/{0, 2});
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("BroadcastInDim output must shape be "
"static or bounded dynamic")));
}
TEST(XlaBuilderTest, UnboundedCall) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, operand, "arg0"),
Parameter(sub_builder.get(), 1, operand, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(computation, sub_builder->Build());
}
Call(&b, computation,
{Parameter(&b, 0, operand, "arg0"), Parameter(&b, 1, operand, "arg1")});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedCholesky) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape a, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  Cholesky(Parameter(&b, 0, a, "a"), /*lower=*/true);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClamp) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("f32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("f32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs,
ParseShape("f32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("f32[?, 1, ?, 2, ?, <=2, ?]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClampScalarMinImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClampScalarMinMaxImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClampScalarOperandMaxImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClampScalarMinOperandImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest,
UnboundedClampUnsupportedDegenerateOperandImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[1]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("Unimplemented implicit broadcast.")));
}
TEST(XlaBuilderTest, UnboundedCollectiveBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  CollectiveBroadcast(Parameter(&b, 0, operand, "operand"),
                      /*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedCollectivePermute) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  CollectivePermute(Parameter(&b, 0, operand, "operand"),
                    /*source_target_pairs=*/{std::make_pair(0, 1)});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedCompare) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("f32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("f32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("pred[?, ?, 2, 2, <=2, <=2, ?]"));
  Compare(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"), {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedConcatenate) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand1,
ParseShape("f32[3, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape operand2,
ParseShape("f32[?, 4, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape operand3,
ParseShape("f32[?, ?, 2, 2, <=2, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("f32[3, 4, ?, 2, <=2, <=2, ?]"));
ConcatInDim(&b,
{Parameter(&b, 0, operand1, "operand1"),
Parameter(&b, 1, operand2, "operand2"),
Parameter(&b, 2, operand3, "operand3")},
2);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedConvert) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("s32[?]"));
ConvertElementType(Parameter(&b, 0, operand, "operand"), PrimitiveType::S32);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedConvolution) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 2, ?, 128]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[2, 2, <=128, 8]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 1, ?, 8]"));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
  ConvWithGeneralDimensions(Parameter(&b, 0, lhs, "lhs"),
                            Parameter(&b, 1, rhs, "rhs"),
                            /*window_strides=*/{1, 1}, Padding::kValid, dnums);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedDot) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Dot(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedDotGeneral) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, <=3, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[2, 4, 5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, <=3, 5]"));
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(2);
dnums.add_rhs_contracting_dimensions(1);
dnums.add_lhs_batch_dimensions(0);
dnums.add_rhs_batch_dimensions(0);
DotGeneral(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"), dnums);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedDynamicSlice) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape start_indices, ParseShape("s32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2, 2]"));
  DynamicSlice(Parameter(&b, 0, operand, "operand"),
               {Parameter(&b, 1, start_indices, "start_indices0"),
                Parameter(&b, 2, start_indices, "start_indices1")},
               /*slice_sizes=*/{2, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedDynamicUpdateSlice) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape update, ParseShape("f32[?, 5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape start_indices, ParseShape("s32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
DynamicUpdateSlice(Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, update, "update"),
{Parameter(&b, 2, start_indices, "start_indices0"),
Parameter(&b, 3, start_indices, "start_indices1")});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedFftWithFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[2, <=5, ?]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[2, <=5, ?]"));
  Fft(Parameter(&b, 0, operand, "operand"), FftType::FFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedFftWithIFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[2, <=5, ?]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[2, <=5, ?]"));
  Fft(Parameter(&b, 0, operand, "operand"), FftType::IFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedFftWithRFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f64[2, <=5, ?]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c128[2, <=5, 6]"));
  Fft(Parameter(&b, 0, operand, "operand"), FftType::RFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedFftWithIRFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c128[2, <=5, ?]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f64[2, <=5, 10]"));
  Fft(Parameter(&b, 0, operand, "operand"), FftType::IRFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedGather) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, 4, 2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape start_indices,
ParseShape("s32[?, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?, 2, 2]"));
GatherDimensionNumbers dimension_numbers;
dimension_numbers.add_offset_dims(2);
dimension_numbers.add_offset_dims(3);
dimension_numbers.add_collapsed_slice_dims(0);
dimension_numbers.add_start_index_map(1);
dimension_numbers.add_start_index_map(0);
dimension_numbers.set_index_vector_dim(2);
  Gather(Parameter(&b, 0, operand, "operand"),
         Parameter(&b, 1, start_indices, "start_indices"), dimension_numbers,
         /*slice_sizes=*/{1, 2, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedGetTupleElement) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
GetTupleElement(Tuple(&b, {Parameter(&b, 0, operand, "operand")}), 0);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<xla::HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedInfeed) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  Infeed(&b, shape, /*config=*/"");
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<xla::HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedInfeedWithToken) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("(f32[?, 10], token[])"));
  InfeedWithToken(CreateToken(&b), shape, /*config=*/"");
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<xla::HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedMap) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand0, ParseShape("f32[2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape operand1, ParseShape("f32[?, 3, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2, ?, ?]"));
XlaComputation computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"));
TF_ASSERT_OK_AND_ASSIGN(computation, sub_builder->Build());
}
  Map(&b,
      {Parameter(&b, 0, operand0, "operand0"),
       Parameter(&b, 1, operand1, "operand1")},
      computation, /*dimensions=*/{0, 1, 2}, /*static_operands=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedOptimizationBarrier) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
OptimizationBarrier(Parameter(&b, 0, operand, "operand"));
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedOr) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("s32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("s32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("s32[?, ?, 2, 2, <=2, <=2, ?]"));
  Or(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
     /*broadcast_dimensions=*/empty_array);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedOutfeed) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape_with_layout,
ParseShape("f32[?, 10]"));
  Outfeed(Parameter(&b, 0, operand, "operand"), shape_with_layout,
          /*outfeed_config=*/"");
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedOutfeedWithToken) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape_with_layout,
ParseShape("f32[?, 10]"));
  OutfeedWithToken(Parameter(&b, 0, operand, "operand"), CreateToken(&b),
                   shape_with_layout, /*outfeed_config=*/"");
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedPad) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 21]"));
PaddingConfig padding_config;
for (int i = 0; i < 2; i++) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(1);
dimension->set_edge_padding_high(1);
dimension->set_interior_padding(1);
}
  Pad(Parameter(&b, 0, operand, "operand"),
      /*padding_value=*/ConstantR0<float>(&b, 0), padding_config);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedRecv) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_DEVICE);
Recv(&b, shape, handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedRecvFromHost) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::HOST_TO_DEVICE);
RecvFromHost(CreateToken(&b), shape, handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedRecvWithToken) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_DEVICE);
RecvWithToken(CreateToken(&b), shape, handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedReduce) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {7}, {false});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
TF_ASSERT_OK_AND_ASSIGN(const Shape input0, ParseShape("f32[7, 5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape input1, ParseShape("f32[?, 5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape input2, ParseShape("f32[7, ?]"));
const Shape scalar_f32 = ShapeUtil::MakeShape(F32, {});
const XlaOp init = Parameter(&b, 3, scalar_f32, "init");
XlaBuilder bsum(TestName());
std::vector<XlaOp> output_operands = {
Add(Parameter(&bsum, 0, scalar_f32, "arg0"),
Parameter(&bsum, 1, scalar_f32, "arg1")),
Add(Parameter(&bsum, 2, scalar_f32, "arg2"),
Parameter(&bsum, 3, scalar_f32, "arg3")),
Add(Parameter(&bsum, 4, scalar_f32, "arg4"),
Parameter(&bsum, 5, scalar_f32, "arg5"))};
Tuple(&bsum, absl::MakeSpan(output_operands));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation sum, bsum.Build());
  Reduce(
      &b,
      {Parameter(&b, 0, input0, "input0"), Parameter(&b, 1, input1, "input1"),
       Parameter(&b, 2, input2, "input2")},
      {init, init, init}, sum, /*dimensions_to_reduce=*/{1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReducePrecision) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  ReducePrecision(Parameter(&b, 0, operand, "operand"), /*exponent_bits=*/2,
                  /*mantissa_bits=*/2);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReduceScatter) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, operand, "arg0"),
Parameter(sub_builder.get(), 1, operand, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(computation, sub_builder->Build());
}
ReplicaGroup replica_group;
replica_group.add_replica_ids(0);
replica_group.add_replica_ids(1);
  ReduceScatter(Parameter(&b, 0, operand, "operand"), computation,
                /*scatter_dimension=*/0, /*shard_count=*/2,
                /*replica_groups=*/{replica_group});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReduceWindow) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape input, ParseShape("f32[?, 4, 8]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 3, 5]"));
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation sum, bsum.Build());
  ReduceWindow(Parameter(&b, 0, input, "input"), ConstantR0<float>(&b, 0.f),
               sum, /*window_dimensions=*/{1, 2, 4},
               /*window_strides=*/{1, 1, 1}, Padding::kValid);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReshape) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2,3]"));
  Reshape(Parameter(&b, 0, operand, "operand"), /*dimensions=*/{0},
          /*new_sizes=*/{2, 3});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReshapeUnsupportedOutputShape) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[6]"));
  Reshape(Parameter(&b, 0, operand, "operand"), /*dimensions=*/{0},
          /*new_sizes=*/{Shape::kUnboundedSize, Shape::kUnboundedSize});
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr(
"Reshaping with unbounded result shape is not supported.")));
}
TEST(XlaBuilderTest, UnboundedReshapeUnsupportedInferredShape) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?]"));
Reshape(operand, Parameter(&b, 0, operand, "operand"));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr(
"Reshaping with unbounded result shape is not supported.")));
}
TEST(XlaBuilderTest, UnboundedReverse) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Rev(Parameter(&b, 0, operand, "operand"), {0, 1});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedRngBitGenerator) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape initial_state, ParseShape("u32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("u32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("(u32[?, 10], u32[?, 10])"));
RngBitGenerator(RandomAlgorithm::RNG_DEFAULT,
Parameter(&b, 0, initial_state, "initial_state"), shape);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedRngNormal) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
RngNormal(Parameter(&b, 0, ShapeUtil::MakeScalarShape(F32), "mu"),
Parameter(&b, 1, ShapeUtil::MakeScalarShape(F32), "sigma"), shape);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedRngUniform) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
RngUniform(Parameter(&b, 0, ShapeUtil::MakeScalarShape(F32), "a"),
Parameter(&b, 1, ShapeUtil::MakeScalarShape(F32), "b"), shape);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedScatter) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape input, ParseShape("f32[?, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape scatter_indices,
ParseShape("s32[?, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape updates, ParseShape("f32[?, ?, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?, ?]"));
XlaComputation update_computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"));
TF_ASSERT_OK_AND_ASSIGN(update_computation, sub_builder->Build());
}
ScatterDimensionNumbers dimension_numbers;
dimension_numbers.add_update_window_dims(2);
dimension_numbers.add_update_window_dims(3);
dimension_numbers.add_inserted_window_dims(0);
dimension_numbers.add_scatter_dims_to_operand_dims(1);
dimension_numbers.add_scatter_dims_to_operand_dims(0);
dimension_numbers.set_index_vector_dim(2);
  Scatter(Parameter(&b, 0, input, "input"),
          Parameter(&b, 1, scatter_indices, "scatter_indices"),
          Parameter(&b, 2, updates, "updates"), update_computation,
          dimension_numbers, /*indices_are_sorted=*/false,
          /*unique_indices=*/false);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelect) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("pred[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("f32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs,
ParseShape("f32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("f32[1, 1, 2, 2, <=2, <=2, ?]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelectScalarPred) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelectScalarOnTrueOnFalseImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelectScalarPredOnFalseImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelectScalarPredOnTrueImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest,
UnboundedSelectUnsupportedDegenerateOperandImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[1]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("Unimplemented implicit broadcast.")));
}
TEST(XlaBuilderTest, UnboundedSelectAndScatter) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape source, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape init_value, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation select;
{
const std::unique_ptr<XlaBuilder> sub_builder =
b.CreateSubBuilder("compare");
Compare(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"),
ComparisonDirection::kGe);
TF_ASSERT_OK_AND_ASSIGN(select, sub_builder->Build());
}
XlaComputation scatter;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"));
TF_ASSERT_OK_AND_ASSIGN(scatter, sub_builder->Build());
}
  SelectAndScatter(Parameter(&b, 0, operand, "operand"), select,
                   /*window_dimensions=*/std::array<int64_t, 2>({3, 1}),
                   /*window_strides=*/std::array<int64_t, 2>({2, 1}),
                   Padding::kValid, Parameter(&b, 1, source, "source"),
                   Parameter(&b, 2, init_value, "init_value"), scatter);
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSend) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_DEVICE);
Send(Parameter(&b, 0, operand, "operand"), handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedSendToHost) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape_with_layout,
ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_HOST);
  SendToHost(Parameter(&b, 0, operand, "operand"), CreateToken(&b),
             shape_with_layout, handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedSendWithToken) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_DEVICE);
SendWithToken(Parameter(&b, 0, operand, "operand"),
CreateToken(&b), handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedSlice) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[1, <=3, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[1, <=2, 3]"));
  Slice(Parameter(&b, 0, operand, "operand"), /*start_indices=*/{0, 1, 2},
        /*limit_indices=*/{1, 3, 5}, /*strides=*/{1, 1, 1});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSort) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation comparator;
{
const std::unique_ptr<XlaBuilder> sub_builder =
b.CreateSubBuilder("compare");
Compare(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"),
ComparisonDirection::kLt);
TF_ASSERT_OK_AND_ASSIGN(comparator, sub_builder->Build());
}
  Sort({Parameter(&b, 0, operand, "operand")}, comparator, /*dimension=*/0,
       /*is_stable=*/true);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedTranspose) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand,
ParseShape("f32[1, ?, 2, ?, <=2]{4,3,2,1,0}"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("f32[<=2, 1, ?, 2, ?]{0,2,3,4,1}"));
  Transpose(Parameter(&b, 0, operand, "operand"),
            /*permutation=*/{4, 0, 3, 2, 1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedTriangularSolve) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape a_shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape b_shape, ParseShape("f32[10, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[10, ?]"));
  TriangularSolve(Parameter(&b, 0, a_shape, "a"),
                  Parameter(&b, 1, b_shape, "b"), /*left_side=*/true,
                  /*lower=*/true, /*unit_diagonal=*/false,
                  /*transpose_a=*/TriangularSolveOptions::TRANSPOSE);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedTuple) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
const Shape expected = ShapeUtil::MakeTupleShape({operand});
Tuple(&b, {Parameter(&b, 0, operand, "operand")});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedWhile) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape init, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?]"));
XlaComputation add;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"));
TF_ASSERT_OK_AND_ASSIGN(add, sub_builder->Build());
}
XlaComputation condition;
{
const std::unique_ptr<XlaBuilder> sub_builder =
b.CreateSubBuilder("compare");
    Ge(ConstantR0<float>(sub_builder.get(), 10.0f),
       Reduce(Parameter(sub_builder.get(), 0, init, "prev"),
              ConstantR0<float>(sub_builder.get(), 0.0f), add,
              /*dimensions_to_reduce=*/{0}));
TF_ASSERT_OK_AND_ASSIGN(condition, sub_builder->Build());
}
XlaComputation body;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
    Add(ConstantR1<float>(sub_builder.get(), {1.0f}),
        Parameter(sub_builder.get(), 0, init, "prev"),
        /*broadcast_dimensions=*/{0});
TF_ASSERT_OK_AND_ASSIGN(body, sub_builder->Build());
}
While(condition, body, Parameter(&b, 0, init, "init"));
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedXor) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("s32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("s32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("s32[?, ?, 2, 2, <=2, <=2, ?]"));
  Xor(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
      /*broadcast_dimensions=*/empty_array);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
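// The two parameterized suites below sweep the elementwise unary and binary
// ops over unbounded dynamic shapes; empty_array and zero_array (defined
// earlier in this file) supply the broadcast_dimensions argument of each
// binary test case.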
INSTANTIATE_TEST_SUITE_P(UnboundedDynamism, XlaBuilderUnboundedUnaryOpTest,
::testing::ValuesIn<UnaryOpTestCase>(
{{"f32[?]", "f32[?]", &Abs},
{"f32[?]", "f32[?]", &Cbrt},
{"f32[?]", "f32[?]", &Ceil},
{"u32[?]", "u32[?]", &Clz},
{"f32[?]", "f32[?]", &Cos},
{"f32[?]", "f32[?]", &Erf},
{"f32[?]", "f32[?]", &Exp},
{"f32[?]", "f32[?]", &Expm1},
{"f32[?]", "f32[?]", &Floor},
{"f32[?]", "f32[?]", &Imag},
{"f32[?]", "pred[?]", &IsFinite},
{"f32[?]", "f32[?]", &Log},
{"f32[?]", "f32[?]", &Log1p},
{"f32[?]", "f32[?]", &Logistic},
{"f32[?]", "f32[?]", &Neg},
{"s32[?]", "s32[?]", &Not},
{"u32[?]", "u32[?]", &PopulationCount},
{"f32[?]", "f32[?]", &Real},
{"f32[?]", "f32[?]", &Round},
{"f32[?]", "f32[?]", &RoundNearestEven},
{"f32[?]", "f32[?]", &Rsqrt},
{"f32[?]", "f32[?]", &Sign},
{"f32[?]", "f32[?]", &Sin},
{"f32[?]", "f32[?]", &Sqrt},
{"f32[?]", "f32[?]", &Tanh}}));
INSTANTIATE_TEST_SUITE_P(
UnboundedDynamism, XlaBuilderUnboundedBinaryOpTest,
::testing::ValuesIn<BinaryOpTestCase>({
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Add},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &Add},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Atan2},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "c64[?, ?, 2, 2, <=2, <=2, ?]",
&Complex},
{"f32[?, 10]", "f32[1]", zero_array,
"c64[?, 10]", &Complex},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Div},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &Div},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Max},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &Max},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Min},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &Min},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Mul},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &Mul},
{"f32[?, 10]", "f32[1]", zero_array,
"pred[?, 10]", &Ne},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Pow},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &Pow},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Rem},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &Rem},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&ShiftLeft},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &ShiftLeft},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&ShiftRightArithmetic},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &ShiftRightArithmetic},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&ShiftRightLogical},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &ShiftRightLogical},
{"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
empty_array, "f32[?, ?, 2, 2, <=2, <=2, ?]",
&Sub},
{"f32[?, 10]", "f32[1]", zero_array,
"f32[?, 10]", &Sub},
}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/xla_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/xla_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
76cfe482-6e50-4a45-9dcc-84e407a2fbd2 | cpp | tensorflow/tensorflow | costmodel | tensorflow/core/graph/costmodel.cc | tensorflow/core/graph/costmodel_test.cc | #include "tensorflow/core/graph/costmodel.h"
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
const Microseconds kDefaultTimeEstimate(1);
const Microseconds kMinTimeEstimate(1);
}  // namespace
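// Derives min_count_ from the distribution of observed execution counts: half
// the median of the non-zero counts. Nodes executed fewer than min_count_
// times are treated as having too little data for reliable estimates.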
void CostModel::SuppressInfrequent() {
if (count_.empty()) return;
std::vector<int32> non_zero;
for (auto v : count_) {
if (v > 0) non_zero.push_back(v);
}
const size_t sz = non_zero.size();
if (sz > 0) {
std::nth_element(non_zero.begin(), non_zero.begin() + sz / 2,
non_zero.end());
int32_t median_value = non_zero[sz / 2];
min_count_ = median_value / 2;
VLOG(1) << "num non_zero vals: " << non_zero.size() << " median_value "
<< median_value;
} else {
min_count_ = 1;
}
}
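// Accumulates a per-device (local) cost model into this global one: execution
// counts and times are summed, and per-output byte totals are merged slot by
// slot, translating local node ids to global ones via the graph.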
void CostModel::MergeFromLocal(const Graph& g, const CostModel& cm) {
CHECK(is_global_);
CHECK(!cm.is_global());
for (const Node* n : g.nodes()) {
const int local_id = cm.Id(n);
const int global_id = Id(n);
if (local_id < 0 || global_id < 0) continue;
int num_slots = cm.slot_bytes_[local_id].size();
Ensure(global_id, num_slots);
count_[global_id] += cm.count_[local_id];
time_[global_id] += cm.time_[local_id];
if (num_slots > 0) {
if (slot_bytes_[global_id].empty()) {
slot_bytes_[global_id].resize(num_slots);
} else {
CHECK_EQ(num_slots, slot_bytes_[global_id].size());
}
for (int s = 0; s < num_slots; ++s) {
auto& current_v = slot_bytes_[global_id][s];
auto other_v = cm.slot_bytes_[local_id][s];
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
void CostModel::MergeFromGlobal(const CostModel& cm) {
CHECK(is_global_);
CHECK_EQ(true, cm.is_global());
const int num_nodes = cm.count_.size();
for (int i = num_nodes - 1; i >= 0; --i) {
int num_slots = cm.slot_bytes_[i].size();
Ensure(i, num_slots);
count_[i] += cm.count_[i];
time_[i] += cm.time_[i];
if (num_slots > 0) {
if (slot_bytes_[i].empty()) {
slot_bytes_[i].resize(num_slots);
} else {
CHECK_EQ(num_slots, slot_bytes_[i].size());
}
for (int s = 0; s < num_slots; ++s) {
auto& current_v = slot_bytes_[i][s];
auto other_v = cm.slot_bytes_[i][s];
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
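// Folds a StepStats profile into the global model. Each NodeExecStats adds
// one execution, its elapsed op time, and the requested bytes of every output
// slot, keyed through the node-name-to-cost-id map.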
void CostModel::MergeFromStats(const NodeNameToCostIdMap& map,
const StepStats& ss) {
CHECK(is_global_);
for (auto& ds : ss.dev_stats()) {
for (auto& ns : ds.node_stats()) {
NodeNameToCostIdMap::const_iterator iter = map.find(ns.node_name());
if (iter == map.end()) continue;
int32_t global_id = iter->second;
Ensure(global_id, ns.output_size());
int64_t elapsed_micros =
ns.op_end_rel_micros() - ns.op_start_rel_micros();
count_[global_id]++;
time_[global_id] += elapsed_micros;
for (auto& no : ns.output()) {
int si = no.slot();
if (static_cast<size_t>(si) >= slot_bytes_[global_id].size()) {
slot_bytes_[global_id].resize(1 + si);
}
auto& current_v = slot_bytes_[global_id][si];
auto other_v =
no.tensor_description().allocation_description().requested_bytes();
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
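// Grows the per-node bookkeeping vectors so an entry exists for `id`, and
// widens that node's per-slot vectors to cover at least `num_outputs` slots.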
void CostModel::Ensure(int id, int num_outputs) {
if (slot_bytes_.size() <= static_cast<size_t>(id)) {
slot_bytes_.resize(id + 1);
count_.resize(id + 1);
time_.resize(id + 1);
max_mem_usage_.resize(id + 1);
max_exec_time_.resize(id + 1);
output_port_alloc_ids_.resize(id + 1);
}
if (num_outputs > 0) {
auto perslot = &slot_bytes_[id];
auto output_port_alloc_ids = &output_port_alloc_ids_[id];
auto max_mem_usage = &max_mem_usage_[id];
CHECK_LE(perslot->size(), num_outputs);
DCHECK_EQ(output_port_alloc_ids->size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_mem.size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_shape.size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_type.size(), perslot->size());
perslot->resize(num_outputs, Bytes(-1));
output_port_alloc_ids->resize(num_outputs, -1);
max_mem_usage->output_port_mem.resize(num_outputs, Bytes(-1));
max_mem_usage->output_port_shape.resize(num_outputs, unknown_shape_);
max_mem_usage->output_port_type.resize(num_outputs, DT_INVALID);
}
}
void CostModel::SetNumOutputs(const Node* node, int num_outputs) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, 0);
auto perslot = &slot_bytes_[id];
if (!perslot->empty()) {
CHECK_EQ(num_outputs, perslot->size())
<< "Cannot resize slot_bytes, node=" << node->name();
}
Ensure(id, num_outputs);
}
void CostModel::RecordCount(const Node* node, int count) {
const int id = Id(node);
if (id < 0) return;
CHECK_LT(id, slot_bytes_.size());
count_[id] += count;
}
int32 CostModel::TotalCount(const Node* node) const {
const int id = Id(node);
if (id < 0) return 0;
return (static_cast<size_t>(id) < slot_bytes_.size()) ? count_[id] : 0;
}
void CostModel::RecordSize(const Node* node, int slot, Bytes bytes) {
const int id = Id(node);
if (id < 0) return;
CHECK_LT(id, slot_bytes_.size());
auto perslot = &slot_bytes_[id];
CHECK_LT(slot, perslot->size());
auto v = &(*perslot)[slot];
if (*v >= 0) {
*v += bytes;
} else {
*v = bytes;
}
}
Bytes CostModel::TotalBytes(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= slot_bytes_.size() ||
slot_bytes_[id].size() <= static_cast<size_t>(slot)) {
return Bytes(0);
}
return slot_bytes_[id][slot];
}
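// Average output size per execution; returns Bytes(0) until the node has been
// observed at least min_count_ times.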
Bytes CostModel::SizeEstimate(const Node* node, int slot) const {
int32_t count = TotalCount(node);
if (count < min_count_) return Bytes(0);
return TotalBytes(node, slot) / std::max(1, TotalCount(node));
}
void CostModel::RecordTime(const Node* node, Microseconds time) {
const int id = Id(node);
if (id < 0) return;
DCHECK(node->IsOp()) << node->DebugString();
Ensure(id, node->num_outputs());
time_[id] += time;
}
Microseconds CostModel::TotalTime(const Node* node) const {
DCHECK(node->IsOp()) << node->DebugString();
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= time_.size() ||
time_[id] < Microseconds(0)) {
return Microseconds(0);
}
return time_[id];
}
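// Average runtime per execution, clamped below by kMinTimeEstimate; returns
// the minimum estimate until the node has more than min_count_ observations.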
Microseconds CostModel::TimeEstimate(const Node* node) const {
int32_t count = TotalCount(node);
if (count <= min_count_) return kMinTimeEstimate;
return std::max(kMinTimeEstimate, TotalTime(node) / std::max(1, count));
}
void CostModel::CheckInitialized(const Graph& graph) const {
for (const Node* n : graph.op_nodes()) {
CHECK(static_cast<size_t>(n->id()) < time_.size() &&
time_[n->id()] >= Microseconds(0))
<< ": no time estimate for " << n->DebugString();
CHECK(static_cast<size_t>(n->id()) < slot_bytes_.size())
<< ": no size estimate for " << n->DebugString();
const auto& perslot = slot_bytes_[n->id()];
for (size_t i = 0; i < perslot.size(); i++) {
CHECK_GE(perslot[i], Bytes(0)) << ": no size estimate for output# " << i
<< " of " << n->DebugString();
}
}
}
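// Tracks the largest memory ever attributed to an output slot. A negative
// `bytes` is replaced by a shape/dtype-derived lower bound before comparing.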
void CostModel::RecordMaxMemorySize(const Node* node, int output_slot,
Bytes bytes,
const TensorShapeProto& tensor_shape,
const DataType& dtype) {
const int id = Id(node);
if (id < 0) return;
if (output_slot >= node->num_outputs()) {
LOG(ERROR) << "Unexpected output slot for node " << node->DebugString()
<< ". Got " << output_slot << " but its num_outputs is "
<< node->num_outputs();
return;
}
Ensure(id, node->num_outputs());
auto& current_max = max_mem_usage_[id].output_port_mem[output_slot];
if (bytes.value() < 0) {
bytes = MinTensorMemoryUsage(tensor_shape, dtype);
}
if (bytes.value() > current_max.value()) {
current_max = bytes.value();
max_mem_usage_[id].output_port_shape[output_slot] = tensor_shape;
max_mem_usage_[id].output_port_type[output_slot] = dtype;
}
}
Bytes CostModel::MaxMemorySize(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_mem.size() <= static_cast<size_t>(slot)) {
return Bytes(0);
}
return max_mem_usage_[id].output_port_mem[slot];
}
const TensorShapeProto& CostModel::MaxMemoryShape(const Node* node,
int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_shape.size() <=
static_cast<size_t>(slot)) {
return unknown_shape_;
}
return max_mem_usage_[id].output_port_shape[slot];
}
DataType CostModel::MaxMemoryType(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_type.size() <= static_cast<size_t>(slot)) {
return DT_INVALID;
}
return max_mem_usage_[id].output_port_type[slot];
}
Bytes CostModel::TempMemorySize(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size()) {
return Bytes(0);
}
return max_mem_usage_[id].temp_memory_size;
}
Bytes CostModel::PersistentMemorySize(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size()) {
return Bytes(0);
}
return max_mem_usage_[id].persistent_memory_size;
}
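// Copies temporary/persistent memory sizes from the kernel's MemoryStats and
// remembers the allocation ids of persistent tensors.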
void CostModel::RecordMemoryStats(const Node* node,
const MemoryStats& memory_stats) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
max_mem_usage_[id].temp_memory_size = memory_stats.temp_memory_size();
max_mem_usage_[id].persistent_memory_size =
memory_stats.persistent_memory_size();
for (int64_t alloc_id : memory_stats.persistent_tensor_alloc_ids()) {
if (alloc_id > 0) {
persistent_alloc_ids_.insert(alloc_id);
}
}
}
void CostModel::RecordMaxExecutionTime(const Node* node, Microseconds time) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
max_exec_time_[id] = std::max(max_exec_time_[id], time);
}
Microseconds CostModel::MaxExecutionTime(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_exec_time_.size()) {
return Microseconds(0);
}
return max_exec_time_[id];
}
void CostModel::RecordAllocationId(const Node* node, int output_slot,
int64_t alloc_id) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
output_port_alloc_ids_[id][output_slot] = alloc_id;
}
int64_t CostModel::AllocationId(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= output_port_alloc_ids_.size() ||
output_port_alloc_ids_[id].size() <= static_cast<size_t>(slot)) {
return -1;
}
return output_port_alloc_ids_[id][slot];
}
bool CostModel::IsPersistentTensor(const Node* node, int64_t alloc_id) const {
  return persistent_alloc_ids_.count(alloc_id) > 0;
}
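// Models a cross-device copy as transfer time plus a latency floor:
//   micros = bytes / (estimated_gbps * 1000 / 8) + network_latency_millis * 1000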
Microseconds CostModel::CopyTimeEstimate(Bytes b, double network_latency_millis,
double estimated_gbps) {
int64_t copy_bytes = b.value();
const double bytes_per_usec = estimated_gbps * 1000.0 / 8;
const double min_micros = network_latency_millis * 1000.0;
return Microseconds(
static_cast<int64_t>(copy_bytes / bytes_per_usec + min_micros));
}
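// Converts a math-op count to time assuming a throughput of roughly one
// thousand ops per microsecond.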
Microseconds CostModel::ComputationTimeEstimate(int64_t math_ops) {
return Microseconds(math_ops / 1000);
}
void CostModel::IncrementUpdateTimes() { update_times_++; }
int32 CostModel::GetUpdateTimes() const { return update_times_; }
namespace {
static void AddNodesToCostModel(const Graph& g, CostModel* cost_model) {
for (Node* n : g.nodes()) {
const int num_outputs = n->num_outputs();
cost_model->SetNumOutputs(n, num_outputs);
for (int output = 0; output < num_outputs; output++) {
cost_model->RecordSize(n, output, Bytes(1));
}
}
}
static void AssignSizes(const Graph& g, CostModel* cost_model) {
for (const Edge* e : g.edges()) {
if (e->IsControlEdge()) {
continue;
}
const Node* src = e->src();
Bytes size(1);
cost_model->RecordSize(src, e->src_output(), size);
}
}
static Microseconds TimeEstimateForNode(CostModel* cost_model, Node* n) {
CHECK(n->IsOp());
VLOG(2) << "Node " << n->id() << ": " << n->name()
<< " type_string: " << n->type_string();
if (IsConstant(n) || IsVariable(n)) {
return Microseconds(0);
}
return kDefaultTimeEstimate;
}
static void EstimateComputationCosts(const Graph& g, CostModel* cost_model) {
for (Node* n : g.nodes()) {
if (!n->IsOp()) continue;
cost_model->RecordTime(n, TimeEstimateForNode(cost_model, n));
}
}
}  // namespace
void CostModel::InitFromGraph(const Graph& g) {
const int num_node_ids = g.num_node_ids();
slot_bytes_.reserve(num_node_ids);
count_.reserve(num_node_ids);
time_.reserve(num_node_ids);
max_mem_usage_.reserve(num_node_ids);
max_exec_time_.reserve(num_node_ids);
output_port_alloc_ids_.reserve(num_node_ids);
AddNodesToCostModel(g, this);
AssignSizes(g, this);
EstimateComputationCosts(g, this);
CheckInitialized(g);
}
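// Serializes the model into a CostGraphDef. Outputs that share an allocation
// id with an input are emitted as aliases of that input, and persistent
// (non-aliased) tensors are reported with size 0.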
void CostModel::AddToCostGraphDef(const Graph* graph,
CostGraphDef* cost_graph) const {
std::vector<const Edge*> inputs;
std::vector<const Edge*> control_inputs;
int offset = cost_graph->node_size();
for (const Node* n : graph->nodes()) {
CostGraphDef::Node* cnode = cost_graph->add_node();
cnode->set_name(n->name());
cnode->set_device(n->assigned_device_name());
cnode->set_id(GlobalId(n, offset));
inputs.clear();
inputs.resize(n->num_inputs(), nullptr);
control_inputs.clear();
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
control_inputs.push_back(e);
} else {
inputs[e->dst_input()] = e;
}
}
std::sort(control_inputs.begin(), control_inputs.end(),
[this](Edge const* a, Edge const* b) {
return Id(a->src()) < Id(b->src());
});
for (const Edge* e : inputs) {
CostGraphDef::Node::InputInfo* input_info = cnode->add_input_info();
input_info->set_preceding_node(GlobalId(e->src(), offset));
input_info->set_preceding_port(e->src_output());
}
for (int i = 0; i < n->num_outputs(); i++) {
CostGraphDef::Node::OutputInfo* output_info = cnode->add_output_info();
int64_t alloc_id = AllocationId(n, i);
int64_t alias_to_input = -1;
for (const Edge* e : inputs) {
int64_t input_alloc_id = AllocationId(e->src(), e->src_output());
if (input_alloc_id == alloc_id) {
alias_to_input = e->dst_input();
break;
}
}
output_info->set_alias_input_port(alias_to_input);
output_info->set_dtype(MaxMemoryType(n, i));
*output_info->mutable_shape() = MaxMemoryShape(n, i);
if (alias_to_input < 0 && IsPersistentTensor(n, alloc_id)) {
output_info->set_size(0);
} else {
output_info->set_size(MaxMemorySize(n, i).value());
}
}
for (const Edge* e : control_inputs) {
cnode->add_control_input(GlobalId(e->src(), offset));
}
cnode->set_temporary_memory_size(TempMemorySize(n).value());
cnode->set_persistent_memory_size(PersistentMemorySize(n).value());
cnode->set_compute_cost(MaxExecutionTime(n).value());
cnode->set_is_final(n->IsSend());
}
}
void CostModel::WriteSummaryToLog() const {
LOG(INFO) << " min_count_=" << min_count_;
for (size_t i = 0; i < count_.size(); ++i) {
LOG(INFO) << "Node " << i << " count " << count_[i] << " total time "
<< time_[i] << " avg time "
<< (time_[i] / (std::max(1, count_[i])));
}
}
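// Lower bound on a tensor's memory: the product of its dimension sizes (each
// clamped to at least 1) times the element size, or Bytes(-1) if the rank is
// unknown.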
Bytes CostModel::MinTensorMemoryUsage(const TensorShapeProto& tensor_shape,
const DataType& dtype) {
if (tensor_shape.unknown_rank()) {
return Bytes(-1);
}
size_t num_coefficients = 1;
for (const TensorShapeProto::Dim& dim : tensor_shape.dim()) {
num_coefficients *= std::max<size_t>(dim.size(), 1);
}
return Bytes(num_coefficients * DataTypeSize(dtype));
}
} | #include "tensorflow/core/graph/costmodel.h"
#include <memory>
#include <string>
#include <unordered_map>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/common_runtime/costmodel_manager.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/step_stats_collector.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
using ::testing::Not;
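// Compares two TensorShapeProtos by unknown-rank flag, rank, and
// per-dimension sizes.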
MATCHER_P(ShapeProtoEquals, other, "") {
if (arg.unknown_rank()) {
return other.unknown_rank();
}
if (arg.dim_size() != other.dim_size()) {
return false;
}
for (int i = 0; i < arg.dim_size(); ++i) {
if (arg.dim(i).size() != other.dim(i).size()) {
return false;
}
}
return true;
}
static void InitGraph(const string& s, Graph* graph) {
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(s, &graph_def)) << s;
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, graph));
}
static void InitModelFromGraph(const Graph& graph, CostModel& cm) {
for (const auto& node : graph.nodes()) {
cm.SetNumOutputs(node, node->num_outputs());
}
}
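// Builds a four-node diamond: inputs A and B both feed the Mul nodes C and D.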
static std::unique_ptr<Graph> CreateBasicTestGraph() {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }",
graph.get());
return graph;
}
Node* FindNode(const Graph& graph, const std::string& name) {
for (const auto& node : graph.nodes()) {
if (node->name() == name) {
return node;
}
}
return nullptr;
}
Node* AddNode(Graph& graph, const string& name, const string& node_type,
int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
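// Adds a DeviceStepStats entry for `device_name` containing one (otherwise
// empty) NodeExecStats record per node in `graph`.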
static void GenerateStepStats(Graph* graph, StepStats* step_stats,
const string& device_name) {
DeviceStepStats* device_stepstats = step_stats->add_dev_stats();
device_stepstats->set_device(device_name);
for (const auto& node_def : graph->nodes()) {
NodeExecStats* node_stats = device_stepstats->add_node_stats();
node_stats->set_node_name(node_def->name());
}
}
REGISTER_OP("Input").Output("o: float").SetIsStateful();
TEST(CostModelTest, WorksWithManager) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto graph1 = std::make_unique<Graph>(OpRegistry::Global());
auto graph2 = std::make_unique<Graph>(OpRegistry::Global());
InitGraph(
"node { name: 'A1' op: 'Input'}"
"node { name: 'B1' op: 'Input'}"
"node { name: 'C1' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A1', 'B1'] }"
"node { name: 'D1' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A1', 'B1'] }",
graph1.get());
InitGraph(
"node { name: 'A2' op: 'Input'}"
"node { name: 'B2' op: 'Input'}"
"node { name: 'C2' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A2', 'B2'] }"
"node { name: 'D2' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A2', 'B2'] }",
graph2.get());
StepStats step_stats;
GenerateStepStats(graph1.get(), &step_stats, "DummyDevice1");
GenerateStepStats(graph2.get(), &step_stats, "DummyDevice2");
StepStatsCollector collector(&step_stats);
std::unordered_map<string, const Graph*> device_map;
device_map["DummyDevice1"] = graph1.get();
device_map["DummyDevice2"] = graph2.get();
CostModelManager cost_model_manager;
collector.BuildCostModel(&cost_model_manager, device_map);
CostGraphDef cost_graph_def;
TF_ASSERT_OK(
cost_model_manager.AddToCostGraphDef(graph1.get(), &cost_graph_def));
TF_ASSERT_OK(
cost_model_manager.AddToCostGraphDef(graph2.get(), &cost_graph_def));
ASSERT_EQ(cost_graph_def.node_size(), 12);
absl::flat_hash_map<int32, const CostGraphDef::Node> ids;
  for (const auto& node : cost_graph_def.node()) {
int32_t index = node.id();
auto result = ids.insert({index, node});
EXPECT_TRUE(result.second);
}
}
TEST(CostModelTest, GlobalId) {
auto graph = CreateBasicTestGraph();
CostModel cm_local(false);
CostModel cm_global(true);
constexpr int kOffset = 7;
for (const auto& node : graph->nodes()) {
EXPECT_EQ(cm_local.GlobalId(node, kOffset), node->id() + kOffset);
EXPECT_EQ(cm_global.GlobalId(node, kOffset), node->cost_id());
}
}
TEST(CostModelTest, RecordTime) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kMicrosPerIter = 1000;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
cm.RecordTime(node, node->id() * Microseconds(kMicrosPerIter));
}
}
for (const auto& node : graph->op_nodes()) {
EXPECT_EQ(cm.TotalTime(node),
Microseconds(node->id() * kIters * kMicrosPerIter));
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalTime(E), Microseconds(0));
}
TEST(CostModelTest, RecordCount) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kCountPerIter = 4;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
cm.RecordCount(node, node->id() * kCountPerIter);
}
}
for (const auto& node : graph->op_nodes()) {
EXPECT_EQ(cm.TotalCount(node), node->id() * kIters * kCountPerIter);
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalCount(E), 0);
}
TEST(CostModelTest, RecordSize) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kBytesPerIter = 4;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
for (int slot = 0; slot < node->num_outputs(); ++slot) {
cm.RecordSize(node, slot, Bytes((node->id() + slot) * kBytesPerIter));
}
}
}
for (const auto& node : graph->op_nodes()) {
for (int slot = 0; slot < node->num_outputs(); ++slot) {
EXPECT_EQ(cm.TotalBytes(node, slot),
Bytes((node->id() + slot) * kIters * kBytesPerIter));
}
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalBytes(E, 0), Bytes(0));
}
TEST(CostModelTest, SizeEstimate) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
constexpr int kBytesPerCount = 31;
constexpr int kCount = 17;
cm.RecordCount(C, kCount);
cm.RecordSize(C, 0, Bytes(kCount * kBytesPerCount));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(kBytesPerCount));
}
TEST(CostModelTest, TimeEstimate) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
constexpr int kMicrosPerCount = 31;
constexpr int kCount = 17;
cm.RecordCount(C, kCount);
cm.RecordTime(C, Microseconds(kCount * kMicrosPerCount));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(kMicrosPerCount));
}
TensorShapeProto CreateTensorShapeProto(absl::Span<const int64_t> dims) {
TensorShapeProto shape;
for (int i = 0; i < dims.size(); ++i) {
shape.add_dim()->set_size(dims[i]);
}
return shape;
}
int64_t Count(const TensorShapeProto& shape) {
int64_t count = 1;
for (int i = 0; i < shape.dim_size(); ++i) {
count *= shape.dim(i).size();
}
return count;
}
TEST(CostModelTest, RecordMaxMemorySize) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
Node* C = FindNode(*graph, "C");
InitModelFromGraph(*graph, cm);
EXPECT_EQ(cm.MaxMemorySize(C, 0), Bytes(-1));
{
const TensorShapeProto shape = CreateTensorShapeProto({2, 5, 10});
const DataType dtype = DataType::DT_FLOAT;
const Bytes bytes = Bytes(Count(shape) * sizeof(float));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), bytes);
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({3, 6, 11});
const DataType dtype = DataType::DT_DOUBLE;
const Bytes bytes = Bytes(Count(shape) * sizeof(double));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), bytes);
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({1, 1, 1});
const DataType dtype = DataType::DT_BFLOAT16;
const Bytes bytes = Bytes(Count(shape) * sizeof(double));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_GT(cm.MaxMemorySize(C, 0), bytes);
EXPECT_NE(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), Not(ShapeProtoEquals(shape)));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({100, 100, 100});
const DataType dtype = DataType::DT_BFLOAT16;
cm.RecordMaxMemorySize(C, 0, Bytes(-1), shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), Bytes(Count(shape) * sizeof(bfloat16)));
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.MaxMemorySize(E, 0), Bytes(0));
EXPECT_THAT(cm.MaxMemoryType(E, 0), DataType::DT_INVALID);
TensorShapeProto unknown;
unknown.set_unknown_rank(true);
EXPECT_THAT(cm.MaxMemoryShape(E, 0), ShapeProtoEquals(unknown));
}
TEST(CostModelTest, RecordMaxExecutionTime) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(0));
cm.RecordMaxExecutionTime(C, Microseconds(13));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(13));
cm.RecordMaxExecutionTime(C, Microseconds(27));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(27));
cm.RecordMaxExecutionTime(C, Microseconds(9));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(27));
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.MaxExecutionTime(E), Microseconds(0));
}
TEST(CostModelTest, RecordMemoryStats) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
MemoryStats stats;
stats.set_temp_memory_size(256);
stats.set_persistent_memory_size(16);
stats.add_persistent_tensor_alloc_ids(1);
stats.add_persistent_tensor_alloc_ids(3);
stats.add_persistent_tensor_alloc_ids(5);
stats.add_persistent_tensor_alloc_ids(5);
cm.RecordMemoryStats(C, stats);
EXPECT_EQ(cm.TempMemorySize(C), stats.temp_memory_size());
EXPECT_EQ(cm.PersistentMemorySize(C), stats.persistent_memory_size());
EXPECT_TRUE(cm.IsPersistentTensor(C, 1));
EXPECT_TRUE(cm.IsPersistentTensor(C, 3));
EXPECT_TRUE(cm.IsPersistentTensor(C, 5));
EXPECT_FALSE(cm.IsPersistentTensor(C, 31));
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TempMemorySize(E), Bytes(0));
EXPECT_EQ(cm.PersistentMemorySize(E), Bytes(0));
}
TEST(CostModelTest, RecordAllocationId) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
cm.RecordAllocationId(C, 0, 13);
EXPECT_EQ(cm.AllocationId(C, 0), 13);
EXPECT_EQ(cm.AllocationId(C, 7), -1);
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.AllocationId(E, 0), -1);
}
TEST(CostModelTest, CopyTimeEstimate) {
int64_t bytes = 32568;
double latency_ms = 10.2;
double gbps = 2.2;
double bytes_per_usec = gbps * 1000 / 8;
double cost_usecs = (bytes / bytes_per_usec + latency_ms * 1000);
EXPECT_EQ(CostModel::CopyTimeEstimate(Bytes(bytes), latency_ms, gbps),
Microseconds(static_cast<uint64_t>(cost_usecs)));
}
TEST(CostModelTest, ComputationTimeEstimate) {
constexpr int64_t kNumMathOps = 32150;
EXPECT_EQ(CostModel::ComputationTimeEstimate(kNumMathOps),
Microseconds(kNumMathOps / 1000));
}
TEST(CostModel, UpdateTimes) {
CostModel cm(false);
EXPECT_EQ(cm.GetUpdateTimes(), 0);
constexpr int kNumUpdates = 111;
for (int i = 0; i < kNumUpdates; ++i) {
cm.IncrementUpdateTimes();
}
EXPECT_EQ(cm.GetUpdateTimes(), kNumUpdates);
}
TEST(CostModel, SuppressInfrequent) {
CostModel cm(false);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Node* A = AddNode(*graph, "A", "Mul", 2);
Node* B = AddNode(*graph, "B", "Mul", 2);
  Node* C = AddNode(*graph, "C", "Mul", 2);
InitModelFromGraph(*graph, cm);
cm.RecordCount(A, 1000);
cm.RecordSize(A, 0, Bytes(8 * 1000));
cm.RecordTime(A, Microseconds(8 * 1000));
cm.RecordCount(B, 2000);
cm.RecordSize(B, 0, Bytes(2000 * 10));
cm.RecordTime(B, Microseconds(2000 * 10));
cm.RecordCount(C, 17);
cm.RecordSize(C, 0, Bytes(32 * 17));
cm.RecordTime(C, Microseconds(32 * 17));
EXPECT_EQ(cm.SizeEstimate(A, 0), Bytes(8));
EXPECT_EQ(cm.TimeEstimate(A), Microseconds(8));
EXPECT_EQ(cm.SizeEstimate(B, 0), Bytes(10));
EXPECT_EQ(cm.TimeEstimate(B), Microseconds(10));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(32));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(32));
cm.SuppressInfrequent();
EXPECT_EQ(cm.SizeEstimate(A, 0), Bytes(8));
EXPECT_EQ(cm.TimeEstimate(A), Microseconds(8));
EXPECT_EQ(cm.SizeEstimate(B, 0), Bytes(10));
EXPECT_EQ(cm.TimeEstimate(B), Microseconds(10));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(0));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(1));
}
TEST(CostModelTest, MergeFromLocal) {
CostModel cm_global(true);
CostModel cm_local(false);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm_global);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm_global.RecordCount(C, 23);
cm_global.RecordSize(C, 0, Bytes(23));
cm_global.RecordTime(C, Microseconds(123));
cm_global.RecordCount(D, 17);
cm_global.RecordSize(D, 0, Bytes(17));
cm_global.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
InitModelFromGraph(*graph, cm_local);
cm_local.RecordCount(E, 37);
cm_local.RecordSize(E, 0, Bytes(37));
cm_local.RecordTime(E, Microseconds(137));
cm_local.RecordCount(F, 41);
cm_local.RecordSize(F, 0, Bytes(41));
cm_local.RecordTime(F, Microseconds(141));
cm_local.RecordCount(C, 1);
cm_local.RecordSize(C, 0, Bytes(1));
cm_local.RecordTime(C, Microseconds(100));
cm_global.MergeFromLocal(*graph, cm_local);
EXPECT_EQ(cm_global.TotalCount(E), cm_local.TotalCount(E));
EXPECT_EQ(cm_global.TotalBytes(E, 0), cm_local.TotalBytes(E, 0));
EXPECT_EQ(cm_global.TotalTime(E), cm_local.TotalTime(E));
EXPECT_EQ(cm_global.TotalCount(F), cm_local.TotalCount(F));
EXPECT_EQ(cm_global.TotalBytes(F, 0), cm_local.TotalBytes(F, 0));
EXPECT_EQ(cm_global.TotalTime(F), cm_local.TotalTime(F));
  EXPECT_EQ(cm_global.TotalCount(C), 24);
EXPECT_EQ(cm_global.TotalBytes(C, 0), Bytes(24));
EXPECT_EQ(cm_global.TotalTime(C), Microseconds(223));
}
TEST(CostModelTest, MergeFromGlobal) {
CostModel cm1(true);
CostModel cm2(true);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm1);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm1.RecordCount(C, 23);
cm1.RecordSize(C, 0, Bytes(23));
cm1.RecordTime(C, Microseconds(123));
cm1.RecordCount(D, 17);
cm1.RecordSize(D, 0, Bytes(17));
cm1.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
InitModelFromGraph(*graph, cm2);
cm2.RecordCount(E, 37);
cm2.RecordSize(E, 0, Bytes(37));
cm2.RecordTime(E, Microseconds(137));
cm2.RecordCount(F, 41);
cm2.RecordSize(F, 0, Bytes(41));
cm2.RecordTime(F, Microseconds(141));
cm2.RecordCount(C, 1);
cm2.RecordSize(C, 0, Bytes(1));
cm2.RecordTime(C, Microseconds(100));
cm1.MergeFromGlobal(cm2);
EXPECT_EQ(cm1.TotalCount(E), cm2.TotalCount(E));
EXPECT_EQ(cm1.TotalBytes(E, 0), cm2.TotalBytes(E, 0));
EXPECT_EQ(cm1.TotalTime(E), cm2.TotalTime(E));
EXPECT_EQ(cm1.TotalCount(F), cm2.TotalCount(F));
EXPECT_EQ(cm1.TotalBytes(F, 0), cm2.TotalBytes(F, 0));
EXPECT_EQ(cm1.TotalTime(F), cm2.TotalTime(F));
  EXPECT_EQ(cm1.TotalCount(C), 24);
EXPECT_EQ(cm1.TotalBytes(C, 0), Bytes(24));
EXPECT_EQ(cm1.TotalTime(C), Microseconds(223));
}
NodeExecStats CreateNodeExecStats(const Node* node, int64_t time,
int64_t bytes) {
NodeExecStats stats;
stats.set_node_name(node->name());
stats.set_op_start_rel_micros(10);
stats.set_op_end_rel_micros(10 + time);
for (int i = 0; i < node->num_outputs(); ++i) {
NodeOutput* no = stats.add_output();
no->set_slot(i);
no->mutable_tensor_description()
->mutable_allocation_description()
->set_requested_bytes(bytes);
}
return stats;
}
TEST(CostModelTest, MergeFromStats) {
CostModel cm(true);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm.RecordCount(C, 23);
cm.RecordTime(C, Microseconds(123));
cm.RecordCount(D, 17);
cm.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
StepStats stats;
DeviceStepStats* dstats = stats.add_dev_stats();
*(dstats->add_node_stats()) = CreateNodeExecStats(C, 10, 10);
*(dstats->add_node_stats()) = CreateNodeExecStats(D, 10, 10);
*(dstats->add_node_stats()) = CreateNodeExecStats(E, 20, 20);
*(dstats->add_node_stats()) = CreateNodeExecStats(E, 20, 20);
*(dstats->add_node_stats()) = CreateNodeExecStats(F, 30, 30);
*(dstats->add_node_stats()) = CreateNodeExecStats(F, 30, 30);
NodeNameToCostIdMap id_map;
for (const auto& node : graph->nodes()) {
id_map.emplace(node->name(), node->cost_id());
}
cm.MergeFromStats(id_map, stats);
EXPECT_EQ(cm.TotalCount(C), 24);
EXPECT_EQ(cm.TotalTime(C), Microseconds(133));
EXPECT_EQ(cm.TotalBytes(C, 0), Bytes(10));
EXPECT_EQ(cm.TotalCount(D), 18);
EXPECT_EQ(cm.TotalTime(D), Microseconds(127));
EXPECT_EQ(cm.TotalBytes(D, 0), Bytes(10));
EXPECT_EQ(cm.TotalCount(E), 2);
EXPECT_EQ(cm.TotalTime(E), Microseconds(40));
EXPECT_EQ(cm.TotalBytes(E, 0), Bytes(40));
EXPECT_EQ(cm.TotalCount(F), 2);
EXPECT_EQ(cm.TotalTime(F), Microseconds(60));
EXPECT_EQ(cm.TotalBytes(F, 0), Bytes(60));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/costmodel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/costmodel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f470b0a5-df2e-44f5-a4bb-da606c6ec291 | cpp | tensorflow/tensorflow | dynamic_index_splitter | third_party/xla/xla/service/dynamic_index_splitter.cc | third_party/xla/xla/service/dynamic_index_splitter_test.cc | #include "xla/service/dynamic_index_splitter.h"
#include <map>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
namespace xla {
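// Rewrites dynamic-slice / dynamic-update-slice ops whose start indices come
// as a single rank-1 operand into the form that takes one scalar index per
// dimension. A rank-0 input makes the op a no-op, so it is replaced directly
// by its operand (or by the update, for dynamic-update-slice).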
absl::StatusOr<bool> DynamicIndexSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloComputation*> computations =
module->MakeNonfusionComputations(execution_threads);
for (HloComputation* computation : computations) {
for (HloInstruction* dynamic_op : computation->MakeInstructionPostOrder()) {
switch (dynamic_op->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
break;
default:
continue;
}
auto parent = dynamic_op->parent();
bool is_update = dynamic_op->opcode() == HloOpcode::kDynamicUpdateSlice;
int64_t num_indices = dynamic_op->operand(0)->shape().rank();
if (num_indices == 0) {
if (is_update) {
TF_CHECK_OK(parent->ReplaceInstruction(
dynamic_op, dynamic_op->mutable_operand(1)));
} else {
TF_CHECK_OK(parent->ReplaceInstruction(
dynamic_op, dynamic_op->mutable_operand(0)));
}
changed = true;
continue;
}
int64_t index_operand_number =
Cast<HloDynamicIndexInstruction>(dynamic_op)
->first_index_operand_number();
auto index_operand = dynamic_op->mutable_operand(index_operand_number);
if (ShapeUtil::IsScalar(index_operand->shape())) {
continue;
}
TF_RET_CHECK(index_operand->shape().rank() == 1);
auto index_element_type = index_operand->shape().element_type();
std::vector<HloInstruction*> index_array;
index_array.reserve(num_indices);
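      // Split the rank-1 index operand into scalars: slice out element
      // [dim, dim+1) for each dimension and reshape it to a scalar.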
for (int64_t dim = 0; dim < num_indices; ++dim) {
auto slice = parent->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(index_element_type, {1}), index_operand, {dim},
{dim + 1}, {1}));
auto bitcast = parent->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(index_element_type, {}), slice));
index_array.push_back(bitcast);
}
auto new_dynamic_op =
is_update
? HloInstruction::CreateDynamicUpdateSlice(
dynamic_op->shape(), dynamic_op->mutable_operand(0),
dynamic_op->mutable_operand(1), absl::MakeSpan(index_array))
: HloInstruction::CreateDynamicSlice(
dynamic_op->shape(), dynamic_op->mutable_operand(0),
absl::MakeSpan(index_array),
dynamic_op->dynamic_slice_sizes());
TF_CHECK_OK(parent->ReplaceWithNewInstruction(dynamic_op,
std::move(new_dynamic_op)));
changed = true;
}
}
return changed;
}
} | #include "xla/service/dynamic_index_splitter.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class DynamicIndexSplitterTest : public HloTestBase {};
TEST_F(DynamicIndexSplitterTest, DynamicSlice) {
const char* const kDynamicSlice = R"(
HloModule DynamicSlice_module
ENTRY entry (operand: s32[4,5,6], indices: s32[3]) -> s32[1,1,1] {
operand = s32[4,5,6] parameter(0)
indices = s32[3] parameter(1)
ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, indices), dynamic_slice_sizes={1,1,1}
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kDynamicSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Parameter(0),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1)))));
for (int i = 0; i < 3; ++i) {
const HloInstruction* slice = module->entry_computation()
->root_instruction()
->operand(i + 1)
->operand(0);
EXPECT_EQ(slice->slice_starts(0), i);
EXPECT_EQ(slice->slice_limits(0), i + 1);
}
}
TEST_F(DynamicIndexSplitterTest, DynamicUpdateSlice) {
const char* const kDynamicUpdateSlice = R"(
HloModule DynamicUpdatedSlice_module
ENTRY entry (operand: s32[4,5,6], indices: s32[3], update: s32[1,1,1]) -> s32[4,5,6] {
operand = s32[4,5,6] parameter(0)
indices = s32[3] parameter(1)
update = s32[1,1,1] parameter(2)
ROOT dynamic-update-slice = s32[4,5,6] dynamic-update-slice(operand, update, indices)
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(kDynamicUpdateSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
op::DynamicUpdateSlice(op::Parameter(0), op::Parameter(2),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1)))));
for (int i = 0; i < 3; ++i) {
const HloInstruction* slice = module->entry_computation()
->root_instruction()
->operand(i + 2)
->operand(0);
EXPECT_EQ(slice->slice_starts(0), i);
EXPECT_EQ(slice->slice_limits(0), i + 1);
}
}
TEST_F(DynamicIndexSplitterTest, AlreadyScalar) {
const char* const kDynamicSlice = R"(
HloModule DynamicSlice_module
ENTRY entry (operand: s32[4,5,6], index.0: s32[], index.1: s32[], index.2: s32[]) -> s32[1,1,1] {
operand = s32[4,5,6] parameter(0)
index.0 = s32[] parameter(1)
index.1 = s32[] parameter(2)
index.2 = s32[] parameter(3)
ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, index.0, index.1, index.2), dynamic_slice_sizes={1,1,1}
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kDynamicSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_index_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_index_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8866eb13-7cf5-4174-912e-2006853b5606 | cpp | tensorflow/tensorflow | function | tensorflow/compiler/mlir/tfrt/function/function.cc | tensorflow/core/tfrt/mlrt/bytecode/function_test.cc | #include "tensorflow/compiler/mlir/tfrt/function/function.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/passes.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/tfrt_pipeline_options.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tfrt/bef/bef_buffer.h"
#include "tfrt/bef_converter/mlir_to_bef.h"
namespace tensorflow {
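// Lowers a TF executor-dialect module to BEF for TFRT: runs the TF-to-TFRT
// pipeline with the given compile options (TPU targeting disabled) and then
// converts the resulting TFRT dialect module to BEF.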
Status CompileTFMLIRToBEF(const TfrtFunctionCompileOptions& options,
mlir::ModuleOp module, tfrt::BefBuffer* bef_buffer) {
mlir::OpPrintingFlags print_flags;
print_flags.elideLargeElementsAttrs();
if (VLOG_IS_ON(1)) {
VLOG(1) << "Input TF Executor dialect:";
DumpMlirOpToFile("tf_to_tfrt_tf_executor_dialect", module);
}
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
mlir::PassManager pm(module.getContext());
tensorflow::applyTensorflowAndCLOptions(pm);
tensorflow::TfrtPipelineOptions pass_options;
if (!options.default_device.empty()) {
pass_options.default_device = options.default_device;
}
if (!options.force_data_format.empty()) {
pass_options.force_data_format = options.force_data_format;
}
if (absl::StrContains(pass_options.default_device, "CPU")) {
pass_options.skip_fold_transpose_in_ops = true;
}
pass_options.enable_optimizer = options.enable_optimizer;
pass_options.target_tpurt = false;
pass_options.tpu_use_core_selector = options.tpu_use_core_selector;
pass_options.tpu_use_bundled_transfer = options.tpu_use_bundled_transfer;
pass_options.tpu_lower_to_fallback = options.tpu_lower_to_fallback;
pass_options.tpu_fuse_ops = options.tpu_fuse_ops;
pass_options.tpu_transfer_result_to_host =
options.tpu_transfer_result_to_host;
Status status = tensorflow::CreateTfExecutorToTfrtPipeline(pm, pass_options);
if (!status.ok()) {
return diag_handler.Combine(status);
}
if (mlir::failed(pm.run(module)))
return diag_handler.Combine(tensorflow::errors::Internal(
"failed to lower TF Dialect to CoreRT dialect."));
if (VLOG_IS_ON(1)) {
VLOG(1) << "TFRT dialect: ";
DumpMlirOpToFile("tf_to_tfrt_tfrt_dialect", module);
}
*bef_buffer =
      tfrt::ConvertMLIRToBEF(module, /*disable_optional_sections=*/true);
if (bef_buffer->empty())
return diag_handler.Combine(
tensorflow::errors::Internal("failed to convert MLIR to BEF."));
return absl::OkStatus();
}
} | #include "tensorflow/core/tfrt/mlrt/bytecode/function.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"
namespace mlrt {
namespace bc {
namespace {
TEST(FunctionTest, Function) {
Buffer buffer;
Allocator allocator(&buffer);
Function::Constructor ctor = New<Function>(&allocator);
ctor.construct_name("main");
ctor.set_num_regs(10);
ctor.construct_input_regs(2).Assign({0, 1});
ctor.construct_output_regs(1).Assign({9});
ctor.construct_output_last_uses(1).Assign({true});
ctor.construct_kernels(3);
Function function(buffer.Get(ctor.address()));
EXPECT_EQ(function.name().Get(), "main");
EXPECT_EQ(function.num_regs(), 10);
EXPECT_THAT(function.input_regs(), ::testing::ElementsAreArray({0, 1}));
EXPECT_THAT(function.output_regs(), ::testing::ElementsAreArray({9}));
EXPECT_THAT(function.output_last_uses(), ::testing::ElementsAreArray({true}));
Vector<Kernel> kernels = function.kernels();
EXPECT_EQ(kernels.size(), 3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/function/function.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/function_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b08dd595-724f-4f37-ad2d-7ae6ec6c1b89 | cpp | tensorflow/tensorflow | name_uniquer | third_party/xla/xla/service/name_uniquer.cc | third_party/xla/xla/service/name_uniquer_test.cc | #include "xla/service/name_uniquer.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "xla/primitive_util.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
bool IsAllowed(char character) {
auto c = static_cast<unsigned char>(character);
return (absl::ascii_isalnum(c) != 0) || c == '_' || c == '.' || c == '-';
}
}
NameUniquer::NameUniquer(const std::string& separator) {
CHECK(absl::c_all_of(separator, IsAllowed))
<< "separator should comprises allowed characters only";
separator_ = separator;
}
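// Maps the leading character to '_' unless it is a letter or '_', replaces
// any later character outside [A-Za-z0-9_.-] with '_', appends '_' to names
// that collide with a primitive type name (except "tuple"), and turns a
// leading "__" into "a_" unless the name starts with the reserved "__xla_"
// prefix.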
std::string NameUniquer::GetSanitizedName(absl::string_view name) {
if (name.empty()) {
return "";
}
std::string result(name);
char c = static_cast<unsigned char>(result[0]);
if (!absl::ascii_isalpha(c) && c != '_') {
result[0] = '_';
}
for (int i = 1, iter_limit = result.length(); i < iter_limit; i++) {
if (!IsAllowed(result[i])) {
result[i] = '_';
}
}
if (primitive_util::IsPrimitiveTypeName(result) && result != "tuple") {
result += "_";
}
if (absl::StartsWith(result, "__") && !absl::StartsWith(result, "__xla_")) {
result[0] = 'a';
}
return result;
}
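// Returns the sanitized `prefix` itself the first time it is seen and
// "<prefix><separator><n>" afterwards. An existing numeric suffix after the
// separator is parsed and registered so explicitly numbered names stay
// stable, e.g. with the default separator: "foo" -> "foo", then "foo__1".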
std::string NameUniquer::GetUniqueName(absl::string_view prefix) {
std::string root =
GetSanitizedName(prefix.empty() ? "name" : std::string(prefix));
bool has_numeric_suffix = false;
int64_t numeric_suffix = 0;
size_t separator_index = root.rfind(separator_);
if (separator_index != std::string::npos && (separator_index > 0) &&
(separator_index < root.size() - separator_.size())) {
std::string after_suffix = root.substr(separator_index + separator_.size());
if (absl::SimpleAtoi(after_suffix, &numeric_suffix)) {
has_numeric_suffix = true;
root = root.substr(0, separator_index);
} else {
numeric_suffix = 0;
}
}
SequentialIdGenerator& id_generator = generated_names_[root];
numeric_suffix = id_generator.RegisterId(numeric_suffix);
if (numeric_suffix == 0) {
return has_numeric_suffix ? absl::StrCat(root, separator_, 0) : root;
}
absl::StrAppend(&root, separator_, numeric_suffix);
return root;
}
} | #include "xla/service/name_uniquer.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using NameUniquerTest = ::testing::Test;
TEST_F(NameUniquerTest, SimpleUniquer) {
NameUniquer uniquer;
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo__1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo__2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar", uniquer.GetUniqueName("bar"));
EXPECT_EQ("foo__3", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar__1", uniquer.GetUniqueName("bar"));
EXPECT_EQ("qux", uniquer.GetUniqueName("qux"));
}
TEST_F(NameUniquerTest, DifferentSeparator) {
NameUniquer uniquer(".");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar", uniquer.GetUniqueName("bar"));
EXPECT_EQ("foo.3", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar.1", uniquer.GetUniqueName("bar"));
}
TEST_F(NameUniquerTest, NumericSuffixes) {
NameUniquer uniquer(".");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.54", uniquer.GetUniqueName("foo.54"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.55.1", uniquer.GetUniqueName("foo.55.1"));
EXPECT_EQ("foo.55.0", uniquer.GetUniqueName("foo.55.1"));
EXPECT_EQ("bar.1000", uniquer.GetUniqueName("bar.1000"));
EXPECT_EQ("bar.2000", uniquer.GetUniqueName("bar.2000"));
EXPECT_EQ("bar.-2000", uniquer.GetUniqueName("bar.-2000"));
EXPECT_EQ("bar.1", uniquer.GetUniqueName("bar.1"));
}
TEST_F(NameUniquerTest, PrefixHasSuffix) {
NameUniquer uniquer(".");
EXPECT_EQ("foo.11.0", uniquer.GetUniqueName("foo.11.0"));
EXPECT_EQ("foo.11", uniquer.GetUniqueName("foo.11"));
}
TEST_F(NameUniquerTest, Sanitize) {
NameUniquer uniquer("_");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo_1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.54", uniquer.GetUniqueName("foo.54"));
EXPECT_EQ("foo_54", uniquer.GetUniqueName("foo_54"));
EXPECT_EQ("foo_54.1", uniquer.GetUniqueName("foo_54.1"));
EXPECT_EQ("foo_2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar_1000", uniquer.GetUniqueName("bar<1000"));
EXPECT_EQ("bar_2000", uniquer.GetUniqueName("bar<2000"));
EXPECT_EQ("bar_1", uniquer.GetUniqueName("bar_1"));
EXPECT_EQ("_10", uniquer.GetUniqueName(
".10"));
EXPECT_EQ("_10_1", uniquer.GetUniqueName(".10"));
EXPECT_EQ("_10_2", uniquer.GetUniqueName("_10"));
EXPECT_EQ("foobar_", uniquer.GetUniqueName("foobar_"));
EXPECT_EQ("foobar__1", uniquer.GetUniqueName("foobar_"));
}
TEST_F(NameUniquerTest, KeepNamesInRandomOrder) {
NameUniquer uniquer(".");
EXPECT_EQ("foo.11", uniquer.GetUniqueName("foo.11"));
EXPECT_EQ("foo.10", uniquer.GetUniqueName("foo.10"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo.1"));
EXPECT_EQ("foo.12", uniquer.GetUniqueName("foo.12"));
EXPECT_EQ("foo.3", uniquer.GetUniqueName("foo.3"));
}
TEST_F(NameUniquerTest, AvoidKeywords) {
NameUniquer uniquer(".");
EXPECT_EQ("f32_", uniquer.GetUniqueName("f32"));
EXPECT_EQ("s64_", uniquer.GetUniqueName("s64"));
EXPECT_EQ("pred_", uniquer.GetUniqueName("pred"));
EXPECT_NE(uniquer.GetUniqueName("__xla_").find("__xla_"), std::string::npos);
EXPECT_EQ(uniquer.GetUniqueName("__abx").find("__"), std::string::npos);
EXPECT_EQ("tuple", uniquer.GetUniqueName("tuple"));
EXPECT_EQ("F32", uniquer.GetUniqueName("F32"));
EXPECT_EQ("S32", uniquer.GetUniqueName("S32"));
EXPECT_EQ("Pred", uniquer.GetUniqueName("Pred"));
}
TEST_F(NameUniquerTest, DetectSeparator) {
NameUniquer uniquer;
EXPECT_EQ(uniquer.GetUniqueName("a__1"), "a__1");
EXPECT_EQ(uniquer.GetUniqueName("a"), "a");
EXPECT_EQ(uniquer.GetUniqueName("a"), "a__2");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/name_uniquer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/name_uniquer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
526cb160-0996-4b79-93f6-c2dc2427d0a2 | cpp | tensorflow/tensorflow | reduce_scatter_combiner | third_party/xla/xla/service/reduce_scatter_combiner.cc | third_party/xla/xla/service/reduce_scatter_combiner_test.cc | #include "xla/service/reduce_scatter_combiner.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
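// Returns the scatter dimension that appears most often among `to_combine`,
// or 0 if that dimension is out of range for the lowest-rank shape.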
int64_t FindMostFrequentScatterDim(
absl::Span<HloInstruction* const> to_combine) {
assert(!to_combine.empty());
int64_t min_rank = std::numeric_limits<int64_t>::max();
std::vector<int64_t> frequency;
for (const HloInstruction* it : to_combine) {
int64_t dim = Cast<HloReduceScatterInstruction>(it)->scatter_dimension();
frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())),
0);
frequency[dim]++;
min_rank = std::min(min_rank, it->shape().rank());
}
int64_t most_frequent_dim = std::distance(
frequency.begin(), std::max_element(frequency.begin(), frequency.end()));
return most_frequent_dim < min_rank ? most_frequent_dim : 0;
}
using ReduceScatterKey =
std::tuple<AllReduceKey, int64_t>;
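// Replaces the reduce-scatter ops in `to_combine` with a single tuple-shaped
// reduce-scatter. Operands whose scatter dimension differs from the most
// frequent one are bitcast so the dimensions line up, and the corresponding
// tuple elements are bitcast back afterwards.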
absl::Status CombineReduceScatters(
absl::Span<HloInstruction* const> to_combine) {
if (to_combine.size() < 2) {
return absl::OkStatus();
}
VLOG(1) << "Combined " << to_combine.size() << " reduce-scatter ops";
HloComputation& computation = *to_combine.back()->parent();
HloComputation* reduction = to_combine[0]->to_apply();
std::optional<ReductionKind> first_reduction_kind =
MatchReductionComputation(reduction);
TF_RET_CHECK(first_reduction_kind);
std::vector<HloInstruction*> operands;
std::vector<std::optional<std::vector<int64_t>>> operand_permutations;
std::vector<Shape> output_shapes;
int64_t most_frequent_dim = FindMostFrequentScatterDim(to_combine);
VLOG(1) << "Combining set";
for (HloInstruction* hlo : to_combine) {
VLOG(1) << "Set element: " << hlo->ToString();
TF_RET_CHECK(hlo->opcode() == HloOpcode::kReduceScatter);
const auto* rs = Cast<HloReduceScatterInstruction>(hlo);
TF_RET_CHECK(hlo->operands().size() == 1);
std::optional<ReductionKind> reduction_kind =
MatchReductionComputation(hlo->to_apply());
TF_RET_CHECK(reduction_kind);
TF_RET_CHECK(*reduction_kind == *first_reduction_kind);
TF_RET_CHECK(hlo->shape().IsArray());
HloInstruction* operand = hlo->operands().front();
operands.push_back(operand);
operand_permutations.emplace_back();
output_shapes.push_back(hlo->shape());
if (rs->scatter_dimension() != most_frequent_dim) {
const Shape& operand_shape = operand->shape();
auto& perm = operand_permutations.back();
perm = std::vector<int64_t>(operand_shape.rank());
std::iota(perm->begin(), perm->end(), 0);
std::swap((*perm)[most_frequent_dim], (*perm)[rs->scatter_dimension()]);
operands.back() =
computation.AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::PermuteDimensions(*perm, operand_shape), operand));
output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape());
}
}
HloInstruction* combined;
TF_RET_CHECK(operands.size() >= 2);
combined = computation.AddInstruction(HloInstruction::CreateReduceScatter(
ShapeUtil::MakeTupleShape(output_shapes), operands, reduction,
to_combine.front()->device_list(),
      /*constrain_layout=*/false, to_combine.front()->channel_id(),
Cast<HloReduceScatterInstruction>(to_combine.front())
->use_global_device_ids(),
most_frequent_dim));
if (to_combine.front()->has_sharding()) {
combined->set_sharding(to_combine.front()->sharding());
}
VLOG(1) << "Replacing with : " << combined->ToString();
for (int64_t i = 0; i < to_combine.size(); ++i) {
HloInstruction* replacement = computation.AddInstruction(
HloInstruction::CreateGetTupleElement(combined, i));
if (operand_permutations[i]) {
replacement = computation.AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::PermuteDimensions(*operand_permutations[i],
replacement->shape()),
replacement));
}
TF_RETURN_IF_ERROR(
computation.ReplaceInstruction(to_combine[i], replacement));
}
return absl::OkStatus();
}
}
ReduceScatterCombiner::ReduceScatterCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count,
bool combine_by_dim)
: combine_threshold_in_bytes_(combine_threshold_in_bytes),
combine_threshold_count_(combine_threshold_count),
combine_by_dim_(combine_by_dim) {}
absl::StatusOr<bool> ReduceScatterCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running ReduceScatterCombiner with threshold of "
<< combine_threshold_in_bytes_ << " bytes";
if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
VLOG(1) << "Skip ReduceScatterCombiner because the threshold is zero";
return false;
}
if (hlo_query::ContainsLayoutConstrainedCollective(
*module, HloOpcode::kReduceScatter)) {
VLOG(1) << "Skip ReduceScatterCombiner because the module contains "
"reduce-scatter with constrained layouts";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
auto key_fn = [&domain_map, this](const HloInstruction* instruction)
-> std::optional<ReduceScatterKey> {
auto* rs = DynCast<HloReduceScatterInstruction>(instruction);
std::optional<AllReduceKey> key =
GetAllReduceKey(instruction, domain_map.get());
if (!rs || !key) {
return std::nullopt;
}
if (!MatchReductionComputation(rs->to_apply())) {
return std::nullopt;
}
int64_t rs_dim_key = this->combine_by_dim_ ? rs->scatter_dimension() : -1;
return ReduceScatterKey{std::move(*key), rs_dim_key};
};
TF_ASSIGN_OR_RETURN(
bool computation_changed,
CombineInstructionsByKey<ReduceScatterKey>(
computation, key_fn, &CombineReduceScatters,
combine_threshold_in_bytes_, combine_threshold_count_));
changed |= computation_changed;
}
return changed;
}
} | #include "xla/service/reduce_scatter_combiner.h"
#include <cstddef>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
constexpr int64_t kMaxCombineCount = 256;
constexpr int64_t kMaxByteCount = 10 * 1024 * 1024;
class ReduceScatterCombinerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, bool expect_change,
int64_t byte_threshold = kMaxByteCount,
int64_t count_threshold = kMaxCombineCount, bool combine_by_dim = true) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
VLOG(1) << "Before running ReduceScatterCombiner: "
<< ReduceScatterCount(module.get()) << " reduce-scatter ops";
auto changed =
ReduceScatterCombiner(byte_threshold, count_threshold, combine_by_dim)
.Run(module.get());
if (!changed.ok()) {
return changed.status();
}
VLOG(1) << "After running ReduceScatterCombiner: "
<< ReduceScatterCount(module.get()) << " reduce-scatter ops";
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
  size_t ReduceScatterCount(HloModule* module) {
int64_t sum = 0;
for (auto comp : module->computations()) {
sum += absl::c_count_if(comp->instructions(),
HloPredicateIsOp<HloOpcode::kReduceScatter>);
}
return sum;
}
};
TEST_F(ReduceScatterCombinerTest, Simple) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
ROOT t = (f32[4], f32[4]) tuple(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
TEST_F(ReduceScatterCombinerTest, SimpleMultipleGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8, 8] parameter(0)
p1 = f32[8, 8] parameter(1)
rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[4, 8] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs2 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
rs3 = f32[8, 4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
ROOT t = (f32[4, 8], f32[4, 8], f32[8, 4], f32[8, 4])
tuple(rs0, rs1, rs2, rs3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_EQ(ReduceScatterCount(module.get()), 2);
}
TEST_F(ReduceScatterCombinerTest, DifferentDimensions) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8, 8] parameter(0)
p1 = f32[8, 8] parameter(1)
rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[4, 8] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs2 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
rs3 = f32[8, 4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
ROOT t = (f32[4, 8], f32[4, 8], f32[8, 4], f32[8, 4])
tuple(rs0, rs1, rs2, rs3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, RunPass(hlo_string, true, kMaxByteCount,
kMaxCombineCount, false));
EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
TEST_F(ReduceScatterCombinerTest, DifferentDimensionsAndRanks) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8, 8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
rs1 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
rs2 = f32[4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
ROOT t = (f32[8, 4], f32[8, 4], f32[4])
tuple(rs0, rs1, rs2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, RunPass(hlo_string, true, kMaxByteCount,
kMaxCombineCount, false));
EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
TEST_F(ReduceScatterCombinerTest, DependentReduceScatter) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8, 8] parameter(0)
rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[2, 8] reduce-scatter(rs0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
ROOT t = (f32[4, 8], f32[2, 8]) tuple(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterCombinerTest, DoNotCombineMismatched) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[4] reduce-scatter(p1), replica_groups={{1,0}}, dimensions={0},
to_apply=sum
ROOT t = (f32[4], f32[4]) tuple(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterCombinerTest, DoNotCombineWithoutReductionKind) {
absl::string_view hlo_string = R"(
HloModule TestModule
region_0 {
Arg_1 = bf16[] parameter(1)
Arg_0 = bf16[] parameter(0)
convert_1 = f32[] convert(Arg_1)
convert_0 = f32[] convert(Arg_0)
add0 = f32[] add(convert_1, convert_0)
ROOT convert_2 = bf16[] convert(add0)
}
region_1 {
Arg_1 = bf16[] parameter(1)
Arg_0 = bf16[] parameter(0)
convert_1 = f32[] convert(Arg_1)
convert_0 = f32[] convert(Arg_0)
add0 = f32[] add(convert_1, convert_0)
ROOT convert_2 = bf16[] convert(add0)
}
ENTRY entry{
param0 = bf16[512,256]{1,0} parameter(0)
param1 = bf16[512,256]{1,0} parameter(1)
reduce-scatter.0 = bf16[512,256]{1,0} reduce-scatter(param0),
replica_groups={{0}}, dimensions={0}, to_apply=region_0
reduce-scatter.1 = bf16[512,256]{1,0} reduce-scatter(param1),
replica_groups={{0}}, dimensions={0}, to_apply=region_1
ROOT add.0 = tuple(reduce-scatter.0, reduce-scatter.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterCombinerTest, HighThreshold) {
absl::string_view hlo_string = R"(
HloModule m
sum_reduce {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
ENTRY main {
param.0 = bf16[1024,32768]{1,0} parameter(0)
param.1 = bf16[4096,8192]{1,0} parameter(1)
param.2 = bf16[3,128,64,1024]{2,1,0,3}parameter(2)
param.3 = bf16[1024,128,64]{2,1,0} parameter(3)
reduce-scatter.19 = bf16[1024,32768]{1,0} reduce-scatter(param.0),
channel_id=132, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
reduce-scatter.21 = bf16[4096,8192]{1,0} reduce-scatter(param.1),
channel_id=134, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
reduce-scatter.23 = bf16[3,128,64,1024]{2,1,0,3} reduce-scatter(param.2),
channel_id=136, replica_groups={{0}}, dimensions={3}, to_apply=sum_reduce
reduce-scatter.25 = bf16[1024,128,64]{2,1,0} reduce-scatter(param.3),
channel_id=138, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
ROOT tuple = tuple(reduce-scatter.19, reduce-scatter.21, reduce-scatter.23,
reduce-scatter.25)
})";
int64_t combined_bytes = 67108864 + 67108864 + 50331648 + 16777216;
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunPass(hlo_string, true,
combined_bytes,
kMaxCombineCount, false));
EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a51674b-4549-47f9-b5af-fc0064e0296e | cpp | tensorflow/tensorflow | lower_functional_ops | tensorflow/core/common_runtime/lower_functional_ops.cc | tensorflow/core/common_runtime/lower_functional_ops_test.cc | #include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/device_propagation.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/lower_case_op.h"
#include "tensorflow/core/common_runtime/lower_function_call_op.h"
#include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
constexpr const char* const kTpuReplicateAttr = "_tpu_replicate";
constexpr const char* const kXlaClusterAttr = "_xla_compile_id";
constexpr const char* const kXlaMustCompileAttr = "_XlaMustCompile";
bool CheckBoolAttr(const Node* n, absl::string_view attr_name) {
bool match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && match;
}
bool CheckStringAttr(const Node* n, absl::string_view attr_name) {
string match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && !match.empty();
}
bool LowerUsingSwitchMergeIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerUsingSwitchMergeAttr);
}
bool LowerAsMultiDeviceFunctionIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerAsMultiDeviceFunctionAttr);
}
bool MarkedForTpuCompilation(const Node* n) {
return CheckStringAttr(n, kTpuReplicateAttr);
}
bool MarkedForXlaCompilation(const Node* n) {
return CheckStringAttr(n, kXlaClusterAttr) ||
CheckBoolAttr(n, kXlaMustCompileAttr);
}
bool HasArgsOrRetvals(const Graph& g) {
for (const Node* n : g.op_nodes()) {
if (n->IsArg() || n->IsRetval()) return true;
}
return false;
}
const absl::flat_hash_set<std::string>& DevicePropagationOpList() {
static const auto op_list = new absl::flat_hash_set<std::string>(
{"Identity", "IdentityN", "Enter", "Exit", "Switch", "Merge",
"NextIteration"});
return *op_list;
}
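// Device assignments are only propagated for TPU devices.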
bool IsPropagatableDevice(StringPiece device_string) {
DeviceNameUtils::ParsedName device;
return DeviceNameUtils::ParseFullName(device_string, &device) &&
device.type == DEVICE_TPU;
}
}
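// Lowers functional control-flow ops (If/Case/While) to Switch/Merge form and
// inlines function calls, gated on the attributes and session options checked
// below. Nodes marked for TPU or XLA compilation are left untouched, and TPU
// device assignments are propagated onto the newly created nodes.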
Status LowerFunctionalOpsPass::Run(
const GraphOptimizationPassOptions& options) {
if (options.partition_graphs != nullptr) {
return errors::Internal(
"Lowering If/While ops should happen before partitioning.");
}
if (options.graph == nullptr) {
return absl::OkStatus();
}
Graph* g = options.graph->get();
if (g == nullptr) {
return errors::Internal(
"Lowering While op requires a graph to be available.");
}
FunctionLibraryDefinition* flib_def = options.flib_def;
if (flib_def == nullptr) {
return errors::Internal(
"Lowering If op requires a FunctionLibraryDefinition to be available.");
}
const bool lower_function_calls =
options.session_options && options.session_options->config.graph_options()
.optimizer_options()
.do_function_inlining();
bool keep_lowered_nodes_fetchable = !HasArgsOrRetvals(*g);
const bool functional_control_flow =
options.session_options &&
(options.session_options->config.experimental().executor_type() ==
"SINGLE_THREADED_EXECUTOR" ||
options.session_options->config.experimental().use_tfrt() ||
options.session_options->config.experimental()
.disable_functional_ops_lowering());
const auto used_by_xla = [](Node* node) -> bool {
return MarkedForTpuCompilation(node) || MarkedForXlaCompilation(node);
};
const auto lower_control_flow = [&](Node* node) -> bool {
return LowerUsingSwitchMergeIsOn(node) && !used_by_xla(node);
};
int num_node_ids_before_lowering = g->num_node_ids();
for (int i = 2; i < g->num_node_ids(); ++i) {
Node* n = g->FindNodeId(i);
if (n == nullptr) continue;
if (IsFunctionCall(*flib_def, *n) && !used_by_xla(n) &&
(lower_function_calls || LowerAsMultiDeviceFunctionIsOn(n))) {
TF_RETURN_IF_ERROR(RewriteFunctionCallNode(n, g, *flib_def,
keep_lowered_nodes_fetchable));
continue;
}
if (functional_control_flow) continue;
if (n->IsIfNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(RewriteIfNode(n, g, keep_lowered_nodes_fetchable));
} else if (n->IsCaseNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(RewriteCaseNode(n, g, keep_lowered_nodes_fetchable));
} else if (n->IsWhileNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(
RewriteWhileNode(n, g, flib_def, keep_lowered_nodes_fetchable));
} else {
DCHECK(!lower_control_flow(n))
<< "Node " << FormatNodeForError(*n) << " of type "
<< n->type_string() << " has '"
<< LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr
<< "' attr set but it does not support lowering.\n";
}
}
PropagateDevices(
[num_node_ids_before_lowering](const Node& n) {
return DevicePropagationOpList().contains(n.type_string()) &&
n.id() >= num_node_ids_before_lowering;
},
IsPropagatableDevice, g);
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 10,
LowerFunctionalOpsPass);
} | #include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
typedef FunctionDefHelper FDH;
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr;
static void AssertHasSubstr(StringPiece s, StringPiece expected) {
ASSERT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
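// Runs LowerFunctionalOpsPass on `graph` with function inlining enabled in
// the session options.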
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
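// The tests below verify post-rewrite properties by scanning
// `graph->op_nodes()` inline. As a readability sketch only (this helper is
// not part of the original file, and `CountNodesOfType` is a hypothetical
// name rather than a TensorFlow API), the same scan could be factored out:
int CountNodesOfType(const Graph& graph, const string& type) {
  int count = 0;
  // op_nodes() skips the implicit source and sink nodes.
  for (const Node* op : graph.op_nodes()) {
    if (op->type_string() == type) ++count;
  }
  return count;
}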
// Loop predicate: returns `counter < N`.
FunctionDef WhileWithIfCond(int32_t N) {
const Tensor kN = test::AsScalar<int32>(N);
return FDH::Define(
"WhileWithIfCond",
{"counter: int32", "pred: bool", "x: int32"},
{"z: bool"},
{},
{
{{"N"}, "Const", {}, {{"value", kN}, {"dtype", DT_INT32}}},
{{"z"}, "Less", {"counter", "N"}, {{"T", DT_INT32}}},
});
}
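// Loop body: increments `counter` by one and computes
// `if = pred ? XTimesTwo(x) : XTimesFour(x)`; the nested If op carries the
// lowering attribute so it is rewritten to Switch/Merge as well.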
FunctionDef WhileWithIfBody() {
NameAttrList then_func;
then_func.set_name("XTimesTwo");
NameAttrList else_func;
else_func.set_name("XTimesFour");
const Tensor kOne = test::AsScalar<int32>(1);
std::vector<DataType> input_types = {DT_INT32};
std::vector<DataType> output_types = {DT_INT32};
return FDH::Define(
"WhileWithIfBody",
{"counter: int32", "pred: bool", "x: int32"},
{"updated_counter: int32", "pred: bool", "if: int32"},
{},
{
{{"if"},
"If",
{"pred", "x"},
{{"then_branch", then_func},
{"else_branch", else_func},
{"Tcond", DT_BOOL},
{"Tin", input_types},
{"Tout", output_types},
{kLowerUsingSwitchMergeAttr, true}}},
{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_INT32}}},
{{"updated_counter"}, "Add", {"counter", "one"}, {{"T", DT_INT32}}},
});
}
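// Together, WhileWithIfCond and WhileWithIfBody compute
//   for (counter = 0; counter < N; ++counter) x = pred ? 2 * x : 4 * x;
// With N = 3 and x = 1 this yields 8 when pred is true and 64 when pred is
// false. The test checks that both the While and the nested If are lowered
// and that the lowered graph still computes those values.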
TEST(LowerIfWhileTest, CondInWhile) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::XTimesFour();
*f_lib_proto.add_function() = WhileWithIfCond(3);
*f_lib_proto.add_function() = WhileWithIfBody();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto counter = ops::Placeholder(root.WithOpName("counter"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(counter.node()), NodeBuilder::NodeOut(pred.node()),
NodeBuilder::NodeOut(a.node())});
Node* while_node;
AttrValue cond_func;
cond_func.mutable_func()->set_name("WhileWithIfCond");
AttrValue body_func;
body_func.mutable_func()->set_name("WhileWithIfBody");
TF_ASSERT_OK(NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32, DT_BOOL, DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
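  // After the rewrite, no functional While or If ops may remain.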
for (const auto* op : graph->op_nodes()) {
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
    EXPECT_EQ(out_tensors[0].scalar<int>()(), 8);  // 1 * 2^3 (then branch).
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
    EXPECT_EQ(out_tensors[0].scalar<int>()(), 64);  // 1 * 4^3 (else branch).
}
}
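// Then-branch for the test below: a While op (marked for lowering) that
// doubles x while LessThanOrEqualToN holds; the test instantiates N = 8.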
FunctionDef IfWithWhileThen() {
NameAttrList cond_func;
cond_func.set_name("LessThanOrEqualToN");
NameAttrList body_func;
body_func.set_name("XTimesTwo");
std::vector<DataType> input_and_output_types = {DT_INT32};
std::vector<TensorShape> output_shapes = {TensorShape()};
return FDH::Define(
"IfWithWhileThen",
{"x: int32"},
{"while: int32"},
{},
{
{{"while"},
"While",
{"x"},
{{"cond", cond_func},
{"body", body_func},
{"T", input_and_output_types},
{"output_shapes", output_shapes},
{kLowerUsingSwitchMergeAttr, true}}},
});
}
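// With pred = true, the lowered then-branch doubles x = 1 through 2, 4, 8
// and exits at 16 (the first value greater than N = 8); with pred = false,
// the else-branch is a single XTimesTwo, yielding 2.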
TEST(LowerIfWhileTest, WhileInCond) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
*f_lib_proto.add_function() = IfWithWhileThen();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue then_func;
then_func.mutable_func()->set_name("IfWithWhileThen");
AttrValue else_func;
else_func.mutable_func()->set_name("XTimesTwo");
Node* if_node;
TF_ASSERT_OK(NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", then_func)
.Attr("else_branch", else_func)
.Attr("Tout", {DT_INT32})
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &if_node));
TF_ASSERT_OK(root.DoShapeInference(if_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
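  // Sanity-check the graph before lowering: it must not yet contain any
  // control-flow primitives, and it must contain exactly one node named
  // "if".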
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
if (op->name() == "if") {
node_called_if_count++;
}
}
ASSERT_EQ(node_called_if_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
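  // After lowering, a fetchable node named "if" must still exist, while no
  // functional If or While ops remain.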
node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "if") {
node_called_if_count++;
}
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
    EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);  // 1 -> 2 -> 4 -> 8 -> 16.
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
    EXPECT_EQ(out_tensors[0].scalar<int>()(), 2);  // XTimesTwo(1).
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_functional_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_functional_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |