def iter_selections(manifest, selections, *, unique=True):
byname = {b.name: b for b in manifest.benchmarks}
seen = set()
included = []
excluded = set()
for (op, _, kind, parsed) in selections:
matches = _match_selection(manifest, kind, parsed, byname)
if (op == '+'):
for bench in matches:
if ((bench not in seen) or (not unique)):
included.append(bench)
seen.add(bench)
elif (op == '-'):
for bench in matches:
excluded.add(bench)
else:
raise NotImplementedError(op)
if (not included):
included = list(_match_selection(manifest, 'tag', 'default', byname))
for bench in included:
if (bench not in excluded):
(yield bench) |
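A minimal sketch of driving `iter_selections`, assuming the function above is pasted into the same module. The stub `_match_selection` and the toy manifest below are invented here purely to illustrate the `(op, _, kind, parsed)` tuple shape; they are not the real helpers from the source project.

```python
from types import SimpleNamespace

def _match_selection(manifest, kind, parsed, byname):
    # Illustrative stand-in: match either by exact name or by tag.
    if kind == 'name':
        return [byname[parsed]] if parsed in byname else []
    if kind == 'tag':
        return [b for b in manifest.benchmarks if parsed in b.tags]
    return []

manifest = SimpleNamespace(benchmarks=[
    SimpleNamespace(name='json', tags={'default'}),
    SimpleNamespace(name='regex', tags={'default'}),
    SimpleNamespace(name='slow_io', tags={'slow'}),
])

selections = [
    ('+', None, 'tag', 'default'),   # include everything tagged "default"
    ('-', None, 'name', 'regex'),    # then exclude one benchmark by name
]
print([b.name for b in iter_selections(manifest, selections)])  # ['json']
```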
@jax_funcify.register(DimShuffle)
def jax_funcify_DimShuffle(op, **kwargs):
def dimshuffle(x):
res = jnp.transpose(x, op.transposition)
shape = list(res.shape[:len(op.shuffle)])
for augm in op.augment:
shape.insert(augm, 1)
res = jnp.reshape(res, shape)
if (not op.inplace):
res = jnp.copy(res)
return res
return dimshuffle |
class IfNodeTest(_NodeTest):
CODE = '\n if 0:\n print()\n\n if True:\n print()\n else:\n pass\n\n if "":\n print()\n elif []:\n raise\n\n if 1:\n print()\n elif True:\n print()\n elif func():\n pass\n else:\n raise\n '
def test_if_elif_else_node(self) -> None:
self.assertEqual(len(self.astroid.body), 4)
for stmt in self.astroid.body:
self.assertIsInstance(stmt, nodes.If)
self.assertFalse(self.astroid.body[0].orelse)
self.assertIsInstance(self.astroid.body[1].orelse[0], nodes.Pass)
self.assertIsInstance(self.astroid.body[2].orelse[0], nodes.If)
self.assertIsInstance(self.astroid.body[3].orelse[0].orelse[0], nodes.If)
def test_block_range(self) -> None:
self.assertEqual(self.astroid.block_range(1), (0, 22))
self.assertEqual(self.astroid.block_range(10), (0, 22))
self.assertEqual(self.astroid.body[1].block_range(5), (5, 6))
self.assertEqual(self.astroid.body[1].block_range(6), (6, 6))
self.assertEqual(self.astroid.body[1].orelse[0].block_range(7), (7, 8))
self.assertEqual(self.astroid.body[1].orelse[0].block_range(8), (8, 8)) |
def search_for_duplicates(inp_path, verbose=False):
headers = swmmio.utils.text.get_inp_sections_details(inp_path)['headers']
dups_found = False
for (header, cols) in headers.items():
if (cols != 'blob'):
df = dataframe_from_inp(inp_path, section=header)
elements = df.index
n_unique = len(elements.unique())
n_total = len(elements)
if verbose:
print('{} -> (uniques, total) -> ({}, {})'.format(header, n_unique, n_total))
if (n_unique != n_total):
dups = ', '.join(df[df.index.duplicated()].index.unique().tolist())
print('duplicate found in {}\nsection: {}\n{}'.format(inp_path, header, dups))
dups_found = True
return dups_found |
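A brief usage sketch, assuming swmmio and its `dataframe_from_inp` helper are importable; `model.inp` is a placeholder path rather than a file that ships with this snippet.

```python
# Hypothetical invocation: reports and returns True when any INP section has duplicate element names.
if search_for_duplicates('model.inp', verbose=True):
    print('fix duplicate element names before running the model')
```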
def get_semisup_dataloaders(train_dataset, test_dataset, val_dataset=None, batch_size=256, batch_size_test=256, num_workers=4, unsup_fraction=0.5):
dataset_size = train_dataset.dataset_size
train_batch_sampler = SemiSupervisedSampler(train_dataset.sup_indices, train_dataset.unsup_indices, batch_size, unsup_fraction, num_batches=int(np.ceil((dataset_size / batch_size))))
epoch_size = (len(train_batch_sampler) * batch_size)
kwargs = {'num_workers': num_workers, 'pin_memory': False}
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_sampler=train_batch_sampler, **kwargs)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size_test, shuffle=False, **kwargs)
if val_dataset:
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size_test, shuffle=False, **kwargs)
return (train_dataloader, test_dataloader, val_dataloader)
return (train_dataloader, test_dataloader) |
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
if (dir is None):
dir = os.getenv('OPENAI_LOGDIR')
if (dir is None):
dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if (rank > 0):
log_suffix = (log_suffix + ('-rank%03i' % rank))
if (format_strs is None):
if (rank == 0):
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
else:
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log(('Logging to %s' % dir)) |
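A minimal sketch of calling this `configure` helper, assuming the surrounding logger module (with `Logger`, `make_output_format`, and `log`) is available; the directory and format list below are placeholders.

```python
# Explicit directory and output formats (placeholders chosen for illustration).
configure(dir='/tmp/experiment-001', format_strs=['stdout', 'csv'], log_suffix='-demo')

# With no arguments it falls back to $OPENAI_LOGDIR, then to a timestamped tempdir.
configure()
```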
class Migration(migrations.Migration):
dependencies = [('domain', '0047_attribute_locked')]
operations = [migrations.AlterField(model_name='attribute', name='locked', field=models.BooleanField(default=False, help_text='Designates whether this attribute (and its descendants) can be changed.', verbose_name='Locked'))] |
@pytest.fixture()
def valid_tmp_game_root(tmp_path):
game_root = tmp_path.joinpath('game_root')
game_root.joinpath('files').mkdir(parents=True)
game_root.joinpath('sys').mkdir()
for f in ['default.dol', 'FrontEnd.pak', 'Metroid1.pak', 'Metroid2.pak']:
game_root.joinpath('files', f).write_bytes(b'')
game_root.joinpath('sys', 'main.dol').write_bytes(b'')
return game_root |
class ImgWrapper(gym.ObservationWrapper):
def __init__(self, env=None):
super(ImgWrapper, self).__init__(env)
obs_shape = self.observation_space.shape
self.observation_space = spaces.Box(self.observation_space.low[(0, 0, 0)], self.observation_space.high[(0, 0, 0)], [obs_shape[2], obs_shape[0], obs_shape[1]], dtype=self.observation_space.dtype)
def observation(self, observation):
return observation.transpose(2, 0, 1) |
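A standalone check of the channel reordering, assuming the class above is defined with its `gym`/`spaces` imports in scope; the dummy array is arbitrary and no real environment is constructed.

```python
import numpy as np

# observation() simply transposes (H, W, C) -> (C, H, W), as PyTorch conv stacks expect.
dummy_hwc = np.zeros((120, 160, 3), dtype=np.uint8)
print(ImgWrapper.observation(None, dummy_hwc).shape)  # (3, 120, 160)
```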
class KlapEncryptionSession():
def __init__(self, local_seed, remote_seed, user_hash):
self.local_seed = local_seed
self.remote_seed = remote_seed
self.user_hash = user_hash
self._key = self._key_derive(local_seed, remote_seed, user_hash)
(self._iv, self._seq) = self._iv_derive(local_seed, remote_seed, user_hash)
self._sig = self._sig_derive(local_seed, remote_seed, user_hash)
def _key_derive(self, local_seed, remote_seed, user_hash):
payload = (((b'lsk' + local_seed) + remote_seed) + user_hash)
return hashlib.sha256(payload).digest()[:16]
def _iv_derive(self, local_seed, remote_seed, user_hash):
payload = (((b'iv' + local_seed) + remote_seed) + user_hash)
fulliv = hashlib.sha256(payload).digest()
seq = int.from_bytes(fulliv[(- 4):], 'big', signed=True)
return (fulliv[:12], seq)
def _sig_derive(self, local_seed, remote_seed, user_hash):
payload = (((b'ldk' + local_seed) + remote_seed) + user_hash)
return hashlib.sha256(payload).digest()[:28]
def _iv_seq(self):
seq = self._seq.to_bytes(4, 'big', signed=True)
iv = (self._iv + seq)
return iv
def encrypt(self, msg):
self._seq = (self._seq + 1)
if isinstance(msg, str):
msg = msg.encode('utf-8')
cipher = Cipher(algorithms.AES(self._key), modes.CBC(self._iv_seq()))
encryptor = cipher.encryptor()
padder = padding.PKCS7(128).padder()
padded_data = (padder.update(msg) + padder.finalize())
ciphertext = (encryptor.update(padded_data) + encryptor.finalize())
digest = hashes.Hash(hashes.SHA256())
digest.update(((self._sig + self._seq.to_bytes(4, 'big', signed=True)) + ciphertext))
signature = digest.finalize()
return ((signature + ciphertext), self._seq)
def decrypt(self, msg):
cipher = Cipher(algorithms.AES(self._key), modes.CBC(self._iv_seq()))
decryptor = cipher.decryptor()
dp = (decryptor.update(msg[32:]) + decryptor.finalize())
unpadder = padding.PKCS7(128).unpadder()
plaintextbytes = (unpadder.update(dp) + unpadder.finalize())
return plaintextbytes.decode() |
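A roundtrip sketch, assuming the class above is in scope together with its `hashlib` and `cryptography` imports; the seeds and user hash are random placeholders, not values from a real device handshake.

```python
import os
import hashlib

local_seed = os.urandom(16)
remote_seed = os.urandom(16)
user_hash = hashlib.sha256(b'user@example.com' + b'password').digest()  # placeholder credentials

session = KlapEncryptionSession(local_seed, remote_seed, user_hash)
payload, seq = session.encrypt('{"method": "get_device_info"}')
print(len(payload), seq)            # 32-byte signature followed by the padded AES-CBC ciphertext
print(session.decrypt(payload))     # round-trips back to the plaintext JSON string
```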
def stat_proxy(path: str) -> os.stat_result:
try:
st = orig_stat(path)
except OSError as err:
print(f'stat({path!r}) -> {err}')
raise
else:
print(('stat(%r) -> (st_mode=%o, st_mtime=%d, st_size=%d)' % (path, st.st_mode, st.st_mtime, st.st_size)))
return st |
def compute_dense_reward(self, action):
reward = 0.0
cube_at_goal = (np.linalg.norm((self.obj.pose.p - self.goal_pos)) <= 0.02)
is_robot_static = (np.max(np.abs(self.agent.robot.get_qvel()[:(- 2)])) <= 0.2)
if (cube_at_goal and is_robot_static):
reward += 2.25
return reward
gripper_pos = self.tcp.pose.p
obj_pos = self.obj.pose.p
dist_to_obj = np.linalg.norm((gripper_pos - obj_pos))
reaching_reward = (1 - np.tanh((5 * dist_to_obj)))
reward += reaching_reward
is_grasped = self.agent.check_grasp(self.obj, max_angle=30)
if is_grasped:
reward += 0.25
if is_grasped:
dist_to_goal = np.linalg.norm((self.obj.pose.p - self.goal_pos))
placement_reward = (1 - np.tanh((5 * dist_to_goal)))
reward += placement_reward
action_reg = ((- np.sum(np.square(action))) / len(action))
reward += (0.1 * action_reg)
return reward |
def test_histogrambin():
hb = OSC.parameters._HistogramBin(1, OSC.Range(0, 1))
hb2 = OSC.parameters._HistogramBin(1, OSC.Range(0, 1))
hb3 = OSC.parameters._HistogramBin(1, OSC.Range(0, 2))
assert (hb == hb2)
assert (hb != hb3)
prettyprint(hb)
hb4 = OSC.parameters._HistogramBin.parse(hb.get_element())
assert (hb4 == hb)
assert (version_validation('HistogramBin', hb, 1) == ValidationResponse.OK)
assert (version_validation('HistogramBin', hb, 2) == ValidationResponse.OK) |
def test_regress_with_steps(ansi_bar: ProgressBar, ansi_io: BufferedIO) -> None:
ansi_bar.start()
ansi_bar.advance(4)
ansi_bar.advance(4)
ansi_bar.advance((- 2))
output = [' 0 [>]', ' 4 [---->]', ' 8 [>]', ' 6 [------>]']
expected = generate_output(output)
assert (expected == ansi_io.fetch_error()) |
def test_majorana_operator_with_basis_rotated_by():
H = (numpy.array([[1, 1], [1, (- 1)]]) / numpy.sqrt(2))
a = MajoranaOperator((0, 1), 2.0)
op = a.with_basis_rotated_by(H)
assert (op == MajoranaOperator.from_dict({(0, 1): (- 2.0)}))
b = MajoranaOperator((0,), 2.0)
op = b.with_basis_rotated_by(H)
assert (op == MajoranaOperator.from_dict({(0,): numpy.sqrt(2), (1,): numpy.sqrt(2)}))
c = MajoranaOperator((1,), 2.0)
op = c.with_basis_rotated_by(H)
assert (op == MajoranaOperator.from_dict({(0,): numpy.sqrt(2), (1,): (- numpy.sqrt(2))}))
P = numpy.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
d = (MajoranaOperator((0, 1, 2)) + MajoranaOperator((1, 2)))
op = d.with_basis_rotated_by(P)
assert (op == MajoranaOperator.from_dict({(0, 1, 2): 1.0, (0, 1): 1.0}))
with pytest.raises(ValueError):
_ = a.with_basis_rotated_by((2 * H)) |
class PreImport(WorkerPlugin):
def __init__(self, libraries):
if (libraries is None):
libraries = []
elif isinstance(libraries, str):
libraries = libraries.split(',')
self.libraries = libraries
def setup(self, worker=None):
for l in self.libraries:
importlib.import_module(l) |
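A registration sketch, assuming a dask.distributed cluster is available; `Client.register_worker_plugin` is the classic entry point for worker plugins (newer releases also expose `register_plugin`), and the library list here is arbitrary.

```python
from distributed import Client

client = Client()  # starts or connects to a local cluster
client.register_worker_plugin(PreImport('numpy,pandas'))  # each worker imports these in setup()
```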
_config
def test_resize(manager):
manager.c.screen[0].resize(x=10, y=10, w=100, h=100)
(ignore_exceptions=AssertionError, fail_msg="Screen didn't resize")
def run():
d = manager.c.screen[0].info()
assert (d['width'] == 100)
assert (d['height'] == 100)
return d
d = run()
assert (d['x'] == d['y'] == 10) |
def get_config():
config = get_default_configs()
training = config.training
training.sde = 'vpsde'
training.continuous = True
training.reduce_mean = True
sampling = config.sampling
sampling.method = 'pc'
sampling.predictor = 'euler_maruyama'
sampling.corrector = 'none'
data = config.data
data.centered = True
model = config.model
model.name = 'ncsnpp'
model.scale_by_sigma = False
model.ema_rate = 0.9999
model.normalization = 'GroupNorm'
model.nonlinearity = 'swish'
model.nf = 128
model.ch_mult = (1, 2, 2, 2)
model.num_res_blocks = 4
model.attn_resolutions = (16,)
model.resamp_with_conv = True
model.conditional = True
model.fir = False
model.fir_kernel = [1, 3, 3, 1]
model.skip_rescale = True
model.resblock_type = 'biggan'
model.progressive = 'none'
model.progressive_input = 'none'
model.progressive_combine = 'sum'
model.attention_type = 'ddpm'
model.init_scale = 0.0
model.embedding_type = 'positional'
model.fourier_scale = 16
model.conv_size = 3
return config |
class PlayMultiBlockChange(Packet):
id = 59
to = 1
def __init__(self, chunk_sect_x: int, chunk_sect_y: int, chunk_sect_z: int, trust_edges: bool, blocks: list) -> None:
super().__init__()
self.chunk_sect_x = chunk_sect_x
self.chunk_sect_y = chunk_sect_y
self.chunk_sect_z = chunk_sect_z
self.trust_edges = trust_edges
self.blocks = blocks
def encode(self) -> bytes:
out = ((Buffer.pack_varint(((((self.chunk_sect_x & 4194303) << 42) | (self.chunk_sect_y & 1048575)) | ((self.chunk_sect_z & 4194303) << 20))) + Buffer.pack('?', self.trust_edges)) + Buffer.pack_varint(len(self.blocks)))
for (block_id, local_x, local_y, local_z) in self.blocks:
out += Buffer.pack_varint(((block_id << 12) | (((local_x << 8) | (local_z << 4)) | local_y)))
return out |
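A standalone check of the bit packing used by `encode`, showing that the chunk-section position and the per-block entries unpack back to the original fields; the sample coordinates are arbitrary.

```python
# Chunk section position: x and z get 22 bits each, y gets 20 (values are masked, not sign-extended).
sx, sy, sz = 5, 3, -7
packed_pos = ((sx & 0x3FFFFF) << 42) | (sy & 0xFFFFF) | ((sz & 0x3FFFFF) << 20)
assert (packed_pos >> 42) & 0x3FFFFF == sx & 0x3FFFFF
assert (packed_pos >> 20) & 0x3FFFFF == sz & 0x3FFFFF
assert packed_pos & 0xFFFFF == sy & 0xFFFFF

# Per-block entry: block state id in the high bits, then local x, z, y in 4 bits each.
block_id, lx, ly, lz = 1234, 9, 2, 15
entry = (block_id << 12) | (lx << 8) | (lz << 4) | ly
assert (entry >> 12, (entry >> 8) & 0xF, entry & 0xF, (entry >> 4) & 0xF) == (block_id, lx, ly, lz)
```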
class Word(entity):
def __init__(self, token, syllables=None, sylls_text=[], broken=False, lang=None):
if (syllables == None):
import prosodic
if (lang == None):
lang = prosodic.lang
w = prosodic.dict[lang].get(token)[0]
if (not len(w.__dict__)):
self.broken = True
else:
for (k, v) in list(w.__dict__.items()):
setattr(self, k, v)
return
self.token = token.lower()
self.punct = ''
self.sylls_text = sylls_text
self.finished = False
self.children = syllables
self.numSyll = len(syllables)
self.broken = broken
self.featpaths = {}
self.stress = '?'
self.lang = '?'
self.feats = {}
if ('/' in self.token):
tt = self.token.split('/')
self.token = tt[0]
self.pos = tt[1]
(self.token, self.punct) = gleanPunc(self.token)
self.setSyllText()
self.feat('numSyll', self.numSyll)
if ((not len(syllables)) or (self.token == '')):
self.broken = True
self.token = ('?' + self.token)
else:
len_sylls = len(self.children)
len_sylls_text = len(self.sylls_text)
if (len_sylls != len_sylls_text):
self.om(((('<error> numSyll mismatch: [ipa] ' + self.u2s('.'.join([child.str_ipa() for child in self.children]))) + ' vs [orth] ') + str('.'.join([self.u2s(x) for x in self.sylls_text]))))
length = min([len_sylls, len_sylls_text])
else:
length = len_sylls
for i in range(length):
self.children[i].settok(self.sylls_text[i])
def __repr__(self):
return (((((('<' + self.classname()) + '.') + self.u2s(self.token)) + '> [') + self.__str__stressedSylls()) + ']')
def __str__(self):
tok = (self.token if (type(self.token) == str) else self.token.encode('utf-8'))
return (((tok + str('\t<')) + str(self.__str__stressedSylls())) + str('>'))
def __str__weight(self):
if (not hasattr(self, 'weight')):
self.weight = ''.join([entity.weight_bool2str[syll.children[0].feature('prom.weight')] for syll in self.children])
return self.weight
def __str__stressedSylls(self):
lang = self.lang
import prosodic
if (not (('output_' + lang) in prosodic.config)):
lang = '**'
if (prosodic.config.get(('output_' + lang), '') == 'cmu'):
return ' . '.join([str(syll) for syll in self.children])
else:
return '.'.join([str(syll) for syll in self.children])
def output_minform(self):
return ((((((str(makeminlength(self.u2s(self.token), 20)) + '\t') + str(makeminlength(('P:' + self.__str__stressedSylls()), 35))) + '\tS:') + str(self.stress)) + '\tW:') + str(self.__str__weight()))
def CorV(self):
o = ''
for phoneme in self.phonemes():
o += phoneme.CorV()
return o
def setSyllText(self):
if (not self.children):
return
if ((not self.sylls_text) or (len(self.sylls_text) != len(self.children))):
self.setSyllText_byphonemes()
def addSuffix(self, phon):
self.children[(len(self.children) - 1)].addSuffix(phon)
def lastsyll(self):
return self.children[(len(self.children) - 1)]
def setSyllText_byphonemes(self):
return self.setSyllText_byletters(self.CorV())
corv = self.CorV()
corvi = 0
self.sylls_text = []
for syll in self.children:
syllshape = syll.feature('shape')
if (not syllshape):
continue
sylltail = syllshape[(- 1)]
vowi = corv.find('V', corvi)
if ((len(corv) - 1) == vowi):
self.sylls_text.append(corv[corvi:vowi])
elif (sylltail == 'V'):
if (corv[(vowi + 1)] == 'V'):
self.sylls_text.append(corv[corvi:(vowi + 1)])
corvi = ((vowi + 1) + 1)
else:
self.sylls_text.append(corv[corvi:vowi])
corvi = (vowi + 1)
elif (sylltail == 'C'):
self.sylls_text.append(corv[corvi:(vowi + 1)])
corvi = ((vowi + 1) + 1)
def setSyllText_byletters(self, lengthby=None):
i = 0
textSyll = []
self.sylls_text = []
numSyll = len(self.children)
numLetters = len(self.token)
if (not numLetters):
for x in self.stress:
self.sylls_text.append('?')
return None
while (i < numSyll):
textSyll.append('')
i += 1
word = self.token
if (not lengthby):
inc = (numLetters / numSyll)
else:
inc = (len(lengthby) / numSyll)
if (not inc):
inc = 1
curSyll = 0
unit = ''
curLetter = 1
for letter in word:
textSyll[curSyll] += letter
if ((curLetter % inc) == 0):
if ((curSyll + 1) < numSyll):
curSyll += 1
curLetter += 1
self.sylls_text = textSyll
def addPunct(self, punct):
self.punct = punct
def weight(self):
return ''.join([entity.weight_bool2str[syll.children[0].feature('prom.weight')] for syll in self.children])
def getToken(self, punct=True):
if (punct and (self.punct != None)):
return (self.u2s(self.token) + self.u2s(self.punct))
else:
return self.u2s(self.token)
def getPOS(self):
return self.pos
def getStress(self):
return self.stress
def getWeight(self):
return self.weight
def getFeet(self):
return self.feet
def getPunct(self):
return self.punct
def isIgnored(self):
return (('?' in self.stress) or ('?' in self.weight))
def isLexMono(self):
return ((self.numSyll == 1) and (self.stress == 'P'))
def getTokenSyll(self):
return '.'.join([str(syll) for syll in self.children])
def getNumSyll(self):
return len(self.children)
def isMonoSyllab(self):
return (self.getNumSyll() == 1)
def isPolySyllab(self):
return (self.getNumSyll() > 1)
def get_unstressed_variant(self):
new_ipa = self.ipa.replace("'", '').replace('`', '')
return self.get_word_variant(new_ipa)
def get_stressed_variant(self, stress_pattern=None):
if (not stress_pattern):
if (len(self.children) == 1):
stress_pattern = ['P']
else:
print('!! cannot force stressed variant to polysyllabic word', self, 'without a stress pattern set')
return
if (len(stress_pattern) != len(self.children)):
print('!! stress_pattern', stress_pattern, 'does not match # sylls of this word:', len(self.children), self.children)
return
if (len(stress_pattern) != len(self.ipa.split('.'))):
print('!! stress_pattern', stress_pattern, 'does not match # sylls of this word:', len(self.children), self.children)
return
new_stress_pattern = []
for x in stress_pattern:
if (x in ['P', 'S', 'U']):
new_stress_pattern += [x]
elif (x in ['1', 1, 1.0]):
new_stress_pattern += ['P']
elif (x in ['2', 2, 2.0]):
new_stress_pattern += ['S']
elif (x in ['0', 0, 0.0]):
new_stress_pattern += ['U']
stress_pattern = new_stress_pattern
newipa = []
for (i, x) in enumerate(self.ipa.replace("'", '').replace('`', '').split('.')):
stress = stress_pattern[i]
if (stress == 'P'):
newipa += [("'" + x)]
elif (stress == 'S'):
newipa += [('`' + x)]
else:
newipa += [x]
newipa = '.'.join(newipa)
return self.get_word_variant(newipa)
def get_word_variant(self, stressedipa):
from Dictionary import stressedipa2stress, getStrengthStress
from Syllable import Syllable
stress = stressedipa2stress(stressedipa)
(prom_stress, prom_strength) = getStrengthStress(stress)
syllphons = [tuple(child.phonemes()) for child in self.children]
syllbodies = self.syllableBodies()
sylls = []
for i in range(len(syllphons)):
syllbody = syllbodies[i]
syll = Syllable((syllbody, prom_strength[i], prom_stress[i]), lang=self.lang, token=self.sylls_text[i])
sylls.append(syll)
word = Word(self.token, sylls, self.sylls_text)
word.ipa = stressedipa
word.stress = stress
word.lang = self.lang
if (not word.ipa):
word.broken = True
return word |
def test_misc_object_reader(tmpdir):
tmpcatalog = os.path.join(tmpdir, 'my_catalog.xosc')
cf = xosc.CatalogFile()
cf.create_catalog(tmpcatalog, 'MiscObjectCatalog', 'My first miscobject catalog', 'Mandolin')
orig = xosc.MiscObject('pole', 50, xosc.MiscObjectCategory.pole, xosc.BoundingBox(1, 1, 1, 1, 1, 1))
cf.add_to_catalog(orig)
cf.dump()
read = xosc.CatalogReader(xosc.CatalogReference('my_catalog', 'pole'), tmpdir)
assert (read == orig) |
class RunModel():
def __init__(self, model, args, ID2wordVecIdx, ID2char, expName, m_name, m_train='train', m_dev='dev', m_test='test'):
self.model = model
self.tbWriter = None
self.args = args
self.ID2wordVecIdx = ID2wordVecIdx
self.ID2char = ID2char
self.expName = expName
self.m_name = m_name
self.m_train = m_train
self.m_dev = m_dev
self.m_test = m_test
self.batch_size = args.batch_size
self.lr = args.lr
self.lr_decay_counter = 0
self.stop_counter = 0
(self.m_x_data, self.m_x_char_data, self.m_answerData, self.m_lengthData) = input_datapickle(args.guidee_data, ID2wordVecIdx)
self.m_batchgroup = batch_sort(self.m_lengthData, self.batch_size)
def train1epoch(self, sess, batch_idx, infoInput=None, tbWriter=None):
for b_idx in batch_idx:
(x_minibatch, y_minibatch, xlen_minibatch, x_char_minibatch) = idx2data(self.m_batchgroup[self.m_train][b_idx], self.m_x_data[self.m_train], self.m_x_char_data[self.m_train], self.m_answerData[self.m_train], self.m_lengthData[self.m_train])
(x_minibatch, y_minibatch, x_char_minibatch, maxLen) = batch_padding(x_minibatch, y_minibatch, xlen_minibatch, x_char_minibatch)
(x_charPad, x_charLen) = char_padding(inputs=x_char_minibatch, voca_size=len(self.ID2char), embedding_dim=self.args.ce_dim, wordMaxLen=maxLen, charMaxLen=self.args.char_maxlen)
infos = np.zeros([len(x_minibatch), maxLen, (self.args.hidden_size * 2)])
if (infoInput is None):
infoOuts = list()
for i in range(5):
infoOuts.append(np.zeros([len(x_minibatch), maxLen, (self.args.hidden_size * 2)]))
else:
infoOuts = list()
for infotmp in infoInput[self.m_train]:
infoOuts.append(infotmp[b_idx])
tmpLen = len(infoOuts)
for i in range(tmpLen, 5):
infoOuts.append(np.zeros([len(x_minibatch), maxLen, (self.args.hidden_size * 2)]))
feed_dict1 = {self.model.X: x_minibatch, self.model.Y: y_minibatch, self.model.X_len: xlen_minibatch, self.model.X_char: x_charPad, self.model.X_char_len: x_charLen, self.model.maxLen: maxLen, self.model.lr: self.lr, self.model.infos: infos, self.model.infos1: infoOuts[0], self.model.infos2: infoOuts[1], self.model.infos3: infoOuts[2], self.model.infos4: infoOuts[3], self.model.infos5: infoOuts[4], self.model.emb_dropout: self.args.embdropout, self.model.lstm_dropout: self.args.lstmdropout}
if ((b_idx == 0) and self.args.tensorboard):
(summary, l, sl, tra, trsPara) = sess.run([self.model.summaryMerged, self.model.loss, self.model.sequence_loss, self.model.train, self.model.transition_params], feed_dict=feed_dict1)
self.tbWriter.add_summary(summary, self.global_step)
else:
(l, sl, tra, trsPara) = sess.run([self.model.loss, self.model.sequence_loss, self.model.train, self.model.transition_params], feed_dict=feed_dict1)
return (l, sl, tra, trsPara)
def dev1epoch(self, data, trsPara, sess, infoInput=None, epoch=None, report=False):
predictionResult = list()
viterbi_scoreList = list()
predictionWOCRFResult = list()
dev_x = list()
dev_ans = list()
dev_len = list()
for b_idx in range(len(self.m_batchgroup[data])):
(x_minibatch, y_minibatch, xlen_minibatch, x_char_minibatch) = idx2data(self.m_batchgroup[data][b_idx], self.m_x_data[data], self.m_x_char_data[data], self.m_answerData[data], self.m_lengthData[data])
(x_minibatch, y_minibatch, x_char_minibatch, maxLen) = batch_padding(x_minibatch, y_minibatch, xlen_minibatch, x_char_minibatch)
(x_charPad, x_charLen) = char_padding(inputs=x_char_minibatch, voca_size=len(self.ID2char), embedding_dim=self.args.ce_dim, wordMaxLen=maxLen, charMaxLen=self.args.char_maxlen)
dev_x.extend(x_minibatch)
dev_ans.extend(y_minibatch)
dev_len.extend(xlen_minibatch)
infos = np.zeros([len(x_minibatch), maxLen, (self.args.hidden_size * 2)])
if (infoInput is None):
infoOuts = list()
for i in range(5):
infoOuts.append(np.zeros([len(x_minibatch), maxLen, (self.args.hidden_size * 2)]))
else:
infoOuts = list()
for infotmp in infoInput[data]:
infoOuts.append(infotmp[b_idx])
tmpLen = len(infoOuts)
for i in range(tmpLen, 5):
infoOuts.append(np.zeros([len(x_minibatch), maxLen, (self.args.hidden_size * 2)]))
feed_dict2 = {self.model.X: x_minibatch, self.model.Y: y_minibatch, self.model.X_len: xlen_minibatch, self.model.X_char: x_charPad, self.model.X_char_len: x_charLen, self.model.maxLen: maxLen, self.model.lr: self.lr, self.model.infos: infos, self.model.infos1: infoOuts[0], self.model.infos2: infoOuts[1], self.model.infos3: infoOuts[2], self.model.infos4: infoOuts[3], self.model.infos5: infoOuts[4], self.model.emb_dropout: 0, self.model.lstm_dropout: 0}
logitsPridict = sess.run(self.model.logits, feed_dict=feed_dict2)
for sentence in logitsPridict:
(viterbi, viterbi_score) = tf.contrib.crf.viterbi_decode(sentence, trsPara)
predictionResult.append(viterbi)
viterbi_scoreList.append(viterbi_score)
predictionWOCRFResult.extend(sess.run(self.model.prediction, feed_dict=feed_dict2))
predictionResult = viterbi_pp(predictionResult, dev_len, self.args.num_class)
prfValResult = prf(predictionResult, dev_ans, dev_len)
if (data == self.m_dev):
infoschk = sess.run([self.model.infos1_w, self.model.infos2_w, self.model.infos3_w, self.model.infos4_w, self.model.infos5_w])
print(('Learning Rate : %.4f' % self.lr))
self.lr = (self.lr * (1 - self.args.lr_decay))
if ((int((epoch / 6)) == 30) and self.args.lr_pump):
self.lr = self.args.lr
if report:
print(('[%s] Precision : %.4f | Recall : %.4f | F1: %.4f' % (data, prfValResult[0], prfValResult[1], prfValResult[2])))
prfValWOCRFResult = None
return (predictionResult, prfValResult, prfValWOCRFResult, dev_x, dev_ans, dev_len)
def info1epoch(self, data, dataset, sess):
m_x_data = dataset.m_x_data
m_x_char_data = dataset.m_x_char_data
m_answerData = dataset.m_answerData
m_lengthData = dataset.m_lengthData
m_batchgroup = dataset.m_batchgroup
lstmOuts = list()
for b_idx in range(len(m_batchgroup[data])):
(x_minibatch, y_minibatch, xlen_minibatch, x_char_minibatch) = idx2data(m_batchgroup[data][b_idx], m_x_data[data], m_x_char_data[data], m_answerData[data], m_lengthData[data])
(x_minibatch, y_minibatch, x_char_minibatch, maxLen) = batch_padding(x_minibatch, y_minibatch, xlen_minibatch, x_char_minibatch)
(x_charPad, x_charLen) = char_padding(inputs=x_char_minibatch, voca_size=len(self.ID2char), embedding_dim=self.args.ce_dim, wordMaxLen=maxLen, charMaxLen=self.args.char_maxlen)
infoInputtmp = np.zeros([len(x_minibatch), maxLen, (self.args.hidden_size * 2)])
feed_dict2 = {self.model.X: x_minibatch, self.model.Y: y_minibatch, self.model.X_len: xlen_minibatch, self.model.X_char: x_charPad, self.model.X_char_len: x_charLen, self.model.maxLen: maxLen, self.model.lr: self.lr, self.model.infos: infoInputtmp, self.model.infos1: infoInputtmp, self.model.infos2: infoInputtmp, self.model.infos3: infoInputtmp, self.model.infos4: infoInputtmp, self.model.infos5: infoInputtmp, self.model.emb_dropout: 0, self.model.lstm_dropout: 0}
lstmOut = sess.run(self.model.outputs_concat, feed_dict=feed_dict2)
lstmOuts.append(lstmOut)
return lstmOuts |
@pytest.mark.parametrize('fixture, result', [('script_callable_legacy_table', []), ('script_callable_legacy_string', []), ('script_reference_console', []), ('script_reference_file', [(Path('bin') / 'script.sh')])])
def test_builder_convert_script_files(fixture: str, result: list[Path]) -> None:
project_root = ((Path(__file__).parent / 'fixtures') / fixture)
script_files = Builder(Factory().create_poetry(project_root)).convert_script_files()
assert ([p.relative_to(project_root) for p in script_files] == result) |
class QlArchMIPS(QlArch):
type = QL_ARCH.MIPS
bits = 32
def __init__(self, ql: Qiling, endian: QL_ENDIAN):
super().__init__(ql)
self._init_endian = endian
@cached_property
def uc(self) -> Uc:
endian = {QL_ENDIAN.EB: UC_MODE_BIG_ENDIAN, QL_ENDIAN.EL: UC_MODE_LITTLE_ENDIAN}[self.endian]
return Uc(UC_ARCH_MIPS, (UC_MODE_MIPS32 + endian))
@cached_property
def regs(self) -> QlRegisterManager:
regs_map = dict(**mips_const.reg_map, **mips_const.reg_map_afpr128, **mips_const.reg_map_fpu)
pc_reg = 'pc'
sp_reg = 'sp'
return QlRegisterManager(self.uc, regs_map, pc_reg, sp_reg)
@cached_property
def disassembler(self) -> Cs:
endian = {QL_ENDIAN.EL: CS_MODE_LITTLE_ENDIAN, QL_ENDIAN.EB: CS_MODE_BIG_ENDIAN}[self.endian]
return Cs(CS_ARCH_MIPS, (CS_MODE_MIPS32 + endian))
@cached_property
def assembler(self) -> Ks:
endian = {QL_ENDIAN.EL: KS_MODE_LITTLE_ENDIAN, QL_ENDIAN.EB: KS_MODE_BIG_ENDIAN}[self.endian]
return Ks(KS_ARCH_MIPS, (KS_MODE_MIPS32 + endian))
def endian(self) -> QL_ENDIAN:
return self._init_endian |
def ql_syscall_faccessat(ql: Qiling, dirfd: int, filename: int, mode: int):
vpath = ql.os.utils.read_cstring(filename)
vpath_at = virtual_abspath_at(ql, vpath, dirfd)
if (vpath_at is None):
regreturn = (- 1)
else:
hpath = ql.os.path.virtual_to_host_path(vpath_at)
if (not ql.os.path.is_safe_host_path(hpath)):
raise PermissionError(f'unsafe path: {hpath}')
regreturn = (0 if os.path.exists(hpath) else (- 1))
ql.log.debug(f'faccessat({dirfd:d}, "{vpath}", {mode:d}) = {regreturn}')
return regreturn |
class NamespaceReader(abc.TraversableResources):
def __init__(self, namespace_path):
if ('NamespacePath' not in str(namespace_path)):
raise ValueError('Invalid path')
self.path = MultiplexedPath(*list(namespace_path))
def resource_path(self, resource):
return str(self.path.joinpath(resource))
def files(self):
return self.path |
class Migration(migrations.Migration):
dependencies = [('successstories', '0008_auto__2000')]
operations = [migrations.AlterField(model_name='story', name='slug', field=models.SlugField(max_length=200, unique=True)), migrations.AlterField(model_name='storycategory', name='slug', field=models.SlugField(max_length=200, unique=True))] |
class TestHTML():
@pytest.mark.parametrize('pause, expectation', [(0.4, 400), (1, '^((?:[01]\\d|2[0-3]):[0-5]\\d:[0-5]\\d$)')])
def test_durations(self, pytester, pause, expectation):
pytester.makepyfile(f'''
import time
def test_sleep():
time.sleep({pause})
''')
page = run(pytester)
duration = get_text(page, "#results-table td[class='col-duration']")
total_duration = get_text(page, "p[class='run-count']")
if (pause < 1):
assert_that(int(duration.replace('ms', ''))).is_between(expectation, (expectation * 2))
assert_that(total_duration).matches('\\d+\\s+ms')
else:
assert_that(duration).matches(expectation)
assert_that(total_duration).matches('\\d{2}:\\d{2}:\\d{2}')
def test_duration_format_hook(self, pytester):
pytester.makeconftest('\n def pytest_html_duration_format(duration):\n return str(round(duration * 1000)) + " seconds"\n ')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
assert_results(page, passed=1)
duration = get_text(page, "#results-table td[class='col-duration']")
assert_that(duration).contains('seconds')
def test_total_number_of_tests_zero(self, pytester):
page = run(pytester)
assert_results(page)
total = get_text(page, "p[class='run-count']")
assert_that(total).matches('0 test(?!s)')
def test_total_number_of_tests_singular(self, pytester):
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
assert_results(page, passed=1)
total = get_text(page, "p[class='run-count']")
assert_that(total).matches('1 test(?!s)')
def test_total_number_of_tests_plural(self, pytester):
pytester.makepyfile('\n def test_pass_one(): pass\n def test_pass_two(): pass\n ')
page = run(pytester)
assert_results(page, passed=2)
total = get_text(page, "p[class='run-count']")
assert_that(total).matches('2 tests(?!\\S)')
def test_pass(self, pytester):
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
assert_results(page, passed=1)
def test_skip(self, pytester):
reason = str(random.random())
pytester.makepyfile(f'''
import pytest
def test_skip():
pytest.skip("{reason}")
''')
page = run(pytester)
assert_results(page, skipped=1, total_tests=0)
log = get_text(page, "div[class='log']")
assert_that(log).contains(reason)
def test_skip_function_marker(self, pytester):
reason = str(random.random())
pytester.makepyfile(f'''
import pytest
@pytest.mark.skip(reason="{reason}")
def test_skip():
assert True
''')
page = run(pytester)
assert_results(page, skipped=1, total_tests=0)
log = get_text(page, "div[class='log']")
assert_that(log).contains(reason)
def test_skip_class_marker(self, pytester):
reason = str(random.random())
pytester.makepyfile(f'''
import pytest
@pytest.mark.skip(reason="{reason}")
class TestSkip:
def test_skip():
assert True
''')
page = run(pytester)
assert_results(page, skipped=1, total_tests=0)
log = get_text(page, "div[class='log']")
assert_that(log).contains(reason)
def test_fail(self, pytester):
pytester.makepyfile('def test_fail(): assert False')
page = run(pytester)
assert_results(page, failed=1)
assert_that(get_log(page)).contains('AssertionError')
assert_that(get_text(page, "div[class='log'] span.error")).matches('^E\\s+assert False$')
def test_xfail(self, pytester):
reason = str(random.random())
pytester.makepyfile(f'''
import pytest
def test_xfail():
pytest.xfail("{reason}")
''')
page = run(pytester)
assert_results(page, xfailed=1)
assert_that(get_log(page)).contains(reason)
def test_xfail_function_marker(self, pytester):
reason = str(random.random())
pytester.makepyfile(f'''
import pytest
@pytest.mark.xfail(reason="{reason}")
def test_xfail():
assert False
''')
page = run(pytester)
assert_results(page, xfailed=1)
assert_that(get_log(page)).contains(reason)
def test_xfail_class_marker(self, pytester):
pytester.makepyfile('\n import pytest\n @pytest.mark.xfail(reason="broken")\n class TestXFail:\n def test_xfail(self):\n assert False\n ')
page = run(pytester)
assert_results(page, xfailed=1)
def test_xpass(self, pytester):
pytester.makepyfile('\n import pytest\n @pytest.mark.xfail()\n def test_xpass():\n assert True\n ')
page = run(pytester)
assert_results(page, xpassed=1)
def test_xpass_class_marker(self, pytester):
pytester.makepyfile('\n import pytest\n @pytest.mark.xfail()\n class TestXPass:\n def test_xpass(self):\n assert True\n ')
page = run(pytester)
assert_results(page, xpassed=1)
def test_rerun(self, pytester):
pytester.makepyfile('\n import pytest\n import time\n\n @pytest.mark.flaky(reruns=2)\n def test_example():\n time.sleep(0.2)\n assert False\n ')
page = run(pytester)
assert_results(page, failed=1, rerun=2, total_tests=1)
def test_conditional_xfails(self, pytester):
pytester.makepyfile("\n import pytest\n @pytest.mark.xfail(False, reason='reason')\n def test_fail(): assert False\n @pytest.mark.xfail(False, reason='reason')\n def test_pass(): pass\n @pytest.mark.xfail(True, reason='reason')\n def test_xfail(): assert False\n @pytest.mark.xfail(True, reason='reason')\n def test_xpass(): pass\n ")
page = run(pytester)
assert_results(page, passed=1, failed=1, xfailed=1, xpassed=1)
def test_setup_error(self, pytester):
pytester.makepyfile('\n import pytest\n @pytest.fixture\n def arg(request):\n raise ValueError()\n def test_function(arg):\n pass\n ')
page = run(pytester)
assert_results(page, error=1, total_tests=0)
col_name = get_text(page, "td[class='col-testId']")
assert_that(col_name).contains('::setup')
assert_that(get_log(page)).contains('ValueError')
@pytest.mark.parametrize('title', ['', 'Special Report'])
def test_report_title(self, pytester, title):
pytester.makepyfile('def test_pass(): pass')
if title:
pytester.makeconftest(f'''
import pytest
def pytest_html_report_title(report):
report.title = "{title}"
''')
expected_title = (title if title else 'report.html')
page = run(pytester)
assert_that(get_text(page, '#head-title')).is_equal_to(expected_title)
assert_that(get_text(page, "h1[id='title']")).is_equal_to(expected_title)
def test_resources_inline_css(self, pytester):
pytester.makepyfile('def test_pass(): pass')
page = run(pytester, cmd_flags=['--self-contained-html'])
content = file_content()
assert_that(get_text(page, 'head style').strip()).contains(content)
def test_resources_css(self, pytester):
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
assert_that(page.select_one('head link')['href']).is_equal_to(str(Path('assets', 'style.css')))
def test_custom_content_in_summary(self, pytester):
content = {'prefix': str(random.random()), 'summary': str(random.random()), 'postfix': str(random.random())}
pytester.makeconftest(f'''
import pytest
def pytest_html_results_summary(prefix, summary, postfix):
prefix.append(r"<p>prefix is {content['prefix']}</p>")
summary.extend([r"<p>summary is {content['summary']}</p>"])
postfix.extend([r"<p>postfix is {content['postfix']}</p>"])
''')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
elements = page.select('.additional-summary p')
assert_that(elements).is_length(3)
for element in elements:
key = re.search('(\\w+).*', element.string).group(1)
value = content.pop(key)
assert_that(element.string).contains(value)
def test_extra_html(self, pytester):
content = str(random.random())
pytester.makeconftest(f'''
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if report.when == 'call':
from pytest_html import extras
report.extras = [extras.html('<div>{content}</div>')]
''')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
assert_that(page.select_one('.extraHTML').string).is_equal_to(content)
@pytest.mark.parametrize('content, encoded', [("u'\x81'", 'woE='), ("'foo'", 'Zm9v'), ("b'\\xe2\\x80\\x93'", '4oCT')])
def test_extra_text(self, pytester, content, encoded):
pytester.makeconftest(f'''
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if report.when == 'call':
from pytest_html import extras
report.extras = [extras.text({content})]
''')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester, cmd_flags=['--self-contained-html'])
element = page.select_one("a[class='col-links__extra text']")
assert_that(element.string).is_equal_to('Text')
assert_that(element['href']).is_equal_to(f'data:text/plain;charset=utf-8;base64,{encoded}')
def test_extra_json(self, pytester):
content = {str(random.random()): str(random.random())}
pytester.makeconftest(f'''
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if report.when == 'call':
from pytest_html import extras
report.extras = [extras.json({content})]
''')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester, cmd_flags=['--self-contained-html'])
content_str = json.dumps(content)
data = b64encode(content_str.encode('utf-8')).decode('ascii')
element = page.select_one("a[class='col-links__extra json']")
assert_that(element.string).is_equal_to('JSON')
assert_that(element['href']).is_equal_to(f'data:application/json;charset=utf-8;base64,{data}')
def test_extra_url(self, pytester):
pytester.makeconftest("\n import pytest\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_makereport(item, call):\n outcome = yield\n report = outcome.get_result()\n from pytest_html import extras\n report.extras = [extras.url(f'{report.when}')]\n ")
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
elements = page.select("a[class='col-links__extra url']")
assert_that(elements).is_length(3)
for each in zip(elements, ['setup', 'call', 'teardown']):
(element, when) = each
assert_that(element.string).is_equal_to('URL')
assert_that(element['href']).is_equal_to(when)
@pytest.mark.parametrize('mime_type, extension', [('image/png', 'png'), ('image/png', 'image'), ('image/jpeg', 'jpg'), ('image/svg+xml', 'svg')])
def test_extra_image(self, pytester, mime_type, extension):
content = str(random.random())
charset = 'utf-8'
data = base64.b64encode(content.encode(charset)).decode(charset)
pytester.makeconftest(f'''
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if report.when == 'call':
from pytest_html import extras
report.extras = [extras.{extension}('{data}')]
''')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester, cmd_flags=['--self-contained-html'])
src = f'data:{mime_type};base64,{data}'
element = page.select_one('.media img')
assert_that(str(element)).is_equal_to(f'<img src="{src}"/>')
@pytest.mark.parametrize('mime_type, extension', [('video/mp4', 'mp4')])
def test_extra_video(self, pytester, mime_type, extension):
content = str(random.random())
charset = 'utf-8'
data = base64.b64encode(content.encode(charset)).decode(charset)
pytester.makeconftest(f'''
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if report.when == 'call':
from pytest_html import extras
report.extras = [extras.{extension}('{data}')]
''')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester, cmd_flags=['--self-contained-html'])
src = f'data:{mime_type};base64,{data}'
element = page.select_one('.media video')
assert_that(str(element)).is_equal_to(f'''<video controls="">
<source src="{src}" type="{mime_type}"/>
</video>''')
def test_xdist(self, pytester):
pytester.makepyfile('def test_xdist(): pass')
page = run(pytester, cmd_flags=['-n1'])
assert_results(page, passed=1)
def test_results_table_hook_append(self, pytester):
header_selector = '#results-table-head tr:nth-child(1) th:nth-child({})'
row_selector = '#results-table tr:nth-child(1) td:nth-child({})'
pytester.makeconftest('\n def pytest_html_results_table_header(cells):\n cells.append("<th>Description</th>")\n cells.append(\n \'<th class="sortable time" data-column-type="time">Time</th>\'\n )\n\n def pytest_html_results_table_row(report, cells):\n cells.append("<td>A description</td>")\n cells.append(\'<td class="col-time">A time</td>\')\n ')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
description_index = 5
time_index = 6
assert_that(get_text(page, header_selector.format(time_index))).is_equal_to('Time')
assert_that(get_text(page, header_selector.format(description_index))).is_equal_to('Description')
assert_that(get_text(page, row_selector.format(time_index))).is_equal_to('A time')
assert_that(get_text(page, row_selector.format(description_index))).is_equal_to('A description')
def test_results_table_hook_insert(self, pytester):
header_selector = '#results-table-head tr:nth-child(1) th:nth-child({})'
row_selector = '#results-table tr:nth-child(1) td:nth-child({})'
pytester.makeconftest('\n def pytest_html_results_table_header(cells):\n cells.insert(2, "<th>Description</th>")\n cells.insert(\n 1,\n \'<th class="sortable time" data-column-type="time">Time</th>\'\n )\n\n def pytest_html_results_table_row(report, cells):\n cells.insert(2, "<td>A description</td>")\n cells.insert(1, \'<td class="col-time">A time</td>\')\n ')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
description_index = 4
time_index = 2
assert_that(get_text(page, header_selector.format(time_index))).is_equal_to('Time')
assert_that(get_text(page, header_selector.format(description_index))).is_equal_to('Description')
assert_that(get_text(page, row_selector.format(time_index))).is_equal_to('A time')
assert_that(get_text(page, row_selector.format(description_index))).is_equal_to('A description')
def test_results_table_hook_delete(self, pytester):
pytester.makeconftest('\n def pytest_html_results_table_row(report, cells):\n if report.skipped:\n del cells[:]\n ')
pytester.makepyfile("\n import pytest\n def test_skip():\n pytest.skip('reason')\n\n def test_pass(): pass\n\n ")
page = run(pytester)
assert_results(page, passed=1)
def test_results_table_hook_pop(self, pytester):
pytester.makeconftest('\n def pytest_html_results_table_header(cells):\n cells.pop()\n\n def pytest_html_results_table_row(report, cells):\n cells.pop()\n ')
pytester.makepyfile('def test_pass(): pass')
page = run(pytester)
header_columns = page.select('#results-table-head th')
assert_that(header_columns).is_length(3)
row_columns = page.select_one('.results-table-row').select('td:not(.extra)')
assert_that(row_columns).is_length(3)
@pytest.mark.parametrize('no_capture', ['', '-s'])
def test_standard_streams(self, pytester, no_capture):
pytester.makepyfile('\n import pytest\n import sys\n @pytest.fixture\n def setup():\n print("this is setup stdout")\n print("this is setup stderr", file=sys.stderr)\n yield\n print("this is teardown stdout")\n print("this is teardown stderr", file=sys.stderr)\n\n def test_streams(setup):\n print("this is call stdout")\n print("this is call stderr", file=sys.stderr)\n assert True\n ')
page = run(pytester, 'report.html', cmd_flags=[no_capture])
assert_results(page, passed=1)
log = get_log(page)
for when in ['setup', 'call', 'teardown']:
for stream in ['stdout', 'stderr']:
if no_capture:
assert_that(log).does_not_match(f'- Captured {stream} {when} -')
assert_that(log).does_not_match(f'this is {when} {stream}')
else:
assert_that(log).matches(f'- Captured {stream} {when} -')
assert_that(log).matches(f'this is {when} {stream}')
def test_collect_error(self, pytester):
error_msg = 'Non existent module'
pytester.makepyfile(f'''
import pytest
raise ImportError("{error_msg}")
''')
page = run(pytester)
assert_results(page, error=1)
log = get_log(page)
assert_that(log).matches(f'E\s+ImportError: {error_msg}')
def test_report_display_utf8(self, pytester):
pytester.makepyfile('\n import pytest\n @pytest.mark.parametrize("utf8", [("")])\n def test_pass(utf8):\n assert True\n ')
page = run(pytester)
assert_results(page, passed=1)
log = get_log(page)
assert_that(log).does_not_match('')
@pytest.mark.parametrize('outcome, occurrence', [(True, 1), (False, 2)])
def test_log_escaping(self, pytester, outcome, occurrence):
texts = ['0 Checking object <Chopstick Container> and more', '1 Checking object < > and more', '2 Checking object <> and more', '3 Checking object < C > and more', '4 Checking object <C > and more', '5 Checking object < and more', '6 Checking object < and more', '7 Checking object < C and more', '8 Checking object <C and more', '9 Checking object "<Chopstick Container>" and more', '10 Checking object "< >" and more', '11 Checking object "<>" and more', '12 Checking object "< C >" and more', '13 Checking object "<C >" and more']
test_file = 'def test_escape():\n'
for t in texts:
test_file += f''' print('{t}')
'''
test_file += f' assert {outcome}'
pytester.makepyfile(test_file)
page = run(pytester)
assert_results(page, passed=(1 if outcome else 0), failed=(1 if (not outcome) else 0))
log = get_log(page)
for each in texts:
count = log.count(each)
assert_that(count).is_equal_to(occurrence)
@pytest.mark.parametrize('sort, order', [(None, ['BBB', 'AAA', 'CCC']), ('result', ['BBB', 'AAA', 'CCC']), ('testId', ['AAA', 'BBB', 'CCC']), ('duration', ['CCC', 'BBB', 'AAA']), ('original', ['AAA', 'BBB', 'CCC'])])
def test_initial_sort(self, pytester, sort, order):
if (sort is not None):
pytester.makeini(f'''
[pytest]
initial_sort = {sort}
''')
pytester.makepyfile('\n import pytest\n from time import sleep\n\n def test_AAA():\n sleep(0.3)\n assert True\n\n def test_BBB():\n sleep(0.2)\n assert False\n\n def test_CCC():\n sleep(0.1)\n assert True\n ')
page = run(pytester)
assert_results(page, passed=2, failed=1)
result = page.select('td.col-testId')
assert_that(result).is_length(3)
for (row, expected) in zip(result, order):
assert_that(row.string).contains(expected) |
def test_add_row_button():
widget = QgridWidget(df=create_df())
event_history = init_event_history('row_added', widget=widget)
widget._handle_qgrid_msg_helper({'type': 'add_row'})
assert (event_history == [{'name': 'row_added', 'index': 4, 'source': 'gui'}])
added_index = event_history[0]['index']
expected_values = pd.Series({'qgrid_unfiltered_index': 4, 'A': 1, 'C': 1, 'D': 3, 'Date': pd.Timestamp('2013-01-02 00:00:00'), 'E': 'bar', 'F': 'fox'})
sort_idx = widget._df.loc[added_index].index
assert (widget._df.loc[added_index] == expected_values[sort_idx]).all() |
def check_connection_end_to_end(wrap_client: Callable[([CA, socket.socket, str], SslSocket)], wrap_server: Callable[([LeafCert, socket.socket], SslSocket)], key_type: KeyType) -> None:
def fake_ssl_client(ca: CA, raw_client_sock: socket.socket, hostname: str) -> None:
try:
wrapped_client_sock = wrap_client(ca, raw_client_sock, hostname)
wrapped_client_sock.send(b'x')
assert (wrapped_client_sock.recv(1) == b'y')
wrapped_client_sock.close()
except:
sys.excepthook(*sys.exc_info())
raise
finally:
raw_client_sock.close()
def fake_ssl_server(server_cert: LeafCert, raw_server_sock: socket.socket) -> None:
try:
wrapped_server_sock = wrap_server(server_cert, raw_server_sock)
assert (wrapped_server_sock.recv(1) == b'x')
wrapped_server_sock.send(b'y')
wrapped_server_sock.close()
except:
sys.excepthook(*sys.exc_info())
raise
finally:
raw_server_sock.close()
def doit(ca: CA, hostname: str, server_cert: LeafCert) -> None:
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
listener.listen(1)
raw_client_sock = socket.socket()
raw_client_sock.connect(listener.getsockname())
(raw_server_sock, _) = listener.accept()
listener.close()
with ThreadPoolExecutor(2) as tpe:
f1 = tpe.submit(fake_ssl_client, ca, raw_client_sock, hostname)
f2 = tpe.submit(fake_ssl_server, server_cert, raw_server_sock)
f1.result()
f2.result()
ca = CA(key_type=key_type)
intermediate_ca = ca.create_child_ca(key_type=key_type)
hostname = 'my-test-host.example.org'
doit(ca, hostname, ca.issue_cert(hostname, key_type=key_type))
doit(ca, hostname, intermediate_ca.issue_cert(hostname, key_type=key_type))
with pytest.raises(Exception):
doit(ca, 'asdf.example.org', ca.issue_cert(hostname, key_type=key_type))
bad_ca = CA()
with pytest.raises(Exception):
doit(bad_ca, hostname, ca.issue_cert(hostname, key_type=key_type)) |
class DNSlog():
def __init__(self):
self.headers = headers = {'Cookie': 'UM_distinctid=17d9ee9b99ad5-08c6a2266360e7-4c3f2779-1fa400-17d9ee9b99b2b1; CNZZDATA=--%7C; PHPSESSID=kolveuasn829nk9s0jfffjg4n2'}
def getdomain(self):
getdomain = requests.get(url=' headers=self.headers, timeout=60)
global domain
domain = str(getdomain.text)
print(domain)
def TestingData(self):
print('dnslog')
for i in range(20):
print(i)
refresh = requests.get(url=' headers=self.headers, timeout=60)
time.sleep(1)
if (domain in refresh.text):
print('dns,,')
sys.exit()
if (i == 14):
print('dnslog')
sys.exit() |
class Serial(SerialBase, PlatformSpecific):
def open(self):
if (self._port is None):
raise SerialException('Port must be configured before it can be used.')
if self.is_open:
raise SerialException('Port is already open.')
self.fd = None
try:
self.fd = os.open(self.portstr, ((os.O_RDWR | os.O_NOCTTY) | os.O_NONBLOCK))
except OSError as msg:
self.fd = None
raise SerialException(msg.errno, 'could not open port {}: {}'.format(self._port, msg))
(self.pipe_abort_read_r, self.pipe_abort_read_w) = (None, None)
(self.pipe_abort_write_r, self.pipe_abort_write_w) = (None, None)
try:
self._reconfigure_port(force_update=True)
try:
if (not self._dsrdtr):
self._update_dtr_state()
if (not self._rtscts):
self._update_rts_state()
except IOError as e:
if (e.errno not in (errno.EINVAL, errno.ENOTTY)):
raise
self._reset_input_buffer()
(self.pipe_abort_read_r, self.pipe_abort_read_w) = os.pipe()
(self.pipe_abort_write_r, self.pipe_abort_write_w) = os.pipe()
fcntl.fcntl(self.pipe_abort_read_r, fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(self.pipe_abort_write_r, fcntl.F_SETFL, os.O_NONBLOCK)
except BaseException:
try:
os.close(self.fd)
except Exception:
pass
self.fd = None
if (self.pipe_abort_read_w is not None):
os.close(self.pipe_abort_read_w)
self.pipe_abort_read_w = None
if (self.pipe_abort_read_r is not None):
os.close(self.pipe_abort_read_r)
self.pipe_abort_read_r = None
if (self.pipe_abort_write_w is not None):
os.close(self.pipe_abort_write_w)
self.pipe_abort_write_w = None
if (self.pipe_abort_write_r is not None):
os.close(self.pipe_abort_write_r)
self.pipe_abort_write_r = None
raise
self.is_open = True
def _reconfigure_port(self, force_update=False):
if (self.fd is None):
raise SerialException('Can only operate on a valid file descriptor')
if (self._exclusive is not None):
if self._exclusive:
try:
fcntl.flock(self.fd, (fcntl.LOCK_EX | fcntl.LOCK_NB))
except IOError as msg:
raise SerialException(msg.errno, 'Could not exclusively lock port {}: {}'.format(self._port, msg))
else:
fcntl.flock(self.fd, fcntl.LOCK_UN)
custom_baud = None
vmin = vtime = 0
if (self._inter_byte_timeout is not None):
vmin = 1
vtime = int((self._inter_byte_timeout * 10))
try:
orig_attr = termios.tcgetattr(self.fd)
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = orig_attr
except termios.error as msg:
raise SerialException('Could not configure port: {}'.format(msg))
cflag |= (termios.CLOCAL | termios.CREAD)
lflag &= (~ ((((((termios.ICANON | termios.ECHO) | termios.ECHOE) | termios.ECHOK) | termios.ECHONL) | termios.ISIG) | termios.IEXTEN))
for flag in ('ECHOCTL', 'ECHOKE'):
if hasattr(termios, flag):
lflag &= (~ getattr(termios, flag))
oflag &= (~ ((termios.OPOST | termios.ONLCR) | termios.OCRNL))
iflag &= (~ (((termios.INLCR | termios.IGNCR) | termios.ICRNL) | termios.IGNBRK))
if hasattr(termios, 'IUCLC'):
iflag &= (~ termios.IUCLC)
if hasattr(termios, 'PARMRK'):
iflag &= (~ termios.PARMRK)
try:
ispeed = ospeed = getattr(termios, 'B{}'.format(self._baudrate))
except AttributeError:
try:
ispeed = ospeed = self.BAUDRATE_CONSTANTS[self._baudrate]
except KeyError:
try:
ispeed = ospeed = BOTHER
except NameError:
ispeed = ospeed = getattr(termios, 'B38400')
try:
custom_baud = int(self._baudrate)
except ValueError:
raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))
else:
if (custom_baud < 0):
raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))
cflag &= (~ termios.CSIZE)
if (self._bytesize == 8):
cflag |= termios.CS8
elif (self._bytesize == 7):
cflag |= termios.CS7
elif (self._bytesize == 6):
cflag |= termios.CS6
elif (self._bytesize == 5):
cflag |= termios.CS5
else:
raise ValueError('Invalid char len: {!r}'.format(self._bytesize))
if (self._stopbits == serial.STOPBITS_ONE):
cflag &= (~ termios.CSTOPB)
elif (self._stopbits == serial.STOPBITS_ONE_POINT_FIVE):
cflag |= termios.CSTOPB
elif (self._stopbits == serial.STOPBITS_TWO):
cflag |= termios.CSTOPB
else:
raise ValueError('Invalid stop bit specification: {!r}'.format(self._stopbits))
iflag &= (~ (termios.INPCK | termios.ISTRIP))
if (self._parity == serial.PARITY_NONE):
cflag &= (~ ((termios.PARENB | termios.PARODD) | CMSPAR))
elif (self._parity == serial.PARITY_EVEN):
cflag &= (~ (termios.PARODD | CMSPAR))
cflag |= termios.PARENB
elif (self._parity == serial.PARITY_ODD):
cflag &= (~ CMSPAR)
cflag |= (termios.PARENB | termios.PARODD)
elif ((self._parity == serial.PARITY_MARK) and CMSPAR):
cflag |= ((termios.PARENB | CMSPAR) | termios.PARODD)
elif ((self._parity == serial.PARITY_SPACE) and CMSPAR):
cflag |= (termios.PARENB | CMSPAR)
cflag &= (~ termios.PARODD)
else:
raise ValueError('Invalid parity: {!r}'.format(self._parity))
if hasattr(termios, 'IXANY'):
if self._xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
else:
iflag &= (~ ((termios.IXON | termios.IXOFF) | termios.IXANY))
elif self._xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
else:
iflag &= (~ (termios.IXON | termios.IXOFF))
if hasattr(termios, 'CRTSCTS'):
if self._rtscts:
cflag |= termios.CRTSCTS
else:
cflag &= (~ termios.CRTSCTS)
elif hasattr(termios, 'CNEW_RTSCTS'):
if self._rtscts:
cflag |= termios.CNEW_RTSCTS
else:
cflag &= (~ termios.CNEW_RTSCTS)
if ((vmin < 0) or (vmin > 255)):
raise ValueError('Invalid vmin: {!r}'.format(vmin))
cc[termios.VMIN] = vmin
if ((vtime < 0) or (vtime > 255)):
raise ValueError('Invalid vtime: {!r}'.format(vtime))
cc[termios.VTIME] = vtime
if (force_update or ([iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr)):
termios.tcsetattr(self.fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
if (custom_baud is not None):
self._set_special_baudrate(custom_baud)
if (self._rs485_mode is not None):
self._set_rs485_mode(self._rs485_mode)
def close(self):
if self.is_open:
if (self.fd is not None):
os.close(self.fd)
self.fd = None
os.close(self.pipe_abort_read_w)
os.close(self.pipe_abort_read_r)
os.close(self.pipe_abort_write_w)
os.close(self.pipe_abort_write_r)
(self.pipe_abort_read_r, self.pipe_abort_read_w) = (None, None)
(self.pipe_abort_write_r, self.pipe_abort_write_w) = (None, None)
self.is_open = False
def in_waiting(self):
s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
return struct.unpack('I', s)[0]
def read(self, size=1):
if (not self.is_open):
raise PortNotOpenError()
read = bytearray()
timeout = Timeout(self._timeout)
while (len(read) < size):
try:
(ready, _, _) = select.select([self.fd, self.pipe_abort_read_r], [], [], timeout.time_left())
if (self.pipe_abort_read_r in ready):
os.read(self.pipe_abort_read_r, 1000)
break
if (not ready):
break
buf = os.read(self.fd, (size - len(read)))
except OSError as e:
if (e.errno not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR)):
raise SerialException('read failed: {}'.format(e))
except select.error as e:
if (e[0] not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR)):
raise SerialException('read failed: {}'.format(e))
else:
if (not buf):
raise SerialException('device reports readiness to read but returned no data (device disconnected or multiple access on port?)')
read.extend(buf)
if timeout.expired():
break
return bytes(read)
def cancel_read(self):
if self.is_open:
os.write(self.pipe_abort_read_w, b'x')
def cancel_write(self):
if self.is_open:
os.write(self.pipe_abort_write_w, b'x')
def write(self, data):
if (not self.is_open):
raise PortNotOpenError()
d = to_bytes(data)
tx_len = length = len(d)
timeout = Timeout(self._write_timeout)
while (tx_len > 0):
try:
n = os.write(self.fd, d)
if timeout.is_non_blocking:
return n
elif (not timeout.is_infinite):
if timeout.expired():
raise SerialTimeoutException('Write timeout')
(abort, ready, _) = select.select([self.pipe_abort_write_r], [self.fd], [], timeout.time_left())
if abort:
os.read(self.pipe_abort_write_r, 1000)
break
if (not ready):
raise SerialTimeoutException('Write timeout')
else:
assert (timeout.time_left() is None)
(abort, ready, _) = select.select([self.pipe_abort_write_r], [self.fd], [], None)
if abort:
os.read(self.pipe_abort_write_r, 1)
break
if (not ready):
raise SerialException('write failed (select)')
d = d[n:]
tx_len -= n
except SerialException:
raise
except OSError as e:
if (e.errno not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR)):
raise SerialException('write failed: {}'.format(e))
except select.error as e:
if (e[0] not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR)):
raise SerialException('write failed: {}'.format(e))
if ((not timeout.is_non_blocking) and timeout.expired()):
raise SerialTimeoutException('Write timeout')
return (length - len(d))
def flush(self):
if (not self.is_open):
raise PortNotOpenError()
termios.tcdrain(self.fd)
def _reset_input_buffer(self):
termios.tcflush(self.fd, termios.TCIFLUSH)
def reset_input_buffer(self):
if (not self.is_open):
raise PortNotOpenError()
self._reset_input_buffer()
def reset_output_buffer(self):
if (not self.is_open):
raise PortNotOpenError()
termios.tcflush(self.fd, termios.TCOFLUSH)
def send_break(self, duration=0.25):
if (not self.is_open):
raise PortNotOpenError()
termios.tcsendbreak(self.fd, int((duration / 0.25)))
def _update_rts_state(self):
if self._rts_state:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)
def _update_dtr_state(self):
if self._dtr_state:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)
def cts(self):
if (not self.is_open):
raise PortNotOpenError()
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return ((struct.unpack('I', s)[0] & TIOCM_CTS) != 0)
def dsr(self):
if (not self.is_open):
raise PortNotOpenError()
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return ((struct.unpack('I', s)[0] & TIOCM_DSR) != 0)
def ri(self):
if (not self.is_open):
raise PortNotOpenError()
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return ((struct.unpack('I', s)[0] & TIOCM_RI) != 0)
def cd(self):
if (not self.is_open):
raise PortNotOpenError()
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return ((struct.unpack('I', s)[0] & TIOCM_CD) != 0)
def out_waiting(self):
s = fcntl.ioctl(self.fd, TIOCOUTQ, TIOCM_zero_str)
return struct.unpack('I', s)[0]
def fileno(self):
if (not self.is_open):
raise PortNotOpenError()
return self.fd
def set_input_flow_control(self, enable=True):
if (not self.is_open):
raise PortNotOpenError()
if enable:
termios.tcflow(self.fd, termios.TCION)
else:
termios.tcflow(self.fd, termios.TCIOFF)
def set_output_flow_control(self, enable=True):
if (not self.is_open):
raise PortNotOpenError()
if enable:
termios.tcflow(self.fd, termios.TCOON)
else:
termios.tcflow(self.fd, termios.TCOOFF)
def nonblocking(self):
import warnings
warnings.warn('nonblocking() has no effect, already nonblocking', DeprecationWarning) |
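# Illustrative sketch (not part of the original module): the abort pipes set up
# by this class let another thread interrupt a blocking read() without closing
# the port. `port` is assumed to be an open instance of the POSIX serial class above.
import threading

def read_with_watchdog(port, size=1024, deadline=5.0):
    """Cancel a pending read() after `deadline` seconds via the abort pipe."""
    timer = threading.Timer(deadline, port.cancel_read)  # writes b'x' to pipe_abort_read_w
    timer.start()
    try:
        return port.read(size)  # select() wakes on the abort pipe and returns early
    finally:
        timer.cancel()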
def basecompiledir_ls():
subdirs = []
others = []
for f in os.listdir(config.base_compiledir):
if os.path.isdir(os.path.join(config.base_compiledir, f)):
subdirs.append(f)
else:
others.append(f)
subdirs = sorted(subdirs)
others = sorted(others)
print(f'Base compile dir is {config.base_compiledir}')
print('Sub-directories (possible compile caches):')
for d in subdirs:
print(f' {d}')
if (not subdirs):
print(' (None)')
if others:
print()
print('Other files in base_compiledir:')
for f in others:
print(f' {f}') |
class _ModelFallbackWrapper(GenerationMixin):
__slots__ = ('_optimized', '_default')
def __init__(self, optimized, default):
self._optimized = optimized
self._default = default
def __call__(self, *args, **kwargs):
if (kwargs['past_key_values'] is None):
return self._default(*args, **kwargs)
trace_graph_inputs = []
kwargs.pop('position_ids', None)
for (k, v) in kwargs.items():
if ((v is not None) and (not isinstance(v, bool))):
trace_graph_inputs.append(v)
trace_graph_inputs = tuple(trace_graph_inputs)
outputs = self._optimized(*trace_graph_inputs)
lm_logits = outputs[0]
past_key_values = outputs[1]
fixed_output = CausalLMOutputWithPast(loss=None, logits=lm_logits, past_key_values=past_key_values, hidden_states=None, attentions=None)
return fixed_output
def __getattr__(self, item):
return getattr(self._default, item)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, use_cache=None, **kwargs):
return self._default.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, **kwargs)
def _reorder_cache(self, past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
return self._default._reorder_cache(past_key_values, beam_idx) |
def loadAWSServiceControlPolicy(neo4j_session, data_path, account_name):
logger.info("[*] Loading AWS Service Control Policy into neo4j instance for AWS account '%s'", account_name)
ingest_aws_service_control_policy = 'merge(scp:AWSPolicy:AWSServiceControlPolicy {Arn:$Arn}) \n set scp.Id=$Id,\n scp.Arn=$Arn,\n scp.Name=$Name,\n scp.Description=$Description,\n scp.AwsManaged=$AwsManaged,\n scp.DocumentVersion=$DocumentVersion,\n scp.DocumentId=$DocumentId \n with scp\n merge (resource {Arn: $ResourceArn}) set resource:AWSPolicyResource \n\t\t\t\t\t\t\t\t with scp,resource \n merge (scp)-[statement:AWSPolicyStatement \n { DocumentVersion:$DocumentVersion,\n\t\t\t\t\t\t\t\t DocumentId:$DocumentId,\n Effect:$Effect ,\n ActionKey:$ActionKey,\n Action:$Action,\n Condition:$Condition,\n Sid:$Sid,\n ResourceKey:$ResourceKey,\n Resource:$Resource,\n Principal:$Principal,\n PrincipalKey:$PrincipalKey,\n Aaia_ExpandedAction: $Aaia_ExpandedAction}]->(resource)\n\t\t\t\t\t\t\t\t'
policies = getAWSServiceControlPolicy(data_path, account_name)
for policy in policies:
policy_document_details = getPolicyDocumentDetails(policy['Content'])
for statement in policy_document_details['Statement']:
policy_statement_details = getPolicyStatementDetails(statement)
statement_principal = policy_statement_details['Principal']
statement_action = policy_statement_details['Action']
statement_resource = policy_statement_details['Resource']
if (statement_principal == set()):
statement_principal = ''
if (statement_action == set()):
statement_action = ''
if (statement_resource == set()):
statement_resource = ''
for resource in policy_statement_details['Resource']:
neo4j_session.run(ingest_aws_service_control_policy, Aaia_ExpandedAction=policy_statement_details['Aaia_ExpandedAction'], Name=policy['Name'], Id=policy['Id'], Arn=policy['Arn'], Description=policy['Description'], AwsManaged=policy['AwsManaged'], DocumentVersion=policy_document_details['Version'], DocumentId=policy_document_details['Id'], Effect=policy_statement_details['Effect'], ActionKey=policy_statement_details['ActionKey'], Action=str(statement_action).replace('[', '').replace(']', '').replace('{', '').replace('}', '').replace("'", ''), Condition=str(policy_statement_details['Condition']), Sid=policy_statement_details['Sid'], ResourceKey=policy_statement_details['ResourceKey'], Resource=str(statement_resource).replace('[', '').replace(']', '').replace('{', '').replace('}', '').replace("'", ''), Principal=str(statement_principal).replace('[', '').replace(']', '').replace('{', '').replace('}', '').replace("'", ''), PrincipalKey=policy_statement_details['PrincipalKey'], ResourceArn=resource)
logger.info("[*] Completed loading AWS Service Control Policy into neo4j instance for AWS account '%s'", account_name) |
class PedestrianAnimation(_AnimationType):
def __init__(self, motion=None, animation=None):
self.motion = convert_enum(motion, PedestrianMotionType, True)
self.animation = animation
self.gestures = []
def __eq__(self, other):
if isinstance(other, PedestrianAnimation):
if ((other.get_attributes() == self.get_attributes()) and (other.gestures == self.gestures)):
return True
return False
def parse(element):
motion = convert_enum(element.attrib['motion'], PedestrianMotionType)
animation = element.attrib['userDefinedPedestrianAnimation']
pa = PedestrianAnimation(motion, animation)
for gesture in element.findall('PedestrianGesture'):
pa.add_gesture(convert_enum(gesture.attrib['gesture'], PedestrianGestureType))
return pa
def add_gesture(self, gesture):
self.gestures.append(convert_enum(gesture, PedestrianGestureType))
return self
def get_attributes(self):
retdict = {}
retdict['motion'] = self.motion.get_name()
retdict['userDefinedPedestrianAnimation'] = str(self.animation)
return retdict
def get_element(self):
if (not self.isVersion(minor=2)):
raise OpenSCENARIOVersionError('PedestrianAnimation was introduced in OpenSCENARIO V1.2')
element = ET.Element('PedestrianAnimation', attrib=self.get_attributes())
for gesture in self.gestures:
ET.SubElement(element, 'PedestrianGesture', attrib={'gesture': gesture.get_name()})
return element |
class ImportWizard(QtWidgets.QWizard):
def __init__(self):
QtWidgets.QWizard.__init__(self)
self.setMinimumSize(500, 400)
self.resize(700, 500)
self.setPreviewData(None)
self.selectFilePage = SelectFilePage()
self.setParametersPage = SetParametersPage()
self.resultPage = ResultPage()
self.addPage(self.selectFilePage)
self.addPage(self.setParametersPage)
self.addPage(self.resultPage)
self.setWindowTitle(translate('importwizard', 'Import data'))
self.currentIdChanged.connect(self.onCurrentIdChanged)
def onCurrentIdChanged(self, id):
if (self.nextId() == (- 1)):
self.button(QtWidgets.QWizard.CancelButton).hide()
else:
self.button(QtWidgets.QWizard.CancelButton).show()
def open(self, filename):
if self.isVisible():
QtWidgets.QMessageBox.information(self, translate('importwizard', 'Import data wizard'), translate('importwizard', 'The import data wizard is already open'))
return
self.restart()
self.selectFilePage.txtFilename.setText(filename)
self.selectFilePage.updatePreview()
self.show()
def field(self, name):
if (name == 'usecols'):
return self.setParametersPage.selectedColumns()
elif (name == 'columnnames'):
return self.setParametersPage.columnNames()
else:
return QtWidgets.QWizard.field(self, name)
def setPreviewData(self, data):
self._previewData = data
def previewData(self):
if (self._previewData is None):
raise RuntimeError('Preview data not loaded')
return self._previewData |
@dataclasses.dataclass(frozen=True)
class SuperMetroidPerGameOptions(PerGameOptions):
input_path: (Path | None) = None
output_directory: (Path | None) = None
output_format: str = 'smc'
    @property
    def as_json(self):
return {**super().as_json, 'input_path': (str(self.input_path) if (self.input_path is not None) else None), 'output_directory': (str(self.output_directory) if (self.output_directory is not None) else None), 'output_format': self.output_format}
    @classmethod
    def from_json(cls, value: dict) -> SuperMetroidPerGameOptions:
game = RandovaniaGame.SUPER_METROID
cosmetic_patches = game.data.layout.cosmetic_patches.from_json(value['cosmetic_patches'])
return cls(cosmetic_patches=cosmetic_patches, input_path=decode_if_not_none(value['input_path'], Path), output_directory=decode_if_not_none(value['output_directory'], Path), output_format=value['output_format']) |
def event_data_generator_bert_mrc_mul(input_Xs, Ys, token_type_ids, query_lens):
for index in range(len(input_Xs)):
input_x = input_Xs[index]
y = Ys[index]
token_type_id = token_type_ids[index]
query_len = query_lens[index]
(yield ((input_x, len(input_x), query_len, token_type_id), y)) |
def StyleGAN2_FLOPCal(generator_dict):
styled_conv_FLOPs = Styled_Conv_FLOPCal(generator_dict, return_detail=False)
toRGB_FLOPs = ToRGB_Conv_FLOPCal(generator_dict, False)
mapping_network_FLOPs = Mapping_Network_FLOPCal(generator_dict)
style_mod_FLOPs = Style_Modulation_FLOPCal(generator_dict)
all_FLOPs = sum([styled_conv_FLOPs, toRGB_FLOPs, mapping_network_FLOPs, style_mod_FLOPs])
return all_FLOPs |
def parsexml_(infile, parser=None, **kwargs):
if (parser is None):
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
parser = etree_.XMLParser()
try:
if isinstance(infile, os.PathLike):
infile = os.path.join(infile)
except AttributeError:
pass
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc |
def console_progress():
def progress(totalhashed, totalsize):
msg = (' ' * 30)
if (totalhashed < totalsize):
msg = ('%5.1f%% complete' % ((totalhashed * 100.0) / totalsize))
sys.stdout.write((msg + ' \r'))
sys.stdout.flush()
try:
return (progress if sys.stdout.isatty() else None)
except AttributeError:
return None |
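# Small usage sketch: console_progress() returns a progress callback when stdout
# is a TTY and None otherwise, so callers guard on it before reporting progress.
progress_cb = console_progress()
if progress_cb is not None:
    for done in range(0, 1001, 250):
        progress_cb(done, 1000)  # prints e.g. ' 25.0% complete' and clears the line at 100%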
class DequantizeFunc(torch.autograd.Function):
    @staticmethod
    def forward(ctx, tensor: torch.Tensor, scale: torch.Tensor, offset: torch.Tensor):
x_dequant = ((tensor + offset) * scale)
ctx.tensor_requires_grad = tensor.requires_grad
ctx.scale_requires_grad = scale.requires_grad
ctx.offset_requires_grad = offset.requires_grad
ctx.save_for_backward(tensor, scale, offset)
return x_dequant
    @staticmethod
    def backward(ctx, grad):
(tensor, scale, offset) = ctx.saved_tensors
if (ctx.tensor_requires_grad or ctx.offset_requires_grad):
tensor_and_offset_grad = (grad * scale)
tensor_grad = (tensor_and_offset_grad if ctx.tensor_requires_grad else None)
scale_grad = ((grad * (tensor + offset)) if ctx.scale_requires_grad else None)
offset_grad = (tensor_and_offset_grad if ctx.offset_requires_grad else None)
return (tensor_grad, scale_grad, offset_grad) |
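# Minimal usage sketch (illustrative values, assuming forward/backward are the
# static methods of a standard torch.autograd.Function as above): DequantizeFunc
# maps a quantized grid back to real values as (tensor + offset) * scale, with
# custom gradients for all three inputs.
import torch

q = torch.tensor([0.0, 1.0, 2.0], requires_grad=True)
scale = torch.full((3,), 0.5, requires_grad=True)
offset = torch.full((3,), -1.0, requires_grad=True)
x = DequantizeFunc.apply(q, scale, offset)  # tensor([-0.5, 0.0, 0.5])
x.sum().backward()                          # populates q.grad, scale.grad and offset.grad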
def str_for_dist(dist: TensorVariable, formatting: str='plain', include_params: bool=True) -> str:
if include_params:
if isinstance(dist.owner.op, RandomVariable):
dist_args = [_str_for_input_var(x, formatting=formatting) for x in dist.owner.inputs[3:]]
else:
dist_args = [_str_for_input_var(x, formatting=formatting) for x in dist.owner.inputs if (not isinstance(x, (RandomStateSharedVariable, RandomGeneratorSharedVariable)))]
print_name = dist.name
if ('latex' in formatting):
if (print_name is not None):
print_name = (('\\text{' + _latex_escape(dist.name.strip('$'))) + '}')
op_name = (dist.owner.op._print_name[1] if hasattr(dist.owner.op, '_print_name') else '\\\\operatorname{Unknown}')
if include_params:
if print_name:
return '${} \\sim {}({})$'.format(print_name, op_name, ',~'.join([d.strip('$') for d in dist_args]))
else:
return '${}({})$'.format(op_name, ',~'.join([d.strip('$') for d in dist_args]))
elif print_name:
            return f'${print_name} \\sim {op_name}$'
else:
return f'${op_name}$'
else:
dist_name = (dist.owner.op._print_name[0] if hasattr(dist.owner.op, '_print_name') else 'Unknown')
if include_params:
if print_name:
return '{} ~ {}({})'.format(print_name, dist_name, ', '.join(dist_args))
else:
return '{}({})'.format(dist_name, ', '.join(dist_args))
elif print_name:
return f'{print_name} ~ {dist_name}'
else:
return dist_name |
def init_output_database(output_c, subset):
schema._execute_sql(output_c, fragment_db.get_schema_template())
schema._execute_sql(output_c, '\nATTACH DATABASE ":memory:" AS merge;\n\nCREATE TABLE merge.required_constants (\n constant_smiles TEXT\n);\n\n ')
output_c.executemany('\nINSERT INTO merge.required_constants (constant_smiles) VALUES (?)\n', ((smiles,) for smiles in subset))
output_c.execute('CREATE INDEX merge.required_constants_idx ON required_constants(constant_smiles)') |
@pytest.mark.parametrize(('package_repo', 'dependency_repo', 'result'), [('pypi', None, True), ('private', None, True), ('pypi', 'pypi', True), ('private', 'private', True), ('pypi', 'private', False), ('private', 'pypi', False)])
def test_package_satisfies_on_repositories(package_repo: str, dependency_repo: (str | None), result: bool) -> None:
source_type = (None if (package_repo == 'pypi') else 'legacy')
source_reference = (None if (package_repo == 'pypi') else package_repo)
package = Package('foo', '0.1.0', source_type=source_type, source_reference=source_reference)
dependency = Dependency('foo', '>=0.1.0')
dependency.source_name = dependency_repo
assert (package.satisfies(dependency) == result) |
def test_create_left_lane_split_second_lane():
lanedef = xodr.LaneDef(10, 20, 1, 2, 2)
lanes = xodr.create_lanes_merge_split(0, [lanedef], 30, xodr.std_roadmark_solid_solid(), 3, 3)
assert (len(lanes.lanesections) == 3)
assert (lanes.lanesections[0].s == 0)
assert (lanes.lanesections[1].s == 10)
assert (lanes.lanesections[2].s == 20)
assert (len(lanes.lanesections[0].rightlanes) == 0)
assert (len(lanes.lanesections[1].rightlanes) == 0)
assert (len(lanes.lanesections[2].rightlanes) == 0)
assert (len(lanes.lanesections[0].leftlanes) == 1)
assert (len(lanes.lanesections[1].leftlanes) == 2)
assert (len(lanes.lanesections[2].leftlanes) == 2)
assert (lanes.lanesections[0].leftlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.solid)
assert (lanes.lanesections[0].leftlanes[0].widths[0].a == 3)
assert (lanes.lanesections[0].leftlanes[0].widths[0].c == 0)
assert (lanes.lanesections[1].leftlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.broken)
assert (lanes.lanesections[1].leftlanes[0].widths[0].a == 3)
assert (lanes.lanesections[1].leftlanes[0].widths[0].c == 0)
assert (lanes.lanesections[1].leftlanes[1].roadmark[0].marking_type == xodr.RoadMarkType.solid)
assert (lanes.lanesections[1].leftlanes[1].widths[0].a == 0)
assert (lanes.lanesections[1].leftlanes[1].widths[0].c != 0)
assert (lanes.lanesections[2].leftlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.broken)
assert (lanes.lanesections[2].leftlanes[1].roadmark[0].marking_type == xodr.RoadMarkType.solid)
assert (lanes.lanesections[2].leftlanes[0].widths[0].a == 3)
assert (lanes.lanesections[2].leftlanes[0].widths[0].c == 0)
assert (lanes.lanesections[2].leftlanes[1].widths[0].a == 3)
assert (lanes.lanesections[2].leftlanes[1].widths[0].c == 0) |
class Scenario(ScenarioGenerator):
def __init__(self):
super().__init__()
def road(self, **kwargs):
roads = []
roads.append(xodr.create_road(xodr.Line(100), id=0, left_lanes=1, right_lanes=2))
roads.append(xodr.create_road(xodr.Line(100), id=1, left_lanes=0, right_lanes=1))
roads.append(xodr.create_road(xodr.Line(100), id=2, left_lanes=1, right_lanes=3))
roads.append(xodr.create_road(xodr.Spiral(0.001, 0.02, 30), id=3, left_lanes=1, right_lanes=2, road_type=1))
roads.append(xodr.create_road(xodr.Spiral((- 0.001), (- 0.02), 30), id=4, left_lanes=0, right_lanes=1, road_type=1))
roads[0].add_successor(xodr.ElementType.junction, 1)
roads[1].add_successor(xodr.ElementType.junction, 1)
roads[2].add_predecessor(xodr.ElementType.junction, 1)
roads[3].add_predecessor(xodr.ElementType.road, 0, xodr.ContactPoint.end)
roads[3].add_successor(xodr.ElementType.road, 2, xodr.ContactPoint.start)
roads[4].add_predecessor(xodr.ElementType.road, 1, xodr.ContactPoint.end)
roads[4].add_successor(xodr.ElementType.road, 2, xodr.ContactPoint.start, lane_offset=(- 2))
junction = xodr.create_junction(roads[3:], 1, roads[0:3])
odr = xodr.OpenDrive('myroad')
for r in roads:
odr.add_road(r)
odr.adjust_roads_and_lanes()
odr.add_junction(junction)
return odr |
def determine_bpi(data, frames, EMPTY=(b'\x00' * 10)):
o = 0
asbpi = 0
while (o < (len(data) - 10)):
part = data[o:(o + 10)]
if (part == EMPTY):
bpioff = (- ((len(data) - o) % 10))
break
(name, size, flags) = struct.unpack('>4sLH', part)
size = BitPaddedInt(size)
o += (10 + size)
try:
name = name.decode('ascii')
except UnicodeDecodeError:
continue
if (name in frames):
asbpi += 1
else:
bpioff = (o - len(data))
o = 0
asint = 0
while (o < (len(data) - 10)):
part = data[o:(o + 10)]
if (part == EMPTY):
intoff = (- ((len(data) - o) % 10))
break
(name, size, flags) = struct.unpack('>4sLH', part)
o += (10 + size)
try:
name = name.decode('ascii')
except UnicodeDecodeError:
continue
if (name in frames):
asint += 1
else:
intoff = (o - len(data))
if ((asint > asbpi) or ((asint == asbpi) and ((bpioff >= 1) and (intoff <= 1)))):
return int
return BitPaddedInt |
class _SingleResponse():
def __init__(self, cert: x509.Certificate, issuer: x509.Certificate, algorithm: hashes.HashAlgorithm, cert_status: OCSPCertStatus, this_update: datetime.datetime, next_update: (datetime.datetime | None), revocation_time: (datetime.datetime | None), revocation_reason: (x509.ReasonFlags | None)):
if ((not isinstance(cert, x509.Certificate)) or (not isinstance(issuer, x509.Certificate))):
raise TypeError('cert and issuer must be a Certificate')
_verify_algorithm(algorithm)
if (not isinstance(this_update, datetime.datetime)):
raise TypeError('this_update must be a datetime object')
if ((next_update is not None) and (not isinstance(next_update, datetime.datetime))):
raise TypeError('next_update must be a datetime object or None')
self._cert = cert
self._issuer = issuer
self._algorithm = algorithm
self._this_update = this_update
self._next_update = next_update
if (not isinstance(cert_status, OCSPCertStatus)):
raise TypeError('cert_status must be an item from the OCSPCertStatus enum')
if (cert_status is not OCSPCertStatus.REVOKED):
if (revocation_time is not None):
raise ValueError('revocation_time can only be provided if the certificate is revoked')
if (revocation_reason is not None):
raise ValueError('revocation_reason can only be provided if the certificate is revoked')
else:
if (not isinstance(revocation_time, datetime.datetime)):
raise TypeError('revocation_time must be a datetime object')
revocation_time = _convert_to_naive_utc_time(revocation_time)
if (revocation_time < _EARLIEST_UTC_TIME):
raise ValueError('The revocation_time must be on or after 1950 January 1.')
if ((revocation_reason is not None) and (not isinstance(revocation_reason, x509.ReasonFlags))):
raise TypeError('revocation_reason must be an item from the ReasonFlags enum or None')
self._cert_status = cert_status
self._revocation_time = revocation_time
self._revocation_reason = revocation_reason |
def blank_fill(img: np.ndarray, start_coordination: tuple[int, int]) -> np.ndarray:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
last_temp_img = np.zeros_like(img)
last_temp_img[start_coordination] = 1
current_temp_img = cv2.dilate(last_temp_img, kernel)
while (current_temp_img != last_temp_img).any():
last_temp_img = current_temp_img
current_temp_img = cv2.dilate(current_temp_img, kernel)
current_temp_img = np.logical_and((current_temp_img == 1), (current_temp_img == (1 - img))).astype(float)
return current_temp_img |
class OmniglotClassDataset(ClassDataset):
folder = 'omniglot'
download_url_prefix = '
zips_md5 = {'images_background': '68d2efa1b9178cc56df9314c21c6e718', 'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'}
filename = 'data.hdf5'
filename_labels = '{0}_labels.json'
def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False):
super(OmniglotClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations)
self.root = os.path.join(os.path.expanduser(root), self.folder)
self.transform = transform
self.split_filename = os.path.join(self.root, self.filename)
self.split_filename_labels = os.path.join(self.root, self.filename_labels.format(self.meta_split))
self._data = None
self._labels = None
if download:
self.download()
if (not self._check_integrity()):
raise RuntimeError('Omniglot integrity check failed')
self._num_classes = len(self.labels)
print(('# classes loaded for %s:' % self.meta_split), self._num_classes)
def __getitem__(self, index):
character_name = '/'.join(self.labels[(index % self.num_classes)])
data = self.data[character_name]
transform = self.get_transform(index, self.transform)
target_transform = self.get_target_transform(index)
return OmniglotDataset(data, character_name, transform=transform, target_transform=target_transform)
    @property
    def num_classes(self):
return self._num_classes
    @property
    def data(self):
if (self._data is None):
self._data = h5py.File(self.split_filename, 'r')
return self._data
    @property
    def labels(self):
if (self._labels is None):
with open(self.split_filename_labels, 'r') as f:
self._labels = json.load(f)
return self._labels
def _check_integrity(self):
return (os.path.isfile(self.split_filename) and os.path.isfile(self.split_filename_labels))
def close(self):
if (self._data is not None):
self._data.close()
self._data = None
def download(self):
import zipfile
import shutil
if self._check_integrity():
return
for name in self.zips_md5:
zip_filename = '{0}.zip'.format(name)
filename = os.path.join(self.root, zip_filename)
if os.path.isfile(filename):
continue
url = '{0}/{1}'.format(self.download_url_prefix, zip_filename)
download_url(url, self.root, zip_filename, self.zips_md5[name])
with zipfile.ZipFile(filename, 'r') as f:
f.extractall(self.root)
filename = os.path.join(self.root, self.filename)
with h5py.File(filename, 'w') as f:
group = f.create_group('omniglot')
for name in self.zips_md5:
alphabets = list_dir(os.path.join(self.root, name))
characters = [(name, alphabet, character) for alphabet in alphabets for character in list_dir(os.path.join(self.root, name, alphabet))]
for (_, alphabet, character) in characters:
filenames = glob.glob(os.path.join(self.root, name, alphabet, character, '*.png'))
dataset = group.create_dataset('{0}/{1}'.format(alphabet, character), (len(filenames), 105, 105), dtype='uint8')
for (i, char_filename) in enumerate(filenames):
image = Image.open(char_filename, mode='r').convert('L')
dataset[i] = ImageOps.invert(image)
shutil.rmtree(os.path.join(self.root, name)) |
@pytest.mark.fast
def test_normalisation_mode(plot=True, close_plots=True, verbose=True, *args, **kwargs):
from radis.test.utils import getTestFile
_clean(plot, close_plots)
(w, I) = np.loadtxt(getTestFile('calc_N2C_spectrum_Trot1200_Tvib3000.txt')).T
s = calculated_spectrum(w, I, conditions={'Tvib': 3000, 'Trot': 1200}, Iunit='mW/cm2/sr/m')
FWHM = 2
s.apply_slit(FWHM, norm_by='area')
(w_area, I_area) = s.get('radiance')
if plot:
fig = plt.figure((fig_prefix + 'Spectrum in nm + slit in nm'))
fig.clear()
ax = fig.gca()
s.plot(nfig=fig.number, wunit='nm', label='norm_by: area', lw=3)
s.apply_slit(FWHM, norm_by='max')
(w_max, I_max) = s.get('radiance', wunit='nm')
if plot:
ax.plot(w_max, (I_max / FWHM), 'r', label='(norm_by:max)/FWHM')
ax.legend(loc='best')
assert np.allclose(I_area, (I_max / FWHM), equal_nan=True)
if verbose:
print("equivalence of normalisation mode for spectrum in 'nm': OK")
s = load_spec(getTestFile('CO_Tgas1500K_mole_fraction0.01.spec'), binary=True)
s.update()
s.apply_slit(FWHM, norm_by='area', plot_slit=plot)
(w_area, I_area) = s.get('radiance', wunit='nm')
if plot:
fig = plt.figure((fig_prefix + 'Spectrum in cm-1 + slit in nm'))
fig.clear()
ax = fig.gca()
s.plot(nfig=fig.number, wunit='nm', label='norm_by: area', lw=3)
s.apply_slit(FWHM, norm_by='max', plot_slit=plot)
(w_max, I_max) = s.get('radiance', wunit='nm')
if plot:
ax.plot(w_max, (I_max / FWHM), 'r', label='(norm_by:max)/FWHM')
ax.legend(loc='best')
assert np.allclose(I_area, (I_max / FWHM), equal_nan=True)
if verbose:
print("equivalence of normalisation mode for spectrum in 'cm-1': {0}: OK")
assert is_homogeneous(s.units['radiance'], 'mW/cm2/sr')
if verbose:
print("radiance unit ({0}) is homogeneous to 'mW/cm2/sr': OK".format(s.units['radiance']))
return True |
def get_database(data, subset, root_path, video_path_formatter):
video_ids = []
video_paths = []
annotations = []
for (key, value) in data['database'].items():
this_subset = value['subset']
if (this_subset == subset):
video_ids.append(key)
annotations.append(value['annotations'])
if ('video_path' in value):
video_paths.append(Path(value['video_path']))
else:
label = value['annotations']['label']
video_paths.append(video_path_formatter(root_path, label, key))
return (video_ids, video_paths, annotations) |
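# Tiny sketch of the annotation layout this helper expects (values illustrative):
from pathlib import Path

sample = {'database': {'vid001': {'subset': 'training',
                                  'annotations': {'label': 'jump'}}}}
ids, paths, anns = get_database(sample, 'training', Path('/data'),
                                lambda root, label, key: root / label / key)
# ids == ['vid001'], paths == [Path('/data/jump/vid001')], anns == [{'label': 'jump'}]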
def _get_proposal_section_choices(conference, action='edit'):
if (action == 'create'):
return [(str(cps.id), cps.name) for cps in ProposalSection.objects.filter(conferences=conference)]
else:
return [(str(cps.id), cps.name) for cps in ProposalSection.objects.filter(conferences=conference)] |
class GuiAddImplantSetCommand(wx.Command):
def __init__(self, fitID, itemIDs):
wx.Command.__init__(self, True, 'Add Implant Set')
self.internalHistory = InternalCommandHistory()
self.fitID = fitID
self.itemIDs = itemIDs
def Do(self):
results = []
for itemID in self.itemIDs:
cmd = CalcAddImplantCommand(fitID=self.fitID, implantInfo=ImplantInfo(itemID=itemID))
results.append(self.internalHistory.submit(cmd))
eos.db.flush()
sFit = Fit.getInstance()
sFit.recalc(self.fitID)
sFit.fill(self.fitID)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return any(results)
def Undo(self):
success = self.internalHistory.undoAll()
eos.db.flush()
sFit = Fit.getInstance()
sFit.recalc(self.fitID)
sFit.fill(self.fitID)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success |
class Reaction():
def __init__(self, template=None, rxnname=None, smiles=None, reference=None):
if (template is not None):
self.smirks = template
self.rxnname = rxnname
self.smiles = smiles
self.reference = reference
rxn = AllChem.ReactionFromSmarts(self.smirks)
rdChemReactions.ChemicalReaction.Initialize(rxn)
self.num_reactant = rxn.GetNumReactantTemplates()
if ((self.num_reactant == 0) or (self.num_reactant > 2)):
raise ValueError('This reaction is neither uni- nor bi-molecular.')
self.num_agent = rxn.GetNumAgentTemplates()
self.num_product = rxn.GetNumProductTemplates()
if (self.num_reactant == 1):
self.reactant_template = list((self.smirks.split('>')[0],))
else:
self.reactant_template = list((self.smirks.split('>')[0].split('.')[0], self.smirks.split('>')[0].split('.')[1]))
self.product_template = self.smirks.split('>')[2]
self.agent_template = self.smirks.split('>')[1]
del rxn
else:
self.smirks = None
def load(self, smirks, num_reactant, num_agent, num_product, reactant_template, product_template, agent_template, available_reactants, rxnname, smiles, reference):
self.smirks = smirks
self.num_reactant = num_reactant
self.num_agent = num_agent
self.num_product = num_product
self.reactant_template = list(reactant_template)
self.product_template = product_template
self.agent_template = agent_template
self.available_reactants = list(available_reactants)
self.rxnname = rxnname
self.smiles = smiles
self.reference = reference
def get_mol(self, smi):
if isinstance(smi, str):
return Chem.MolFromSmiles(smi)
elif isinstance(smi, Chem.Mol):
return smi
else:
raise TypeError('The input should be either a SMILES string or an RDKit.Chem.Mol object.')
def visualize(self, name='./reaction1_highlight.o.png'):
rxn = AllChem.ReactionFromSmarts(self.smirks)
d2d = Draw.MolDraw2DCairo(800, 300)
d2d.DrawReaction(rxn, highlightByReactant=True)
png = d2d.GetDrawingText()
open(name, 'wb+').write(png)
del rxn
return name
def is_reactant(self, smi):
rxn = self.get_rxnobj()
smi = self.get_mol(smi)
result = rxn.IsMoleculeReactant(smi)
del rxn
return result
def is_agent(self, smi):
rxn = self.get_rxnobj()
smi = self.get_mol(smi)
result = rxn.IsMoleculeAgent(smi)
del rxn
return result
def is_product(self, smi):
rxn = self.get_rxnobj()
smi = self.get_mol(smi)
result = rxn.IsMoleculeProduct(smi)
del rxn
return result
def is_reactant_first(self, smi):
smi = self.get_mol(smi)
if smi.HasSubstructMatch(Chem.MolFromSmarts(self.get_reactant_template(0))):
return True
else:
return False
def is_reactant_second(self, smi):
smi = self.get_mol(smi)
if smi.HasSubstructMatch(Chem.MolFromSmarts(self.get_reactant_template(1))):
return True
else:
return False
def get_smirks(self):
return self.smirks
def get_rxnobj(self):
rxn = AllChem.ReactionFromSmarts(self.smirks)
rdChemReactions.ChemicalReaction.Initialize(rxn)
return rxn
def get_reactant_template(self, ind=0):
return self.reactant_template[ind]
def get_product_template(self):
return self.product_template
def run_reaction(self, reactants, keep_main=True):
rxn = self.get_rxnobj()
if (self.num_reactant == 1):
if isinstance(reactants, (tuple, list)):
if (len(reactants) == 1):
r = self.get_mol(reactants[0])
elif ((len(reactants) == 2) and (reactants[1] is None)):
r = self.get_mol(reactants[0])
else:
return None
elif isinstance(reactants, (str, Chem.Mol)):
r = self.get_mol(reactants)
else:
raise TypeError('The input of a uni-molecular reaction should be a SMILES, an rdkit.Chem.Mol object, or a tuple/list of length 1 or 2.')
if (not self.is_reactant(r)):
return None
ps = rxn.RunReactants((r,))
elif (self.num_reactant == 2):
if (isinstance(reactants, (tuple, list)) and (len(reactants) == 2)):
r1 = self.get_mol(reactants[0])
r2 = self.get_mol(reactants[1])
else:
raise TypeError('The input of a bi-molecular reaction should be a tuple/list of length 2.')
if (self.is_reactant_first(r1) and self.is_reactant_second(r2)):
pass
elif (self.is_reactant_first(r2) and self.is_reactant_second(r1)):
(r1, r2) = (r2, r1)
else:
return None
ps = rxn.RunReactants((r1, r2))
else:
raise ValueError('This reaction is neither uni- nor bi-molecular.')
uniqps = []
for p in ps:
smi = Chem.MolToSmiles(p[0])
uniqps.append(smi)
uniqps = list(set(uniqps))
assert (len(uniqps) >= 1)
del rxn
if keep_main:
return uniqps[0]
else:
return uniqps
def _filter_reactants(self, smi_list):
if (self.num_reactant == 1):
smi_w_patt = []
for smi in tqdm(smi_list):
if self.is_reactant_first(smi):
smi_w_patt.append(smi)
return (smi_w_patt,)
elif (self.num_reactant == 2):
smi_w_patt1 = []
smi_w_patt2 = []
for smi in tqdm(smi_list):
if self.is_reactant_first(smi):
smi_w_patt1.append(smi)
if self.is_reactant_second(smi):
smi_w_patt2.append(smi)
return (smi_w_patt1, smi_w_patt2)
else:
raise ValueError('This reaction is neither uni- nor bi-molecular.')
def set_available_reactants(self, building_block_list):
self.available_reactants = list(self._filter_reactants(building_block_list))
return None |
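# Illustrative usage sketch: the SMIRKS template below is a generic amide
# coupling written for this example, not taken from the original reaction set.
amide = Reaction(template='[C:1](=[O:2])[OH].[NH2:3]>>[C:1](=[O:2])[N:3]')
assert amide.num_reactant == 2
product = amide.run_reaction(('CC(=O)O', 'NCCc1ccccc1'))
# -> e.g. 'CC(=O)NCCc1ccccc1' (main product only, since keep_main defaults to True)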
class TestGenerateFunction(unittest.TestCase):
def setUp(self) -> None:
self.arg = RuntimeArg('arg', int_rprimitive)
self.reg = Register(int_rprimitive, 'arg')
self.block = BasicBlock(0)
def test_simple(self) -> None:
self.block.ops.append(Return(self.reg))
fn = FuncIR(FuncDecl('myfunc', None, 'mod', FuncSignature([self.arg], int_rprimitive)), [self.reg], [self.block])
value_names = generate_names_for_ir(fn.arg_regs, fn.blocks)
emitter = Emitter(EmitterContext(NameGenerator([['mod']])), value_names)
generate_native_function(fn, emitter, 'prog.py', 'prog')
result = emitter.fragments
assert_string_arrays_equal(['CPyTagged CPyDef_myfunc(CPyTagged cpy_r_arg) {\n', ' return cpy_r_arg;\n', '}\n'], result, msg='Generated code invalid')
def test_register(self) -> None:
reg = Register(int_rprimitive)
op = Assign(reg, Integer(5))
self.block.ops.append(op)
self.block.ops.append(Unreachable())
fn = FuncIR(FuncDecl('myfunc', None, 'mod', FuncSignature([self.arg], list_rprimitive)), [self.reg], [self.block])
value_names = generate_names_for_ir(fn.arg_regs, fn.blocks)
emitter = Emitter(EmitterContext(NameGenerator([['mod']])), value_names)
generate_native_function(fn, emitter, 'prog.py', 'prog')
result = emitter.fragments
assert_string_arrays_equal(['PyObject *CPyDef_myfunc(CPyTagged cpy_r_arg) {\n', ' CPyTagged cpy_r_r0;\n', ' cpy_r_r0 = 10;\n', ' CPy_Unreachable();\n', '}\n'], result, msg='Generated code invalid') |
def _complex_compact(variables):
compact_form = ''
sequence = None
for x in variables:
if (sequence is None):
sequence = SequenceOfSuccessiveVariables(x)
elif (not sequence.can_be_extended_with(x.id)):
compact_form += (str(sequence) if (compact_form == '') else (' ' + str(sequence)))
sequence = SequenceOfSuccessiveVariables(x)
compact_form += (str(sequence) if (compact_form == '') else (' ' + str(sequence)))
return compact_form |
def main(args):
at_step = args.step
output_dir_name = args.output_dir
layer_name = args.layer_name
block_type = args.block_type
postfix = args.postfix
probe_type = args.probe_type
normalized = args.normalized
smoothed = args.smoothed
lasso = (True if (args.lasso == 'yes') else False)
l1_lambda = float(args.l1_lambda)
if (block_type == 'resnets'):
torch_layer_type = torch.nn.Conv2d
elif (block_type == 'attentions'):
torch_layer_type = torch.nn.Linear
print('At denoising step', (at_step + 1))
probe_checkpoints_dir = f'probe_checkpoints/large_syn_dataset_continuous_fully_permuted/at_step_{at_step}/'
if (not os.path.exists(probe_checkpoints_dir)):
os.makedirs(probe_checkpoints_dir)
probe_accuracy_dir = f'probe_accuracy/large_syn_dataset_continuous_fully_permuted/at_step_{at_step}/'
if (not os.path.exists(probe_accuracy_dir)):
os.makedirs(probe_accuracy_dir)
train_split_prompts_seeds = pd.read_csv('train_split_prompts_seeds.csv', encoding='ISO-8859-1')
test_split_prompts_seeds = pd.read_csv('test_split_prompts_seeds.csv', encoding='ISO-8859-1')
combo_df = pd.concat([train_split_prompts_seeds, test_split_prompts_seeds])
dataset_path = 'datasets/images/'
files = os.listdir(dataset_path)
files = [file for file in files if file.endswith('.png')]
prompt_indexes = [int(file[(file.find('prompt_') + 7):file.find('_seed')]) for file in files]
sample_seeds = [int(file[(file.find('seed_') + 5):file.find('.png')]) for file in files]
vae_pretrained = 'CompVis/stable-diffusion-v1-4'
CLIPtokenizer_pretrained = 'openai/clip-vit-large-patch14'
CLIPtext_encoder_pretrained = 'openai/clip-vit-large-patch14'
denoise_unet_pretrained = 'CompVis/stable-diffusion-v1-4'
(vae, tokenizer, text_encoder, unet, scheduler) = _init_models(vae_pretrained=vae_pretrained, CLIPtokenizer_pretrained=CLIPtokenizer_pretrained, CLIPtext_encoder_pretrained=CLIPtext_encoder_pretrained, denoise_unet_pretrained=denoise_unet_pretrained)
torch.manual_seed(10000)
permuted_unet = copy.deepcopy(unet)
with torch.no_grad():
for (name, module) in permuted_unet.named_modules():
if hasattr(module, 'weight'):
t = module.weight.clone()
idx = torch.randperm(t.nelement())
t = t.view((- 1))[idx].view(t.size())
module.weight = nn.Parameter(t)
for block in ['down', 'mid', 'up']:
if (block == 'down'):
i_s = 0
i_e = 3
layer_range = 2
elif (block == 'up'):
i_s = 1
i_e = 4
layer_range = 3
elif (block == 'mid'):
i_s = 0
i_e = 1
if (block_type == 'resnets'):
layer_range = 2
else:
layer_range = 1
for block_ind in range(i_s, i_e):
data_path = 'datasets'
for (prompt_ind, seed_num) in zip(prompt_indexes, sample_seeds):
features = OrderedDict()
for (name, module) in permuted_unet.named_modules():
if isinstance(module, torch_layer_type):
features[name] = ModuleHook(module)
prompt = combo_df.loc[(combo_df['prompt_inds'] == prompt_ind)]['prompts'].item()
image = generate_image(prompt, seed_num, num_inference_steps=15, net=permuted_unet, tokenizer=tokenizer, text_encoder=text_encoder, scheduler=scheduler, vae=vae, stop_at_step=(at_step + 1))
for feature in features.values():
feature.close()
for layer_ind in range(layer_range):
dataset_path = 'internal_repres/'
dataset_path += f'{block}_{block_ind}_{output_dir_name}_{layer_ind}'
if (block == 'mid'):
chosen_layer_name = f'mid_block.{block_type}.{layer_ind}.{layer_name}'
else:
chosen_layer_name = f'{block}_blocks.{block_ind}.{block_type}.{layer_ind}.{layer_name}'
sel_output = features[chosen_layer_name].features[at_step]
sel_output = sel_output.unsqueeze(0).cpu().detach()
if (not os.path.exists(os.path.join(data_path, dataset_path))):
os.makedirs(os.path.join(data_path, dataset_path))
with open(os.path.join(data_path, dataset_path, f'{block}_{block_ind}_layer_{layer_ind}_{prompt_ind}_{seed_num}.pkl'), 'wb') as outfile:
pickle.dump(sel_output, outfile)
for layer_ind in range(layer_range):
dataset_path = 'internal_repres/'
dataset_path += f'{block}_{block_ind}_{output_dir_name}_{layer_ind}'
layer = f'{block}_{block_ind}_{output_dir_name}_{layer_ind}'
dataset = ProbeDEDataset('datasets/images/', f'datasets/internal_repres/{layer}/', 'datasets/depth_gt/', pre_load=True, target_transform=scale_and_norm, transform=min_max_norm_image, scale_factor=1)
input_dim = input_dims_dict[f'{block}_{block_ind}']
scale = scale_dict[f'{block}_{block_ind}']
weights_postfix = ''
if (probe_type.lower() == 'linear'):
probe = probeLinearDense(input_dim, 1, scale, use_bias=True).to(torch_device)
weights_postfix = ''
elif (probe_type.lower() == 'linear-no-bias'):
probe = probeLinearDense(input_dim, 1, scale, use_bias=False).to(torch_device)
weights_postfix = '_linear_no_bias'
elif (probe_type.lower() == 'nonlinear'):
probe = probeTwoNonLinearDense(input_dim, 1, scale, use_bias=True, mid_channels=(input_dim // 2)).to(torch_device)
weights_postfix = '_nonlinear'
elif (probe_type.lower() == 'nonlinear-no-bias'):
probe = probeTwoNonLinearDense(input_dim, 1, scale, use_bias=False, mid_channels=(input_dim // 2)).to(torch_device)
weights_postfix = '_nonlinear_no_bias'
generator = torch.manual_seed(100)
with open('train_indices.pkl', 'rb') as infile:
train_indices = pickle.load(infile)
with open('test_indices.pkl', 'rb') as infile:
test_indices = pickle.load(infile)
training_data = torch.utils.data.Subset(dataset, train_indices)
test_data = torch.utils.data.Subset(dataset, test_indices)
train_dataloader = DataLoader(training_data, batch_size=4, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=32, shuffle=False)
optimizer = torch.optim.Adam(probe.parameters(), lr=0.001)
max_epoch = 30
loss_func = nn.HuberLoss()
if (smoothed.lower() == 'yes'):
smooth_loss_func = InverseDepthSmoothnessLoss()
weights_postfix += ''
elif (smoothed.lower() == 'no'):
smooth_loss_func = None
weights_postfix += '_unsmoothed'
min_loss = 1000000.0
for epoch in range(1, (max_epoch + 1)):
verbosity = False
if (epoch == max_epoch):
verbosity = True
print(f'''
{block} Block {block_ind} Layer {layer_ind} {layer_name}''')
train_results = train_continuous_depth(probe, torch_device, train_dataloader, optimizer, epoch, loss_func=loss_func, verbose_interval=None, head=None, verbosity=False, smooth_loss=smooth_loss_func, alpha=1)
test_results = test_continuous_depth(probe, torch_device, test_dataloader, loss_func=loss_func, return_raw_outputs=verbosity, head=None, scheduler=None, verbosity=verbosity, smooth_loss=smooth_loss_func, alpha=1)
if (test_results[0] < min_loss):
min_loss = test_results[0]
torch.save(probe.state_dict(), f'probe_checkpoints/large_syn_dataset_continuous_fully_permuted/at_step_{at_step}/regression_probe_{layer}{weights_postfix}.pth')
with open(f'probe_accuracy/large_syn_dataset_continuous_fully_permuted/at_step_{at_step}/saved_test_results_{block}_{block_ind}_layer_{layer_ind}_{postfix}{weights_postfix}.pkl', 'wb') as outfile:
pickle.dump(test_results[2], outfile)
torch.save(probe.state_dict(), f'probe_checkpoints/large_syn_dataset_continuous_fully_permuted/at_step_{at_step}/regression_probe_{layer}_final{weights_postfix}.pth')
plt_test_results_continuous_depth(probe, test_dataloader, test_data, loss_func, smooth_loss=smooth_loss_func, head=None, save_plt=True, save_filename=f'probe_accuracy/large_syn_dataset_continuous_fully_permuted/at_step_{at_step}/saved_test_results_{block}_{block_ind}_layer_{layer_ind}_{postfix}{weights_postfix}.png')
dataset_path = os.path.join(data_path, dataset_path)
clear_dir(dataset_path, file_extention='.pkl') |
@dataclasses.dataclass
class _FindFlags:
case_sensitive: bool = False
backward: bool = False
def to_qt(self):
flags: _FindFlagType = QWebEnginePage.FindFlag(0)
if self.case_sensitive:
flags |= QWebEnginePage.FindFlag.FindCaseSensitively
if self.backward:
flags |= QWebEnginePage.FindFlag.FindBackward
return flags
def __bool__(self):
return any(dataclasses.astuple(self))
def __str__(self):
names = {'case_sensitive': 'FindCaseSensitively', 'backward': 'FindBackward'}
d = dataclasses.asdict(self)
truthy = [names[key] for (key, value) in d.items() if value]
if (not truthy):
return '<no find flags>'
return '|'.join(truthy) |
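# Minimal sketch of how the flag container above is consumed (to_qt() needs a
# Qt WebEngine environment, so it is only hinted at here).
flags = _FindFlags(case_sensitive=True, backward=False)
assert bool(flags)                            # __bool__: at least one flag set
assert str(flags) == 'FindCaseSensitively'
# page.findText(text, flags.to_qt(), callback)  # typical QWebEnginePage call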
def perturb_logistic(net, x_nat, target):
net.eval()
x = (x_nat.detach() + (0.001 * torch.randn(x_nat.shape).cuda().detach()))
for _ in range(args.num_steps):
x.requires_grad_()
with torch.enable_grad():
loss = torch.mean((1 + torch.exp((((- 1.0) * target.float()) * net(x).squeeze(1)))))
grad = torch.autograd.grad(loss, [x])[0]
x = (x.detach() + (args.step_size * torch.sign(grad.detach())))
x = torch.min(torch.max(x, (x_nat - args.epsilon)), (x_nat + args.epsilon))
x = torch.clamp(x, 0.0, 1.0)
net.train()
return x |
class TCovers(PluginTestCase):
def setUp(self) -> None:
self.song = A_SONG
self.blank_song = AudioFile()
def test_cover_path_lastfm(self):
plugin_cls = self.plugins['lastfm-cover'].cls
assert isinstance(plugin_cls(self.song).cover_path, fsnative)
assert isinstance(plugin_cls(self.blank_song).cover_path, fsnative)
def test_cover_path_musicbrainz(self):
plugin_cls = self.plugins['musicbrainz-cover'].cls
assert isinstance(plugin_cls(self.song).cover_path, fsnative)
assert isinstance(plugin_cls(self.blank_song).cover_path, fsnative)
def test_cover_path_discogs(self):
plugin_cls = self.plugins['discogs-cover'].cls
assert isinstance(plugin_cls(self.song).cover_path, fsnative)
assert isinstance(plugin_cls(self.blank_song).cover_path, fsnative) |
class IDirectSound3DBuffer(com.pIUnknown):
_methods_ = [('GetAllParameters', com.STDMETHOD(LPDS3DBUFFER)), ('GetConeAngles', com.STDMETHOD(LPDWORD, LPDWORD)), ('GetConeOrientation', com.STDMETHOD(PD3DVECTOR)), ('GetConeOutsideVolume', com.STDMETHOD(LPLONG)), ('GetMaxDistance', com.STDMETHOD(PD3DVALUE)), ('GetMinDistance', com.STDMETHOD(PD3DVALUE)), ('GetMode', com.STDMETHOD(LPDWORD)), ('GetPosition', com.STDMETHOD(PD3DVECTOR)), ('GetVelocity', com.STDMETHOD(PD3DVECTOR)), ('SetAllParameters', com.STDMETHOD(LPDS3DBUFFER, DWORD)), ('SetConeAngles', com.STDMETHOD(DWORD, DWORD, DWORD)), ('SetConeOrientation', com.STDMETHOD(D3DVALUE, D3DVALUE, D3DVALUE, DWORD)), ('SetConeOutsideVolume', com.STDMETHOD(LONG, DWORD)), ('SetMaxDistance', com.STDMETHOD(D3DVALUE, DWORD)), ('SetMinDistance', com.STDMETHOD(D3DVALUE, DWORD)), ('SetMode', com.STDMETHOD(DWORD, DWORD)), ('SetPosition', com.STDMETHOD(D3DVALUE, D3DVALUE, D3DVALUE, DWORD)), ('SetVelocity', com.STDMETHOD(D3DVALUE, D3DVALUE, D3DVALUE, DWORD))] |
@yes_if_nothing_inferred
def const_infer_binary_op(self: nodes.Const, opnode: (nodes.AugAssign | nodes.BinOp), operator: str, other: InferenceResult, context: InferenceContext, _: SuccessfulInferenceResult) -> Generator[ConstFactoryResult | util.UninferableBase, None, None]:
not_implemented = nodes.Const(NotImplemented)
if isinstance(other, nodes.Const):
if ((operator == '**') and isinstance(self.value, (int, float)) and isinstance(other.value, (int, float)) and ((self.value > 100000.0) or (other.value > 100000.0))):
(yield not_implemented)
return
try:
impl = BIN_OP_IMPL[operator]
try:
(yield nodes.const_factory(impl(self.value, other.value)))
except TypeError:
(yield not_implemented)
except Exception:
(yield util.Uninferable)
except TypeError:
(yield not_implemented)
elif (isinstance(self.value, str) and (operator == '%')):
(yield util.Uninferable)
else:
(yield not_implemented) |
def gurobi_solve_problem(problem: Problem, initvals: Optional[np.ndarray]=None, verbose: bool=False, **kwargs) -> Solution:
if (initvals is not None):
warnings.warn('warm-start values are ignored by this wrapper')
model = gurobipy.Model()
if (not verbose):
model.setParam(GRB.Param.OutputFlag, 0)
for (param, value) in kwargs.items():
model.setParam(param, value)
(P, q, G, h, A, b, lb, ub) = problem.unpack()
num_vars = P.shape[0]
identity = spa.eye(num_vars)
x = model.addMVar(num_vars, lb=(- GRB.INFINITY), ub=GRB.INFINITY, vtype=GRB.CONTINUOUS)
(ineq_constr, eq_constr, lb_constr, ub_constr) = (None, None, None, None)
if (G is not None):
ineq_constr = model.addMConstr(G, x, GRB.LESS_EQUAL, h)
if (A is not None):
eq_constr = model.addMConstr(A, x, GRB.EQUAL, b)
if (lb is not None):
lb_constr = model.addMConstr(identity, x, GRB.GREATER_EQUAL, lb)
if (ub is not None):
ub_constr = model.addMConstr(identity, x, GRB.LESS_EQUAL, ub)
    objective = 0.5 * (x @ P @ x) + q @ x
model.setObjective(objective, sense=GRB.MINIMIZE)
model.optimize()
solution = Solution(problem)
solution.extras['status'] = model.status
solution.found = (model.status in (GRB.OPTIMAL, GRB.SUBOPTIMAL))
if solution.found:
solution.x = x.X
__retrieve_dual(solution, ineq_constr, eq_constr, lb_constr, ub_constr)
return solution |
def parse_args():
parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task')
parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
parser.add_argument('--predict_with_generate', type=bool, default=True, help='')
parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
parser.add_argument('--num_beams', type=int, default=None, help='Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.')
parser.add_argument('--max_source_length', type=int, default=1024, help='The maximum total input sequence length after tokenization.Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--max_target_length', type=int, default=128, help='The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.during ``evaluate`` and ``predict``.')
parser.add_argument('--val_max_target_length', type=int, default=None, help='The maximum total sequence length for validation target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.')
parser.add_argument('--pad_to_max_length', type=bool, default=False, help='Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. Moreefficient on GPU but very bad for TPU.')
parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
parser.add_argument('--ignore_pad_token_for_loss', type=bool, default=True, help='Whether to ignore the tokens corresponding to padded labels in the loss computation or not.')
parser.add_argument('--source_lang', type=str, default=None, help='Source language id for translation.')
parser.add_argument('--target_lang', type=str, default=None, help='Target language id for translation.')
parser.add_argument('--source_prefix', type=str, default=None, help='A prefix to add before every source text (useful for T5 models).')
parser.add_argument('--preprocessing_num_workers', type=int, default=None, help='The number of processes to use for the preprocessing.')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_lengh` is passed.')
parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the Tokenizers library).')
parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
parser.add_argument('--model_type', type=str, default=None, help='Model type to use if training from scratch.', choices=MODEL_TYPES)
parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
parser.add_argument('--checkpointing_steps', type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.')
parser.add_argument('--with_tracking', action='store_true', help='Whether to enable experiment trackers for logging.')
parser.add_argument('--report_to', type=str, default='all', help='The integration to report the results and logs to. Supported platforms are `"tensorboard"`, `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.Only applicable when `--with_tracking` is passed.')
args = parser.parse_args()
if ((args.dataset_name is None) and (args.train_file is None) and (args.validation_file is None)):
raise ValueError('Need either a task name or a training/validation file.')
if (args.train_file is not None):
extension = args.train_file.split('.')[(- 1)]
assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
if (args.validation_file is not None):
extension = args.validation_file.split('.')[(- 1)]
assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
if args.push_to_hub:
assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
return args |
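# Example invocation sketch for the argument parser above (script name, paths
# and model id are placeholders, not taken from the original repository):
#
#   python run_seq2seq_no_trainer.py \
#       --model_name_or_path t5-small \
#       --source_lang en --target_lang ro \
#       --source_prefix "translate English to Romanian: " \
#       --train_file data/train.json --validation_file data/val.json \
#       --per_device_train_batch_size 8 --num_train_epochs 3 \
#       --output_dir outputs/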
def test_inferaugassign_picking_parent_instead_of_stmt() -> None:
code = "\n from collections import namedtuple\n SomeClass = namedtuple('SomeClass', ['name'])\n items = [SomeClass(name='some name')]\n\n some_str = ''\n some_str += ', '.join(__(item) for item in items)\n "
node = extract_node(code)
inferred = next(node.infer())
assert isinstance(inferred, Instance)
assert (inferred.name == 'SomeClass') |
class Effect4640(BaseEffect):
type = 'passive'
def handler(fit, ship, context, projectionRange, **kwargs):
damageTypes = ('Em', 'Explosive', 'Kinetic', 'Thermal')
for damageType in damageTypes:
fit.ship.boostItemAttr('armor{0}DamageResonance'.format(damageType), ship.getModifiedItemAttr('shipBonusAC2'), skill='Amarr Cruiser', **kwargs) |
class ScalarBias(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, dim, bias_init):
size = list(input.size())
size[dim] += 1
output = input.new(*size).fill_(bias_init)
output.narrow(dim, 1, (size[dim] - 1)).copy_(input)
ctx.dim = dim
return output
    @staticmethod
    def backward(ctx, grad):
return (grad.narrow(ctx.dim, 1, (grad.size(ctx.dim) - 1)), None, None) |
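# Usage sketch (illustrative shapes, assuming the static-method form above):
# ScalarBias prepends one constant entry along `dim`, enlarging that dimension
# by 1; the gradient skips the bias slot.
import torch

x = torch.randn(2, 5, requires_grad=True)
y = ScalarBias.apply(x, 1, 0.0)  # shape (2, 6); column 0 holds the constant bias
y.sum().backward()               # x.grad has shape (2, 5): the bias column is dropped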
class Freezer(object):
def param_to_buffer(module, name):
split_name = name.split('.')
module_name_hierarchy = split_name[:(- 1)]
param_name = split_name[(- 1)]
tgt_module = module
for module_name in module_name_hierarchy:
tgt_module = getattr(tgt_module, module_name)
param_data = getattr(tgt_module, param_name).data
delattr(tgt_module, param_name)
tgt_module.register_buffer(name=param_name, tensor=param_data)
    @staticmethod
    def freeze_by_keywords(module, keywords=None, exclusive_keywords=None):
if (not keywords):
return
param_list = Freezer.get_module_param_names(module, exclusive_keywords)
if (keywords == '*'):
for name in param_list:
Freezer.param_to_buffer(module, name)
return
keywords = keywords.replace(' ', '').split(',')
for name in param_list:
if any(((keyword in name) for keyword in keywords)):
Freezer.param_to_buffer(module, name)
    @staticmethod
    def get_module_param_names(module, exclusive_keywords=None):
if (not exclusive_keywords):
return [name for (name, _) in module.named_parameters()]
exclusive_keywords = exclusive_keywords.replace(' ', '').split(',')
return [name for (name, _) in module.named_parameters() if all(((key not in name) for key in exclusive_keywords))] |
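A hedged usage sketch for the Freezer helper above: freezing by keyword turns the matching parameters into buffers, so they drop out of named_parameters() and an optimizer built afterwards will not update them. The two-layer model and the keyword are purely illustrative.

import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))
Freezer.freeze_by_keywords(model, keywords='0')           # freeze every parameter whose name contains "0"
print([name for name, _ in model.named_parameters()])     # only '1.weight' and '1.bias' stay trainable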
class AbstractMonitor(metaclass=ABCMeta):
def real_time_update(self, timestamp: datetime):
raise NotImplementedError('Should implement real_time_update()')
def end_of_day_update(self, timestamp: datetime):
raise NotImplementedError('Should implement end_of_day_update()')
def end_of_trading_update(self, timestamp: datetime=None):
raise NotImplementedError('Should implement end_of_trading_update()')
def record_transaction(self, transaction: Transaction):
raise NotImplementedError('Should implement record_transaction()') |
class Bottleneck(nn.Module):
def __init__(self, in_channels, out_channels, expansion=4, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN')):
norm_cfg = copy.deepcopy(norm_cfg)
super().__init__()
assert (style in ['pytorch', 'caffe'])
self.in_channels = in_channels
self.out_channels = out_channels
self.expansion = expansion
assert ((out_channels % expansion) == 0)
self.mid_channels = (out_channels // expansion)
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if (self.style == 'pytorch'):
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
(self.norm1_name, norm1) = build_norm_layer(norm_cfg, self.mid_channels, postfix=1)
(self.norm2_name, norm2) = build_norm_layer(norm_cfg, self.mid_channels, postfix=2)
(self.norm3_name, norm3) = build_norm_layer(norm_cfg, out_channels, postfix=3)
self.conv1 = build_conv_layer(conv_cfg, in_channels, self.mid_channels, kernel_size=1, stride=self.conv1_stride, bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(conv_cfg, self.mid_channels, self.mid_channels, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(conv_cfg, self.mid_channels, out_channels, kernel_size=1, bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
    @property
    def norm1(self):
return getattr(self, self.norm1_name)
    @property
    def norm2(self):
return getattr(self, self.norm2_name)
    @property
    def norm3(self):
return getattr(self, self.norm3_name)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
return out
if (self.with_cp and x.requires_grad):
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out |
@mock.patch('baseplate.lib.thrift_pool.RetryPolicy.new')
class MaxRetriesRenameTests(unittest.TestCase):
def test_default_is_3(self, new_retry_policy):
thrift_pool.ThriftConnectionPool(EXAMPLE_ENDPOINT)
new_retry_policy.assert_called_with(attempts=3)
def test_default_through_parser(self, new_retry_policy):
config = {'example.endpoint': '127.0.0.1:1234'}
thrift_pool.thrift_pool_from_config(config, prefix='example.')
new_retry_policy.assert_called_with(attempts=3)
def test_max_retries_config_error(self, new_retry_policy):
config = {'example.endpoint': '127.0.0.1:1234', 'example.max_retries': '5'}
with self.assertRaises(Exception):
thrift_pool.thrift_pool_from_config(config, prefix='example.')
def test_max_connection_attempts_works(self, new_retry_policy):
thrift_pool.ThriftConnectionPool(EXAMPLE_ENDPOINT, max_connection_attempts=5)
new_retry_policy.assert_called_with(attempts=5)
def test_max_connection_attempts_works_through_config(self, new_retry_policy):
config = {'example.endpoint': '127.0.0.1:1234', 'example.max_connection_attempts': '5'}
thrift_pool.thrift_pool_from_config(config, prefix='example.')
new_retry_policy.assert_called_with(attempts=5) |
def build(cfg, registry, default_args=None):
if (cfg is None):
return None
elif isinstance(cfg, (list, tuple)):
modules = [build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg]
return modules
else:
return build_from_cfg(cfg, registry, default_args) |
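A small illustration of the list-vs-single dispatch in build() above. The registry and build_from_cfg below are hypothetical stand-ins used only to make the sketch self-contained; the real mmcv-style helpers are more involved.

def build_from_cfg(cfg, registry, default_args=None):
    kwargs = {k: v for k, v in cfg.items() if k != 'type'}
    return registry[cfg['type']](**kwargs)

registry = {'relu': lambda: 'ReLU()', 'linear': lambda features: f'Linear({features})'}
print(build(dict(type='relu'), registry))                                      # single cfg -> one object
print(build([dict(type='relu'), dict(type='linear', features=8)], registry))  # list cfg -> list of objects
print(build(None, registry))                                                   # None -> None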
class GetFromCacheTests(unittest.TestCase):
def test_bogus_url(self):
url = '
with self.assertRaisesRegex(ValueError, 'Connection error'):
_ = get_from_cache(url)
def test_file_not_found(self):
url = hf_bucket_url(MODEL_ID, filename='missing.bin')
with self.assertRaisesRegex(EntryNotFoundError, '404 Client Error'):
_ = get_from_cache(url)
def test_model_not_found(self):
url = hf_bucket_url('bert-base', filename='pytorch_model.bin')
with self.assertRaisesRegex(RepositoryNotFoundError, '404 Client Error'):
_ = get_from_cache(url)
def test_revision_not_found(self):
url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_INVALID)
with self.assertRaisesRegex(RevisionNotFoundError, '404 Client Error'):
_ = get_from_cache(url)
def test_standard_object(self):
url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_DEFAULT)
filepath = get_from_cache(url, force_download=True)
metadata = filename_to_url(filepath)
self.assertEqual(metadata, (url, f'"{PINNED_SHA1}"'))
def test_standard_object_rev(self):
url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_ONE_SPECIFIC_COMMIT)
filepath = get_from_cache(url, force_download=True)
metadata = filename_to_url(filepath)
self.assertNotEqual(metadata[1], f'"{PINNED_SHA1}"')
def test_lfs_object(self):
url = hf_bucket_url(MODEL_ID, filename=WEIGHTS_NAME, revision=REVISION_ID_DEFAULT)
filepath = get_from_cache(url, force_download=True)
metadata = filename_to_url(filepath)
self.assertEqual(metadata, (url, f'"{PINNED_SHA256}"'))
def test_has_file(self):
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only', WEIGHTS_NAME))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', TF2_WEIGHTS_NAME))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', FLAX_WEIGHTS_NAME))
def test_get_file_from_repo_distant(self):
self.assertIsNone(get_file_from_repo('bert-base-cased', 'ahah.txt'))
with self.assertRaisesRegex(EnvironmentError, 'is not a valid model identifier'):
get_file_from_repo('bert-base-case', 'config.json')
with self.assertRaisesRegex(EnvironmentError, 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased', 'config.json', revision='ahaha')
resolved_file = get_file_from_repo('bert-base-cased', 'config.json')
config = json.loads(open(resolved_file, 'r').read())
self.assertEqual(config['hidden_size'], 768)
def test_get_file_from_repo_local(self):
with tempfile.TemporaryDirectory() as tmp_dir:
filename = (Path(tmp_dir) / 'a.txt')
filename.touch()
self.assertEqual(get_file_from_repo(tmp_dir, 'a.txt'), str(filename))
self.assertIsNone(get_file_from_repo(tmp_dir, 'b.txt')) |
class ChildFilterLALR(ChildFilter):
def __call__(self, children):
filtered = []
for (i, to_expand, add_none) in self.to_include:
if add_none:
filtered += ([None] * add_none)
if to_expand:
if filtered:
filtered += children[i].children
else:
filtered = children[i].children
else:
filtered.append(children[i])
if self.append_none:
filtered += ([None] * self.append_none)
return self.node_builder(filtered) |
class ROIBoxHead(torch.nn.Module):
def __init__(self, in_channels):
super().__init__()
self.feature_extractor = make_roi_box_feature_extractor()
self.predictor = make_roi_box_predictor(self.feature_extractor.out_channels)
self.post_processor = make_roi_box_post_processor()
self.loss_evaluator = make_roi_box_loss_evaluator()
def forward(self, features, proposals, targets=None):
if self.training:
with torch.no_grad():
proposals = self.loss_evaluator.subsample(proposals, targets)
x = self.feature_extractor(features, proposals)
(class_logits, box_regression) = self.predictor(x)
if (not self.training):
result = self.post_processor((class_logits, box_regression), proposals)
return (x, result, {})
(loss_classifier, loss_box_reg) = self.loss_evaluator([class_logits], [box_regression])
return (x, proposals, dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg)) |
class CassandraConcurrentTests(unittest.TestCase):
def setUp(self):
self.baseplate_observer = TestBaseplateObserver()
baseplate = Baseplate({'cassandra.contact_points': cassandra_endpoint.address.host})
baseplate.register(self.baseplate_observer)
baseplate.configure_context({'cassandra': CassandraClient(keyspace='system')})
self.context = baseplate.make_context_object()
self.server_span = baseplate.make_server_span(self.context, 'test')
def test_execute_concurrent_with_args(self):
with self.server_span:
statement = self.context.cassandra.prepare('SELECT * FROM system.local WHERE "key"=?')
params = [(_key,) for _key in ['local', 'other']]
results = execute_concurrent_with_args(self.context.cassandra, statement, params)
server_span_observer = self.baseplate_observer.get_only_child()
self.assertEqual(len(server_span_observer.children), 3)
for span_observer in server_span_observer.children:
self.assertTrue(span_observer.on_start_called)
self.assertTrue(span_observer.on_finish_called)
self.assertIsNone(span_observer.on_finish_exc_info)
span_observer.assert_tag('statement', 'SELECT * FROM system.local WHERE "key"=?')
self.assertEqual(len(results), 2)
self.assertTrue(results[0].success)
self.assertTrue(results[1].success) |
class TestSuper_td():
N = 3
t1 = qutip.QobjEvo([(qutip.qeye(N) * (1 + 0.1j)), [(qutip.create(N) * (1 - 0.1j)), f]])
t2 = qutip.QobjEvo([(qutip.destroy(N) * (1 - 0.2j))])
t3 = qutip.QobjEvo([[(qutip.num(N) * (1 + 0.2j)), f]])
q1 = (qutip.qeye(N) * (1 + 0.3j))
q2 = (qutip.destroy(N) * (1 - 0.3j))
q3 = (qutip.num(N) * (1 + 0.4j))
def test_spre_td(self):
assert (qutip.spre(self.t1)(0.5) == qutip.spre(self.t1(0.5)))
def test_spost_td(self):
assert (qutip.spost(self.t1)(0.5) == qutip.spost(self.t1(0.5)))
def test_sprepost_td(self):
assert (qutip.sprepost(self.t1, self.q2)(0.5) == qutip.sprepost(self.t1(0.5), self.q2))
assert (qutip.sprepost(self.q2, self.t1)(0.5) == qutip.sprepost(self.q2, self.t1(0.5)))
assert (qutip.sprepost(self.t1, self.t2)(0.5) == qutip.sprepost(self.t1(0.5), self.t2(0.5)))
def test_operator_vector_td(self):
assert (qutip.operator_to_vector(self.t1)(0.5) == qutip.operator_to_vector(self.t1(0.5)))
vec = qutip.operator_to_vector(self.t1)
assert (qutip.vector_to_operator(vec)(0.5) == qutip.vector_to_operator(vec(0.5)))
def test_liouvillian_td(self):
assert (qutip.liouvillian(self.t1)(0.5) == qutip.liouvillian(self.t1(0.5)))
assert (qutip.liouvillian(None, [self.t2])(0.5) == qutip.liouvillian(None, [self.t2(0.5)]))
assert (qutip.liouvillian(self.t1, [self.t2, self.q1, self.t3], chi=[1, 2, 3])(0.5) == qutip.liouvillian(self.t1(0.5), [self.t2(0.5), self.q1, self.t3(0.5)], chi=[1, 2, 3]))
def test_lindblad_dissipator_td(self):
assert (qutip.lindblad_dissipator(self.t2)(0.5) == qutip.lindblad_dissipator(self.t2(0.5)))
assert (qutip.lindblad_dissipator(self.t2, self.q1)(0.5) == qutip.lindblad_dissipator(self.t2(0.5), self.q1))
assert (qutip.lindblad_dissipator(self.q1, self.t2)(0.5) == qutip.lindblad_dissipator(self.q1, self.t2(0.5))) |
class Index(CtrlNode):
nodeName = 'Index'
uiTemplate = [('axis', 'intSpin', {'value': 0, 'min': 0, 'max': 1000000}), ('index', 'intSpin', {'value': 0, 'min': 0, 'max': 1000000})]
def processData(self, data):
s = self.stateGroup.state()
ax = s['axis']
ind = s['index']
if (ax == 0):
return data[ind]
else:
return data.take(ind, axis=ax) |
class OdeSolverBase():
def __init__(self, allow_free_variables: bool=False, duplicate_starting_point: bool=False):
self.allow_free_variables = allow_free_variables
self.duplicate_starting_point = duplicate_starting_point
def integrator(self):
raise RuntimeError('This method should be implemented in the child class')
def is_direct_collocation(self) -> bool:
raise RuntimeError('This method should be implemented in the child class')
def is_direct_shooting(self) -> bool:
raise RuntimeError('This method should be implemented in the child class')
def n_required_cx(self) -> int:
raise RuntimeError('This method should be implemented in the child class')
def defects_type(self) -> DefectType:
raise RuntimeError('This method should be implemented in the child class')
def t_ode(self, nlp) -> list:
return vertcat(nlp.time_cx, nlp.dt)
def x_ode(self, nlp) -> MX:
raise RuntimeError('This method should be implemented in the child class')
def p_ode(self, nlp) -> MX:
raise RuntimeError('This method should be implemented in the child class')
def a_ode(self, nlp) -> MX:
raise RuntimeError('This method should be implemented in the child class')
def param_ode(self, nlp) -> MX:
return nlp.parameters.cx
def initialize_integrator(self, ocp, nlp, dynamics_index: int, node_index: int, allow_free_variables: bool=False, **extra_opt) -> Callable:
nlp.states.node_index = node_index
nlp.states_dot.node_index = node_index
nlp.controls.node_index = node_index
nlp.algebraic_states.node_index = node_index
ode_opt = {'model': nlp.model, 'cx': nlp.cx, 'control_type': nlp.control_type, 'defects_type': self.defects_type, 'allow_free_variables': allow_free_variables, 'param_scaling': vertcat(*[nlp.parameters[key].scaling.scaling for key in nlp.parameters.keys()]), 'ode_index': (node_index if (nlp.dynamics_func[dynamics_index].size2_out('xdot') > 1) else 0), 'duplicate_starting_point': self.duplicate_starting_point, **extra_opt}
ode = {'t': self.t_ode(nlp), 'x': self.x_ode(nlp), 'p': self.p_ode(nlp), 'a': self.a_ode(nlp), 'param': self.param_ode(nlp), 'ode': nlp.dynamics_func[dynamics_index], 'implicit_ode': (nlp.implicit_dynamics_func[dynamics_index] if (len(nlp.implicit_dynamics_func) > 0) else nlp.implicit_dynamics_func)}
return nlp.ode_solver.integrator(ode, ode_opt)
def prepare_dynamic_integrator(self, ocp, nlp):
dynamics = [nlp.ode_solver.initialize_integrator(ocp, nlp, dynamics_index=0, node_index=0, allow_free_variables=self.allow_free_variables)]
if (nlp.phase_dynamics == PhaseDynamics.SHARED_DURING_THE_PHASE):
dynamics = (dynamics * nlp.ns)
else:
for node_index in range(1, nlp.ns):
dynamics.append(nlp.ode_solver.initialize_integrator(ocp, nlp, dynamics_index=0, node_index=node_index, allow_free_variables=self.allow_free_variables))
nlp.dynamics = dynamics
extra_dynamics = []
for i in range(1, len(nlp.dynamics_func)):
extra_dynamics += [nlp.ode_solver.initialize_integrator(ocp, nlp, dynamics_index=i, node_index=0, allow_free_variables=True)]
if (nlp.phase_dynamics == PhaseDynamics.SHARED_DURING_THE_PHASE):
extra_dynamics = (extra_dynamics * nlp.ns)
else:
for node_index in range(1, nlp.ns):
extra_dynamics += [nlp.ode_solver.initialize_integrator(ocp, nlp, dynamics_index=i, node_index=0, allow_free_variables=True)]
nlp.extra_dynamics.append(extra_dynamics) |
class CodeWriter():
def __init__(self, project, templates, language='core'):
super().__init__()
self.project = project
self.templates = templates
self.language = language
self.comments = []
self.last_id = 0
self.current_sprite = ''
self.jinja_environment = Environment(trim_blocks=True, lstrip_blocks=True)
self.jinja_environment.filters['global_sound'] = (lambda name: self.global_sound(name))
self.jinja_environment.filters['global_costume'] = (lambda name: self.global_costume(name))
self.jinja_environment.filters['global_backdrop'] = (lambda name: self.global_backdrop(name))
self.jinja_environment.filters['global_sprite'] = (lambda name: self.get_sprite_var(unquoted(name)))
logger.debug('CodeWriter created.')
def set_sprite(self, name):
self.current_sprite = name
def get_sprite_var(self, name=None):
def to_python(name: str):
vname = name.lower().strip()
vname = to_underscore.sub('_', vname)
vname = multiple_underscores.sub('_', vname)
if (not valid_variable_start.fullmatch(vname[0])):
vname = ('_' + vname)
return vname
if (name is None):
return to_python(self.current_sprite)
else:
return to_python(name)
def get_sprite_or_stage(self, name=None):
if (not name):
name = self.current_sprite
if (self.project['stage']['name'] == name):
return self.project['stage']
for sprite in self.project['sprites']:
if (sprite['name'] == name):
return sprite
raise ValueError(f"No stage or sprite found with name '{name}'.")
def get_id(self):
self.last_id += 1
return self.last_id
def get_opcode_function(self, block):
cls = (CoreStage if block['stage'] else CoreSprite)
elsefunc = None
for (name, func) in inspect.getmembers(cls, predicate=inspect.isfunction):
if (name == block['opcode']):
return func
if hasattr(func, 'opcode'):
if (func.opcode == block['opcode']):
if hasattr(func, 'param'):
if (func.param in block['params']):
value = unquoted(resolve(block['params'][func.param]))
if (func.value == value):
return func
else:
elsefunc = func
if elsefunc:
return elsefunc
        print(f'No API method for {block["opcode"]}')
return None
def get_translated_function(self, block, language):
corefunc = self.get_opcode_function(block)
if (language == 'core'):
return corefunc
if (corefunc is None):
return None
lang = importlib.import_module(f'pystage.{language}')
cls = (lang.stage_class if block['stage'] else lang.sprite_class)
for (name, func) in inspect.getmembers(cls, predicate=inspect.isfunction):
for i in dis.Bytecode(func):
if (((i.opname == 'LOAD_METHOD') or (i.opname == 'LOAD_ATTR')) and (i.argval == corefunc.__name__)):
return func
return None
def get_translated_call(self, block, language):
corefunc = self.get_opcode_function(block)
func = self.get_translated_function(block, language)
if (func is None):
            return quoted(f'NO TRANSLATION: {block["opcode"]}')
res = (func.__name__ + '(')
fieldname = (corefunc.param if hasattr(corefunc, 'param') else None)
        res += ', '.join([str(block['params'][p]) for p in block['params'] if (p != fieldname)])
res += ')'
return res
def get_translated_template(self, block, language):
corefunc = self.get_opcode_function(block)
func = self.get_translated_function(block, language)
if (func is None):
            return quoted(f'NO TRANSLATION: {block["opcode"]}')
res = f'self.{func.__name__}('
fieldname = (corefunc.param if hasattr(corefunc, 'param') else None)
        res += ', '.join([(('{{' + p) + '}}') for p in block['params'] if (p != fieldname)])
res += ')'
return res
def render_comments(self):
res = ''
for c in self.comments:
for line in c.split('\n'):
                res += f'# {line}\n'
self.comments.clear()
return res
def global_sound(self, name: str, quoted=True):
name = unquoted(name)
sprite = self.get_sprite_or_stage()
for sound in sprite['sounds']:
if (sound['local_name'] == name):
q = ('"' if quoted else '')
return f"{q}{self.project['sounds'][sound['md5']]['global_name']}{q}"
raise ValueError(f"No sound with name '{name}' found for sprite '{sprite['name']}'")
def global_costume(self, name, quoted=True):
name = unquoted(name)
sprite = self.get_sprite_or_stage()
for costume in sprite['costumes']:
if (costume['local_name'] == name):
q = ('"' if quoted else '')
return f"{q}{self.project['costumes'][costume['md5']]['global_name']}{q}"
raise ValueError(f"No costume with name '{name}' found for sprite '{sprite['name']}'")
def global_backdrop(self, name, quoted=True):
name = unquoted(name)
sprite = self.project['stage']
for costume in sprite['costumes']:
if (costume['local_name'] == name):
q = ('"' if quoted else '')
return f"{q}{self.project['costumes'][costume['md5']]['global_name']}{q}"
raise ValueError(f"No backdrop with name '{name}' found for stage.")
def process(self, block):
if (not isinstance(block, dict)):
return str(block)
else:
if ('comments' in block):
self.comments.extend(block['comments'])
if (block['opcode'] in self.templates):
template = self.templates[block['opcode']]
context = {}
if ('{{func}}' in template):
func = self.get_translated_function(block, self.language)
context['func'] = (func.__name__ if (func is not None) else f"<<NO_FUNCTION-{block['opcode']}>>")
return self.render(block, template, context)
else:
default_template = self.get_translated_template(block, self.language)
return self.render(block, default_template)
def render(self, block, text, context={}):
text = textwrap.dedent(text)
if block['next']:
context['NEXT'] = self.process(block['next'])
if (not ('NEXT' in text)):
text += '\n{{NEXT}}'
for param in block['params']:
context[param] = self.process(block['params'][param])
context['CURRENT_SPRITE'] = self.get_sprite_var()
if ('{{ID}}' in text):
context['ID'] = self.get_id()
if ('indent(4)' in text):
if (('NEXT' in text) and (not context.get('NEXT'))):
context['NEXT'] = 'pass'
if (('SUBSTACK' in text) and (not context.get('SUBSTACK'))):
context['SUBSTACK'] = 'pass'
if (('SUBSTACK2' in text) and (not context.get('SUBSTACK2'))):
context['SUBSTACK2'] = 'pass'
if (('{{CONDITION}}' in text) and (not context.get('CONDITION'))):
context['CONDITION'] = 'None'
template = self.jinja_environment.from_string(text)
try:
text = template.render(context)
except UndefinedError as e:
print(f'Template variable not available: {e}')
return text |
class AlexOutputBlock(nn.Module):
def __init__(self, in_channels, classes):
super(AlexOutputBlock, self).__init__()
mid_channels = 4096
self.fc1 = AlexDense(in_channels=in_channels, out_channels=mid_channels)
self.fc2 = AlexDense(in_channels=mid_channels, out_channels=mid_channels)
self.fc3 = nn.Linear(in_features=mid_channels, out_features=classes)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x |
_auth
def db_del(request, pk):
if (request.method == 'DELETE'):
try:
DBConfig.objects.get(id=pk).delete()
return JsonResponse({'code': 200, 'data': None, 'msg': '!'})
except Exception as e:
return JsonResponse({'code': 500, 'data': None, 'msg': '!{}'.format(e)}) |
class PolyvoreModel(object):
def __init__(self, config, mode, train_inception=False):
assert (mode in ['train', 'eval', 'inference'])
self.config = config
self.mode = mode
self.train_inception = train_inception
self.reader = tf.TFRecordReader()
self.initializer = tf.random_uniform_initializer(minval=(- self.config.initializer_scale), maxval=self.config.initializer_scale)
self.images = None
self.f_input_seqs = None
self.f_target_seqs = None
self.b_input_seqs = None
self.b_target_seqs = None
self.input_mask = None
self.cap_seqs = None
self.cap_mask = None
self.seq_embeddings = None
self.image_embeddings = None
self.rnn_image_embeddings = None
self.embedding_map = None
self.total_loss = None
self.forward_losses = None
self.backward_losses = None
self.lstm_losses = None
self.loss_mask = None
self.emb_losses = None
self.target_weights = None
self.inception_variables = []
self.init_fn = None
self.global_step = None
self.target_embeddings = None
self.input_embeddings = None
self.set_ids = None
self.f_lstm_state = None
self.b_lstm_state = None
self.lstm_output = None
self.lstm_xent_loss = None
def is_training(self):
return (self.mode == 'train')
def process_image(self, encoded_image, thread_id=0, image_idx=0):
return image_processing.process_image(encoded_image, is_training=self.is_training(), height=self.config.image_height, width=self.config.image_width, image_format=self.config.image_format, image_idx=image_idx)
def build_inputs(self):
if (self.mode == 'inference'):
image_feed = tf.placeholder(dtype=tf.string, shape=[], name='image_feed')
image_feed = self.process_image(image_feed)
input_feed = tf.placeholder(dtype=tf.int64, shape=[None], name='input_feed')
image_seqs = tf.expand_dims(image_feed, 0)
cap_seqs = tf.expand_dims(input_feed, 1)
input_mask = tf.placeholder(dtype=tf.int64, shape=[1, 8], name='input_mask')
cap_mask = None
loss_mask = None
set_ids = None
else:
input_queue = input_ops.prefetch_input_data(self.reader, self.config.input_file_pattern, is_training=self.is_training(), batch_size=self.config.batch_size, values_per_shard=self.config.values_per_input_shard, input_queue_capacity_factor=self.config.input_queue_capacity_factor, num_reader_threads=self.config.num_input_reader_threads)
images_and_captions = []
for thread_id in range(self.config.num_preprocess_threads):
serialized_sequence_example = input_queue.dequeue()
(set_id, encoded_images, image_ids, captions, likes) = input_ops.parse_sequence_example(serialized_sequence_example, set_id=self.config.set_id_name, image_feature=self.config.image_feature_name, image_index=self.config.image_index_name, caption_feature=self.config.caption_feature_name, number_set_images=self.config.number_set_images)
images = []
for i in range(self.config.number_set_images):
images.append(self.process_image(encoded_images[i], image_idx=i))
images_and_captions.append([set_id, images, image_ids, captions, likes])
queue_capacity = ((5 * self.config.num_preprocess_threads) * self.config.batch_size)
(set_ids, image_seqs, image_ids, input_mask, loss_mask, cap_seqs, cap_mask, likes) = input_ops.batch_with_dynamic_pad(images_and_captions, batch_size=self.config.batch_size, queue_capacity=queue_capacity)
self.images = image_seqs
self.input_mask = input_mask
self.loss_mask = loss_mask
self.cap_seqs = cap_seqs
self.cap_mask = cap_mask
self.set_ids = set_ids
def build_image_embeddings(self):
images = tf.reshape(self.images, [(- 1), self.config.image_height, self.config.image_height, 3])
inception_output = image_embedding.inception_v3(images, trainable=self.train_inception, is_training=self.is_training())
self.inception_variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='InceptionV3')
with tf.variable_scope('image_embedding') as scope:
image_embeddings = tf.contrib.layers.fully_connected(inputs=inception_output, num_outputs=self.config.embedding_size, activation_fn=None, weights_initializer=self.initializer, biases_initializer=None, scope=scope)
with tf.variable_scope('rnn_image_embedding') as scope:
rnn_image_embeddings = tf.contrib.layers.fully_connected(inputs=inception_output, num_outputs=self.config.embedding_size, activation_fn=None, weights_initializer=self.initializer, biases_initializer=None, scope=scope)
tf.constant(self.config.embedding_size, name='embedding_size')
self.image_embeddings = tf.reshape(image_embeddings, [tf.shape(self.images)[0], (- 1), self.config.embedding_size])
self.rnn_image_embeddings = tf.reshape(rnn_image_embeddings, [tf.shape(self.images)[0], (- 1), self.config.embedding_size])
def build_seq_embeddings(self):
with tf.variable_scope('seq_embedding'), tf.device('/cpu:0'):
embedding_map = tf.get_variable(name='map', shape=[self.config.vocab_size, self.config.embedding_size], initializer=self.initializer)
seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.cap_seqs)
if (self.mode != 'inference'):
seq_embeddings = tf.batch_matmul(tf.cast(tf.expand_dims(self.cap_mask, 2), tf.float32), seq_embeddings)
seq_embeddings = tf.squeeze(seq_embeddings, [2])
self.embedding_map = embedding_map
self.seq_embeddings = seq_embeddings
def build_model(self):
norm_image_embeddings = tf.nn.l2_normalize(self.image_embeddings, 2, name='norm_image_embeddings')
norm_seq_embeddings = tf.nn.l2_normalize(self.seq_embeddings, 2)
norm_seq_embeddings = tf.pad(norm_seq_embeddings, [[0, 0], [0, (self.config.number_set_images - tf.shape(norm_seq_embeddings)[1])], [0, 0]], name='norm_seq_embeddings')
if (self.mode == 'inference'):
pass
else:
emb_loss_mask = tf.greater(tf.reduce_sum(self.cap_mask, 2), 1)
emb_loss_mask = tf.pad(emb_loss_mask, [[0, 0], [0, (self.config.number_set_images - tf.shape(emb_loss_mask)[1])]])
emb_loss_mask = tf.reshape(emb_loss_mask, [(- 1)])
norm_image_embeddings = tf.reshape(norm_image_embeddings, [(self.config.number_set_images * self.config.batch_size), self.config.embedding_size])
norm_image_embeddings = tf.boolean_mask(norm_image_embeddings, emb_loss_mask)
norm_seq_embeddings = tf.reshape(norm_seq_embeddings, [(self.config.number_set_images * self.config.batch_size), self.config.embedding_size])
norm_seq_embeddings = tf.boolean_mask(norm_seq_embeddings, emb_loss_mask)
scores = tf.matmul(norm_seq_embeddings, norm_image_embeddings, transpose_a=False, transpose_b=True, name='scores')
diagonal = tf.expand_dims(tf.diag_part(scores), 1)
cost_s = tf.maximum(0.0, ((self.config.emb_margin - diagonal) + scores))
cost_im = tf.maximum(0.0, ((self.config.emb_margin - tf.transpose(diagonal)) + scores))
cost_s = (cost_s - tf.diag(tf.diag_part(cost_s)))
cost_im = (cost_im - tf.diag(tf.diag_part(cost_im)))
emb_batch_loss = (tf.reduce_sum(cost_s) + tf.reduce_sum(cost_im))
emb_batch_loss = (emb_batch_loss / (tf.cast(tf.shape(norm_seq_embeddings)[0], tf.float32) ** 2))
if (self.config.emb_loss_factor > 0.0):
tf.contrib.losses.add_loss((emb_batch_loss * self.config.emb_loss_factor))
tf.logging.info(('Rnn_type: %s' % self.config.rnn_type))
if (self.config.rnn_type == 'lstm'):
tf.logging.info('----- RNN Type: LSTM ------')
f_lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.config.num_lstm_units, state_is_tuple=True)
b_lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.config.num_lstm_units, state_is_tuple=True)
elif (self.config.rnn_type == 'gru'):
tf.logging.info('----- RNN Type: GRU ------')
f_lstm_cell = tf.nn.rnn_cell.GRUCell(num_units=self.config.num_lstm_units)
b_lstm_cell = tf.nn.rnn_cell.GRUCell(num_units=self.config.num_lstm_units)
else:
tf.logging.info('----- RNN Type: RNN ------')
f_lstm_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=self.config.num_lstm_units)
b_lstm_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=self.config.num_lstm_units)
if (self.mode == 'train'):
f_lstm_cell = tf.nn.rnn_cell.DropoutWrapper(f_lstm_cell, input_keep_prob=self.config.lstm_dropout_keep_prob, output_keep_prob=self.config.lstm_dropout_keep_prob)
b_lstm_cell = tf.nn.rnn_cell.DropoutWrapper(b_lstm_cell, input_keep_prob=self.config.lstm_dropout_keep_prob, output_keep_prob=self.config.lstm_dropout_keep_prob)
with tf.variable_scope('lstm', initializer=self.initializer) as lstm_scope:
if (self.mode == 'inference'):
pred_feed = tf.placeholder(dtype=tf.float32, shape=[None, None], name='pred_feed')
next_index_feed = tf.placeholder(dtype=tf.int64, shape=[None], name='next_index_feed')
self.lstm_xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred_feed, labels=next_index_feed, name='lstm_xent')
if (self.config.rnn_type == 'lstm'):
f_state_feed = tf.placeholder(dtype=tf.float32, shape=[None, sum(f_lstm_cell.state_size)], name='f_state_feed')
f_input_feed = tf.placeholder(dtype=tf.float32, shape=[None, self.config.embedding_size], name='f_input_feed')
b_state_feed = tf.placeholder(dtype=tf.float32, shape=[None, sum(b_lstm_cell.state_size)], name='b_state_feed')
b_input_feed = tf.placeholder(dtype=tf.float32, shape=[None, self.config.embedding_size], name='b_input_feed')
f_state_tuple = tf.split(1, 2, f_state_feed)
with tf.variable_scope('FW'):
(f_lstm_outputs, f_state_tuple) = f_lstm_cell(inputs=f_input_feed, state=f_state_tuple)
self.f_lstm_state = tf.concat(1, f_state_tuple, name='f_state')
b_state_tuple = tf.split(1, 2, b_state_feed)
with tf.variable_scope('BW'):
(b_lstm_outputs, b_state_tuple) = b_lstm_cell(inputs=b_input_feed, state=b_state_tuple)
self.b_lstm_state = tf.concat(1, b_state_tuple, name='b_state')
else:
f_state_feed = tf.placeholder(dtype=tf.float32, shape=[None, f_lstm_cell.state_size], name='f_state_feed')
f_input_feed = tf.placeholder(dtype=tf.float32, shape=[None, self.config.embedding_size], name='f_input_feed')
b_state_feed = tf.placeholder(dtype=tf.float32, shape=[None, b_lstm_cell.state_size], name='b_state_feed')
b_input_feed = tf.placeholder(dtype=tf.float32, shape=[None, self.config.embedding_size], name='b_input_feed')
with tf.variable_scope('FW'):
(f_lstm_outputs, f_state_tuple) = f_lstm_cell(inputs=f_input_feed, state=f_state_feed)
f_state_tuple = tf.identity(f_state_tuple, name='f_state')
with tf.variable_scope('BW'):
(b_lstm_outputs, b_state_tuple) = b_lstm_cell(inputs=b_input_feed, state=b_state_feed)
b_state_tuple = tf.identity(b_state_tuple, name='b_state')
lstm_outputs = (f_lstm_outputs, b_lstm_outputs)
sequence_length = None
else:
sequence_length = tf.reduce_sum(self.input_mask, 1)
(lstm_outputs, _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=f_lstm_cell, cell_bw=b_lstm_cell, inputs=self.rnn_image_embeddings, initial_state_fw=None, initial_state_bw=None, sequence_length=sequence_length, dtype=tf.float32, scope=lstm_scope)
f_lstm_outputs = tf.reshape(lstm_outputs[0], [(- 1), f_lstm_cell.output_size])
if (self.mode == 'inference'):
b_lstm_outputs = lstm_outputs[1]
else:
b_lstm_outputs = tf.reverse_sequence(lstm_outputs[1], seq_lengths=sequence_length, seq_dim=1, batch_dim=0)
b_lstm_outputs = tf.reshape(b_lstm_outputs, [(- 1), b_lstm_cell.output_size])
with tf.variable_scope('f_logits') as logits_scope:
f_input_embeddings = tf.contrib.layers.fully_connected(inputs=f_lstm_outputs, num_outputs=self.config.embedding_size, activation_fn=None, weights_initializer=self.initializer, scope=logits_scope)
with tf.variable_scope('b_logits') as logits_scope:
b_input_embeddings = tf.contrib.layers.fully_connected(inputs=b_lstm_outputs, num_outputs=self.config.embedding_size, activation_fn=None, weights_initializer=self.initializer, scope=logits_scope)
if (self.mode == 'inference'):
pass
else:
input_mask = tf.pad(self.input_mask, [[0, 0], [0, ((self.config.number_set_images + 1) - tf.shape(self.input_mask)[1])]])
input_mask = tf.to_float(tf.reshape(tf.slice(input_mask, [0, 1], [(- 1), (- 1)]), [(- 1), 1]))
loss_mask = tf.pad(self.loss_mask, [[0, 0], [0, (self.config.number_set_images - tf.shape(self.loss_mask)[1])]])
loss_mask = tf.reshape(tf.to_float(loss_mask), [(self.config.number_set_images * self.config.batch_size), 1])
f_target_embeddings = tf.slice(tf.pad(self.rnn_image_embeddings, [[0, 0], [0, 1], [0, 0]]), [0, 1, 0], [(- 1), (- 1), (- 1)])
f_target_embeddings = tf.reshape(f_target_embeddings, [(self.config.number_set_images * self.config.batch_size), self.config.embedding_size])
f_target_embeddings = tf.mul(f_target_embeddings, input_mask, name='target_embeddings')
loss_mask = tf.squeeze(loss_mask)
f_input_embeddings = tf.boolean_mask(f_input_embeddings, tf.cast(loss_mask, tf.bool))
f_target_embeddings = tf.boolean_mask(f_target_embeddings, tf.cast(loss_mask, tf.bool))
f_lstm_scores = tf.matmul(f_input_embeddings, f_target_embeddings, transpose_a=False, transpose_b=True)
f_lstm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=f_lstm_scores, labels=tf.range(tf.shape(f_lstm_scores)[0]))
f_lstm_loss = tf.div(tf.reduce_sum(f_lstm_loss), tf.reduce_sum(loss_mask), name='f_lstm_loss')
reverse_embeddings = tf.reverse_sequence(self.rnn_image_embeddings, seq_lengths=sequence_length, seq_dim=1, batch_dim=0)
b_target_embeddings = tf.slice(tf.pad(reverse_embeddings, [[0, 0], [0, 1], [0, 0]]), [0, 1, 0], [(- 1), (- 1), (- 1)])
b_target_embeddings = tf.reshape(b_target_embeddings, [(self.config.number_set_images * self.config.batch_size), self.config.embedding_size])
b_target_embeddings = tf.mul(b_target_embeddings, input_mask, name='target_embeddings')
b_input_embeddings = tf.boolean_mask(b_input_embeddings, tf.cast(loss_mask, tf.bool))
b_target_embeddings = tf.boolean_mask(b_target_embeddings, tf.cast(loss_mask, tf.bool))
b_lstm_scores = tf.matmul(b_input_embeddings, b_target_embeddings, transpose_a=False, transpose_b=True)
b_lstm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=b_lstm_scores, labels=tf.range(tf.shape(b_lstm_scores)[0]))
b_lstm_loss = tf.div(tf.reduce_sum(b_lstm_loss), tf.reduce_sum(loss_mask), name='b_lstm_loss')
if (self.config.f_rnn_loss_factor > 0):
tf.contrib.losses.add_loss((f_lstm_loss * self.config.f_rnn_loss_factor))
if (self.config.b_rnn_loss_factor > 0):
tf.contrib.losses.add_loss((b_lstm_loss * self.config.b_rnn_loss_factor))
total_loss = tf.contrib.losses.get_total_loss()
tf.scalar_summary('emb_batch_loss', emb_batch_loss)
tf.scalar_summary('f_lstm_loss', f_lstm_loss)
tf.scalar_summary('b_lstm_loss', b_lstm_loss)
tf.scalar_summary('lstm_loss', ((f_lstm_loss * self.config.f_rnn_loss_factor) + (b_lstm_loss * self.config.b_rnn_loss_factor)))
tf.scalar_summary('total_loss', total_loss)
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
weights = tf.to_float(tf.reshape(emb_loss_mask, [(- 1)]))
self.loss_mask = loss_mask
self.input_mask = input_mask
self.target_embeddings = (f_target_embeddings, b_target_embeddings)
self.input_embeddings = (f_input_embeddings, b_input_embeddings)
self.total_loss = total_loss
self.emb_losses = emb_batch_loss
self.lstm_losses = ((f_lstm_loss * self.config.f_rnn_loss_factor) + (b_lstm_loss * self.config.b_rnn_loss_factor))
self.target_weights = weights
def setup_inception_initializer(self):
if (self.mode != 'inference'):
saver = tf.train.Saver(self.inception_variables)
def restore_fn(sess):
tf.logging.info(('Restoring Inception variables from checkpoint %s' % self.config.inception_checkpoint_file))
saver.restore(sess, self.config.inception_checkpoint_file)
self.init_fn = restore_fn
def setup_global_step(self):
global_step = tf.Variable(initial_value=0, name='global_step', trainable=False, collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.VARIABLES])
self.global_step = global_step
def build(self):
self.build_inputs()
self.build_image_embeddings()
self.build_seq_embeddings()
self.build_model()
self.setup_inception_initializer()
self.setup_global_step() |
def online_kurtosis(data):
n = 0
mean = 0
M2 = 0
M3 = 0
M4 = 0
for x in data:
n1 = n
n = (n + 1)
delta = (x - mean)
delta_n = (delta / n)
delta_n2 = (delta_n * delta_n)
term1 = ((delta * delta_n) * n1)
mean = (mean + delta_n)
M4 = (((M4 + ((term1 * delta_n2) * (((n * n) - (3 * n)) + 3))) + ((6 * delta_n2) * M2)) - ((4 * delta_n) * M3))
M3 = ((M3 + ((term1 * delta_n) * (n - 2))) - ((3 * delta_n) * M2))
M2 = (M2 + term1)
if (M2 == 0):
return None
kurtosis = (((n * M4) / (M2 * M2)) - 3)
return kurtosis |
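A quick sanity check for the streaming kurtosis routine above: it returns the excess kurtosis, so a large Gaussian sample should come out close to 0 (the exact value depends on the random draw).

import random

random.seed(0)
sample = [random.gauss(0, 1) for _ in range(100000)]
print(online_kurtosis(sample))  # approximately 0.0 for normally distributed data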
def _create_random_dataset(shape, channel_per_class):
tmp = NamedTemporaryFile(delete=False)
with h5py.File(tmp.name, 'w') as f:
l_shape = w_shape = shape
if (len(shape) == 4):
l_shape = shape[1:]
w_shape = shape[1:]
if channel_per_class:
l_shape = ((2,) + l_shape)
f.create_dataset('raw', data=np.random.rand(*shape))
f.create_dataset('label', data=np.random.randint(0, 2, l_shape))
f.create_dataset('weight_map', data=np.random.rand(*w_shape))
return tmp.name |
def list_ready_nodes(cli, label_selector=None):
nodes = []
try:
if label_selector:
ret = cli.list_node(pretty=True, label_selector=label_selector)
else:
ret = cli.list_node(pretty=True)
except ApiException as e:
logging.error(('Exception when calling CoreV1Api->list_node: %s\n' % e))
raise e
for node in ret.items:
for cond in node.status.conditions:
if ((str(cond.type) == 'Ready') and (str(cond.status) == 'True')):
nodes.append(node.metadata.name)
return nodes |
def save_coeffs(coeffs, out_dir=''):
for platform in coeffs.keys():
fname = os.path.join(out_dir, ('%s_calibration_data.h5' % platform))
fid = h5py.File(fname, 'w')
for chan in coeffs[platform].keys():
fid.create_group(chan)
fid[chan]['datetime'] = coeffs[platform][chan]['datetime']
fid[chan]['slope1'] = coeffs[platform][chan]['slope1']
fid[chan]['intercept1'] = coeffs[platform][chan]['intercept1']
fid[chan]['slope2'] = coeffs[platform][chan]['slope2']
fid[chan]['intercept2'] = coeffs[platform][chan]['intercept2']
fid.close()
print(('Calibration coefficients saved for %s' % platform)) |
class SelectiveKernelAttn(nn.Module):
def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
super(SelectiveKernelAttn, self).__init__()
self.num_paths = num_paths
self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False)
self.bn = norm_layer(attn_channels)
self.act = act_layer(inplace=True)
self.fc_select = nn.Conv2d(attn_channels, (channels * num_paths), kernel_size=1, bias=False)
def forward(self, x):
_assert((x.shape[1] == self.num_paths), '')
x = x.sum(1).mean((2, 3), keepdim=True)
x = self.fc_reduce(x)
x = self.bn(x)
x = self.act(x)
x = self.fc_select(x)
(B, C, H, W) = x.shape
x = x.view(B, self.num_paths, (C // self.num_paths), H, W)
x = torch.softmax(x, dim=1)
return x |
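A minimal sketch of how the attention module above is typically driven (assuming torch, nn, and timm's _assert helper are imported for the class definition): the input stacks per-branch feature maps along dim 1, the output is a softmax weight per branch, and the branches are fused by a weighted sum. The shapes are illustrative.

import torch

attn = SelectiveKernelAttn(channels=64, num_paths=2, attn_channels=32)
paths = torch.randn(4, 2, 64, 7, 7)      # (batch, num_paths, channels, H, W)
weights = attn(paths)                    # (4, 2, 64, 1, 1), softmax over the path dimension
fused = (paths * weights).sum(dim=1)     # (4, 64, 7, 7) selective-kernel fusion
print(weights.shape, fused.shape)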
class TestForceDocumentEnd():
def _get_script(self, *, namespace, name):
source = textwrap.dedent('\n // ==UserScript==\n // {}\n // {}\n // ==/UserScript==\n '.format(namespace, name))
_save_script(source, 'force.user.js')
gm_manager = greasemonkey.GreasemonkeyManager()
gm_manager.load_scripts()
scripts = gm_manager.all_scripts()
assert (len(scripts) == 1)
return scripts[0]
    @pytest.mark.parametrize('namespace, name, force', [(' 'foobar', True), (' 'Iridium', True), (' 'Foo', False), (' 'Iridium', False)])
def test_matching(self, monkeypatch, namespace, name, force):
monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebEngine)
script = self._get_script(namespace=namespace, name=name)
assert (script.needs_document_end_workaround() == force)
    @pytest.mark.parametrize('namespace, name', [(' 'foobar'), (' 'Iridium'), (' 'Foo'), (' 'Iridium')])
def test_webkit(self, monkeypatch, namespace, name):
monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebKit)
script = self._get_script(namespace=namespace, name=name)
assert (not script.needs_document_end_workaround()) |
def verify_message_with_address(address: str, sig65: bytes, message: bytes, *, net=None):
from .bitcoin import pubkey_to_address
assert_bytes(sig65, message)
if (net is None):
net = constants.net
try:
h = sha256d(msg_magic(message))
(public_key, compressed) = ECPubkey.from_signature65(sig65, h)
pubkey_hex = public_key.get_public_key_hex(compressed)
for txin_type in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
addr = pubkey_to_address(txin_type, pubkey_hex, net=net)
if (address == addr):
break
else:
raise Exception('Bad signature')
public_key.verify_message_hash(sig65[1:], h)
return True
except Exception as e:
_logger.info(f'Verification error: {repr(e)}')
return False |
class LMDBDataset(data.Dataset):
def __init__(self, db_path, noise_model=None, size=None, repeat=1, ratio_used_list=None):
import lmdb
self.db_path = db_path
self.env = lmdb.open(db_path, max_readers=1, readonly=True, lock=False, readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
length = txn.stat()['entries']
        self.length = (size or length)
        self.repeat = repeat
        # load the meta info before it is consulted by the ratio filter below
        self.meta = pickle.load(open(join(db_path, 'meta_info.pkl'), 'rb'))
        self.shape = self.meta['shape']
        self.dtype = self.meta['dtype']
        self.noise_model = noise_model
        if (ratio_used_list is not None):
            idx_used = []
            for i in range(self.length):
                if (int(self.meta[i][3]) in ratio_used_list):
                    idx_used.append(i)
            print(f'Ratio used to train: {ratio_used_list}')
            print(f'Used pairs: {len(idx_used)} out of {self.length}')
def __getitem__(self, index):
env = self.env
index = (index % self.length)
with env.begin(write=False) as txn:
raw_data = txn.get('{:08}'.format(index).encode('ascii'))
flat_x = np.frombuffer(raw_data, self.dtype)
x = flat_x.reshape(*self.shape)
if (self.dtype == np.uint16):
x = np.clip((x / 65535), 0, 1).astype(np.float32)
if (len(self.meta[index]) == 2):
(wb, color_matrix) = self.meta[index]
(ratio, K) = ((- 1), (- 1))
else:
(wb, color_matrix, ISO, ratio) = self.meta[index]
if (self.noise_model is not None):
K = self.noise_model.ISO_to_K(ISO)
else:
K = (- 1)
return (x, {'ratio': ratio, 'K': K})
def __len__(self):
return int((self.length * self.repeat))
def __repr__(self):
return (((self.__class__.__name__ + ' (') + self.db_path) + ')') |