repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-680k) |
---|---|---|---|---|
anirudhbhashyam/911-Calls-Seattle-Predictions | src/train_nn.py | 8c975ab6c6a85d514ad74388778e1b635ed3e63d | import os
from typing import Union
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold
import utility as ut
from variables import *
# Read the data.
train_data = pd.read_csv(os.path.join(DATA_PATH, ".".join([DATA_TRAIN, DATA_EXT])), header = 0)
# Get the labels.
Y = train_data.pop(LABEL)
sample_weights = np.ones(Y.shape[0])
for i in range(10, 24):
sample_weights[train_data["_".join(("hour", str(i)))] == 1] = 1.5
# -- For classification -- #
# CLASSES = np.unique(Y)
# N_CLASSES = len(CLASSES)
# Y = Y.replace(dict(zip(CLASSES, range(0, len(CLASSES)))))
# Data shape parameters.
N_FEATURES = train_data.shape[1]
N_SAMPLES = train_data.shape[0]
# Split the training data.
X_train, X_val, Y_train, Y_val = train_test_split(train_data, Y, shuffle = True, random_state = 7919)
def build_and_compile(input_: tuple = (WB_SIZE, N_FEATURES),
loss_func: str = "mae") -> tf.keras.Model:
"""
Build and compile a TensorFlow LSTM network.
Parameters
----------
input_ :
Shape of the training data. Should specify
`(batch_size or window_size, n_features)`.
loss_func :
Loss function to use for training.
Returns
-------
`tf.keras.Model` :
A compiled TensorFlow model.
"""
# Sequential Keras model.
model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(50, input_shape = input_, return_sequences = True),
tf.keras.layers.LSTM(50, return_sequences = False),
tf.keras.layers.GaussianNoise(1.0),
tf.keras.layers.Dense(1024, activation = "relu"),
tf.keras.layers.Dropout(0.7),
tf.keras.layers.Dense(128, activation = "relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(64, activation = "relu"),
tf.keras.layers.GaussianNoise(0.2),
# tf.keras.layers.Dense(32, activation = "relu"),
# tf.keras.layers.GaussianNoise(0.7),
tf.keras.layers.Dense(1, activation = "relu")
])
# Compile the model.
model.compile(
loss = loss_func,
optimizer = "adam"
)
return model
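# Example (sketch), assuming WB_SIZE and N_FEATURES are supplied by `variables`:
#     model = build_and_compile(input_=(WB_SIZE, N_FEATURES), loss_func="mae")
#     model.summary()  # expects input batches shaped (batch, WB_SIZE, N_FEATURES)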
def train(model: tf.keras.Model,
train_data: np.ndarray,
train_labels: np.ndarray,
val_data: np.ndarray,
val_labels: np.ndarray,
epochs: int = 200,
sample_weights: np.ndarray = None,
cross_val = False) -> pd.DataFrame:
"""
Trains the TensorFlow `model`.
Parameters
----------
model :
A TensorFlow compiled model.
train_data :
The training data. Shape must be consistent with what is passed during model compilation.
train_labels :
The ground truth training labels.
val_data :
The data to be used as validation.
val_labels :
The ground truth validation labels.
epochs :
Total number of epochs to train.
sample_weights :
Weights for `train_data` to use during training.
cross_val :
Whether the call is part of a cross validation run.
Returns
-------
pd.DataFrame:
Training information.
"""
# Check for overfitting.
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor = "val_loss",
min_delta = 0.001,
patience = 100,
restore_best_weights = False)
history = model.fit(
train_data.reshape(-1, WB_SIZE, N_FEATURES),
train_labels,
sample_weight = sample_weights,
validation_data = (val_data.reshape(-1, WB_SIZE, N_FEATURES), val_labels),
verbose = 1,
epochs = epochs,
callbacks = [early_stopping])
return pd.DataFrame(history.history)
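# Example (sketch): `train` takes flat arrays and regroups them into windows of
# WB_SIZE timesteps via the reshape above, e.g.
#     history_df = train(model, np.array(X_train), np.array(Y_train),
#                        np.array(X_val), np.array(Y_val), epochs=200)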
# def cross_validate(train_data: pd.DataFrame,
# train_labels: pd.DataFrame,
# epochs: int = 50,
# sample_weights: np.array = None,
# folds: int = 2) -> pd.DataFrame:
# splits = KFold(n_splits = folds, shuffle = True)
# print("Starting cross validation.")
# accuracy = list()
# val_loss = list()
# models = list()
# for i, (train_index, test_index) in enumerate(splits.split(train_data, train_labels)):
# print(f"Iteration {i}\n")
# X_train, X_val, Y_train, Y_val = train_data[train_index], train_data[test_index], train_data[train_index], train_labels[test_index]
# model = build_and_compile((WB_SIZE, N_FEATURES), "mae")
# history_df = train(model, X_train, Y_train, epochs)
# # train_stats(history_df, i)
# scores = model.evaluate(X_val.reshape(-1, WB_SIZE, N_FEATURES), Y_val)
# print(f"Validation loss: {scores}\n")
# #of {scores[0]} {model.metrics_names[1]} of {scores[1] * 100:.2f}%")
# # accuracy.append(scores[1] * 100)
# val_loss.append(scores)
# models.append(model)
# return models[np.argmin(val_loss)]
def train_stats(history_df: pd.DataFrame, it: int = None) -> None:
"""
Produces training statistics once training has run its course.
Parameters
----------
history_df :
The history as returned by Keras `fit` method.
it :
To be used with cross validation. Specifies the name of the learning curve based on the cross validation iteration `it`.
Returns
-------
`None`
"""
# Learning curve.
plt.rcParams["figure.dpi"] = 160
history_df.loc[:, ["loss", "val_loss"]].plot()
plt.title("Model Loss")
plt.ylabel("Loss")
plt.xlabel("Epoch")
name = TRAIN_FIG_SAVE_NAME
if it is not None:
name = "_".join([name, str(it)])
plt.savefig(os.path.join(TRAIN_FIG_SAVE_PATH, ".".join([name, FIG_EXT])))
# Stats
print(f"Minimum validation loss: {history_df['val_loss'].min()}")
# plt.plot(f"Accuracy: {history_df['train_accuracy']}")
# plt.plot(f"Validation Accuracy: {history_df['val_accuracy']}")
return None
def main():
model = build_and_compile((WB_SIZE, N_FEATURES))
# model = cross_validate(np.array(train_data), np.array(Y))
history_df = train(model, np.array(X_train), np.array(Y_train), np.array(X_val), np.array(Y_val))
# train_stats(history_df)
# Save trained model (better to use checkpoints).
model.save(os.path.join(NN_MODEL_SAVE_PATH, NN_MODEL_SAVE_NAME))
if __name__ == "__main__":
main()
| [((416, 435), 'numpy.ones', 'np.ones', (['Y.shape[0]'], {}), '(Y.shape[0])\n', (423, 435), True, 'import numpy as np\n'), ((825, 889), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_data', 'Y'], {'shuffle': '(True)', 'random_state': '(7919)'}), '(train_data, Y, shuffle=True, random_state=7919)\n', (841, 889), False, 'from sklearn.model_selection import train_test_split, KFold\n'), ((2926, 3041), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.001)', 'patience': '(100)', 'restore_best_weights': '(False)'}), "(monitor='val_loss', min_delta=0.001,\n patience=100, restore_best_weights=False)\n", (2958, 3041), True, 'import tensorflow as tf\n'), ((3333, 3362), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (3345, 3362), True, 'import pandas as pd\n'), ((5005, 5028), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Loss"""'], {}), "('Model Loss')\n", (5014, 5028), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5048), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (5040, 5048), True, 'import matplotlib.pyplot as plt\n'), ((5050, 5069), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (5060, 5069), True, 'import matplotlib.pyplot as plt\n'), ((5596, 5613), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (5604, 5613), True, 'import numpy as np\n'), ((5615, 5632), 'numpy.array', 'np.array', (['Y_train'], {}), '(Y_train)\n', (5623, 5632), True, 'import numpy as np\n'), ((5634, 5649), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (5642, 5649), True, 'import numpy as np\n'), ((5651, 5666), 'numpy.array', 'np.array', (['Y_val'], {}), '(Y_val)\n', (5659, 5666), True, 'import numpy as np\n'), ((5762, 5814), 'os.path.join', 'os.path.join', (['NN_MODEL_SAVE_PATH', 'NN_MODEL_SAVE_NAME'], {}), '(NN_MODEL_SAVE_PATH, NN_MODEL_SAVE_NAME)\n', (5774, 5814), False, 'import os\n'), ((1398, 1465), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(50)'], {'input_shape': 'input_', 'return_sequences': '(True)'}), '(50, input_shape=input_, return_sequences=True)\n', (1418, 1465), True, 'import tensorflow as tf\n'), ((1473, 1521), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(50)'], {'return_sequences': '(False)'}), '(50, return_sequences=False)\n', (1493, 1521), True, 'import tensorflow as tf\n'), ((1527, 1561), 'tensorflow.keras.layers.GaussianNoise', 'tf.keras.layers.GaussianNoise', (['(1.0)'], {}), '(1.0)\n', (1556, 1561), True, 'import tensorflow as tf\n'), ((1565, 1611), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (1586, 1611), True, 'import tensorflow as tf\n'), ((1617, 1645), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (1640, 1645), True, 'import tensorflow as tf\n'), ((1649, 1694), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1670, 1694), True, 'import tensorflow as tf\n'), ((1700, 1728), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1723, 1728), True, 'import tensorflow as tf\n'), ((1732, 1776), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1753, 1776), True, 'import tensorflow as tf\n'), ((1782, 1816), 
'tensorflow.keras.layers.GaussianNoise', 'tf.keras.layers.GaussianNoise', (['(0.2)'], {}), '(0.2)\n', (1811, 1816), True, 'import tensorflow as tf\n'), ((1912, 1955), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""relu"""'}), "(1, activation='relu')\n", (1933, 1955), True, 'import tensorflow as tf\n')] |
Gustavo6046/polydung | pdserver/objects.py | e8626c67b0f59e00a2400b5a5c644e3f6b925e00 | import base64
import random
import string
import netbyte
import numpy as np
try:
import simplejson as json
except ImportError:
import json
kinds = {}
class PDObject(object):
def __init__(self, game, kind, id, pos, properties):
self.game = game
self.kind = kind
self.id = id or ''.join([random.choice(string.ascii_letters + string.digits + "#$%*") for _ in range(100)])
self.pos = np.array(pos)
self.properties = properties
self.game.handle_object_creation(self)
def __getitem__(self, key): # a shortcut for Netbyte
return self.properties[key]
def __setitem__(self, key, value): # not only a shortcut for Netbyte
self.properties[key] = value
self.game.update_object(self)
def __call__(self, key, **kwargs):
nbe = netbyte.Netbyte()
nbe['self'] = self
nbe['game'] = self.game
for k, v in kwargs.items():
nbe[k] = v
nbe.execute_instructions(*self.kind.functions[key])
def tick(self, timedelta):
self('tick', timedelta=timedelta)
def serialize(self):
return json.dumps({
"kind": self.kind.name,
'id': self.id,
'pos': self.pos.tolist(),
"properties": self.properties
})
@classmethod
def deserialize(cls, game, js):
data = json.loads(js)
return cls(game, kinds[data['kind']], data['id'], data['pos'], data['properties'])
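# Example (sketch) of the JSON round trip, assuming `game` and a registered
# PDClass named "crate" already exist (both names are illustrative):
#     obj = PDObject(game, kinds["crate"], None, [0, 0], {"hp": 10})
#     clone = PDObject.deserialize(game, obj.serialize())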
class PDClass(object):
def __init__(self, game, name, functions=()):
self.functions = dict(functions)
self.name = name
kinds[name] = self
nbe = netbyte.Netbyte()
def serializable(self):
return {
'name': self.name,
'functions': {k: nbe.dump(v, name="{}.{}".format(self.name, k)) for k, v in self.functions.items()}
} | [((437, 450), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (445, 450), True, 'import numpy as np\n'), ((865, 882), 'netbyte.Netbyte', 'netbyte.Netbyte', ([], {}), '()\n', (880, 882), False, 'import netbyte\n'), ((1442, 1456), 'json.loads', 'json.loads', (['js'], {}), '(js)\n', (1452, 1456), False, 'import json\n'), ((1743, 1760), 'netbyte.Netbyte', 'netbyte.Netbyte', ([], {}), '()\n', (1758, 1760), False, 'import netbyte\n'), ((335, 395), 'random.choice', 'random.choice', (["(string.ascii_letters + string.digits + '#$%*')"], {}), "(string.ascii_letters + string.digits + '#$%*')\n", (348, 395), False, 'import random\n')] |
EEdwardsA/DS-OOP-Review | football/football_test.py | 2352866c5d0ea6a09802c29c17366450f35c75ae | import unittest
from players import Player, Quarterback
from possible_values import *
from game import Game
from random import randint, uniform, sample
from season import *
# TODO - some things you can add...
class FootballGameTest(unittest.TestCase):
'''test the class'''
def test_field_goal_made(self):
teams = sample(team_names, k=2)
game = Game(teams=teams)
team_prev_points = game.score[teams[0]]
game.field_goal(teams[0])
team_post_points = game.score[teams[0]]
self.assertEqual(team_post_points, team_prev_points + 3)
def test_get_winner(self):
teams = sample(team_names, k=2)
game = Game(teams=teams)
game.field_goal(teams[0])
t1_points = game.score[teams[0]]
t2_points = game.score[teams[1]]
if t1_points >= t2_points:
win, lose = teams
else:
lose, win = teams
self.assertEqual((win,lose), game.get_winning_team())
class FootballPlayerTest(unittest.TestCase):
'''Check the default values for Player and Quarterback
yards=120, touchdowns=5, safety=1,
interceptions=0
'''
def test_default_player_yards(self):
player = Player(name='Dude')
self.assertEqual(player.yards, 120)
def test_player_yards_set_to(self):
player = Player(name='OtherDude', yards=150)
self.assertEqual(player.yards, 150)
def test_default_qb_interceptions(self):
qb = Quarterback(name='FancyDude')
self.assertEqual(qb.interceptions, 4)
def test_default_qb_completed_passes(self):
qb = Quarterback()
self.assertEqual(qb.completed_passes, 20)
def test_passing_score(self):
qb = Quarterback()
self.assertEqual((20 - (2 * 4)), qb.passing_score())
if __name__ == '__main__':
unittest.main()
| [((1843, 1858), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1856, 1858), False, 'import unittest\n'), ((332, 355), 'random.sample', 'sample', (['team_names'], {'k': '(2)'}), '(team_names, k=2)\n', (338, 355), False, 'from random import randint, uniform, sample\n'), ((371, 388), 'game.Game', 'Game', ([], {'teams': 'teams'}), '(teams=teams)\n', (375, 388), False, 'from game import Game\n'), ((633, 656), 'random.sample', 'sample', (['team_names'], {'k': '(2)'}), '(team_names, k=2)\n', (639, 656), False, 'from random import randint, uniform, sample\n'), ((672, 689), 'game.Game', 'Game', ([], {'teams': 'teams'}), '(teams=teams)\n', (676, 689), False, 'from game import Game\n'), ((1224, 1243), 'players.Player', 'Player', ([], {'name': '"""Dude"""'}), "(name='Dude')\n", (1230, 1243), False, 'from players import Player, Quarterback\n'), ((1346, 1381), 'players.Player', 'Player', ([], {'name': '"""OtherDude"""', 'yards': '(150)'}), "(name='OtherDude', yards=150)\n", (1352, 1381), False, 'from players import Player, Quarterback\n'), ((1485, 1514), 'players.Quarterback', 'Quarterback', ([], {'name': '"""FancyDude"""'}), "(name='FancyDude')\n", (1496, 1514), False, 'from players import Player, Quarterback\n'), ((1623, 1636), 'players.Quarterback', 'Quarterback', ([], {}), '()\n', (1634, 1636), False, 'from players import Player, Quarterback\n'), ((1735, 1748), 'players.Quarterback', 'Quarterback', ([], {}), '()\n', (1746, 1748), False, 'from players import Player, Quarterback\n')] |
shayanthrn/AGAIN-VC | preprocessor/base.py | 41934f710d117d524b4a0bfdee7e9b845a56d422 | import os
import logging
import numpy as np
from tqdm import tqdm
from functools import partial
from multiprocessing.pool import ThreadPool
import pyworld as pw
import resemblyzer
from util.dsp import Dsp
logger = logging.getLogger(__name__)
def preprocess_one(input_items, module, output_path=''):
input_path, basename = input_items
y = module.load_wav(input_path)
if module.config.dtype == 'wav':
ret = y
elif module.config.dtype == 'melspectrogram':
ret = module.wav2mel(y)
elif module.config.dtype == 'f0':
f0, sp, ap = pw.wav2world(y.astype(np.float64), module.config.sample_rate)
ret = f0
if (f0 == 0).all():
logger.warn(f'f0 returns all zeros: {input_path}')
elif module.config.dtype == 's3prl_spec':
ret = module.wav2s3prl_spec(y)
if ret is None:
logger.warn(f'S3PRL spectrogram returns NoneType: {input_path}')
elif module.config.dtype == 'resemblyzer':
y = resemblyzer.preprocess_wav(input_path)
ret = module.wav2resemblyzer(y)
else:
logger.warn(f'Feature type {module.config.dtype} is not implemented')
if output_path == '':
return ret
else:
if type(ret) is np.ndarray:
np.save(os.path.join(output_path, f'{basename}.npy'), ret)
else:
logger.warn(f'Feature {module.config.dtype} is not saved: {input_path}.')
return 1
class BasePreproceccor():
def __init__(self, config):
self.dsp_modules = {}
for feat in config.feat_to_preprocess:
self.dsp_modules[feat] = Dsp(config.feat[feat])
def preprocess(self, input_path, output_path, feat, njobs):
file_dict = self.gen_file_dict(input_path)
logger.info(f'Starting to preprocess from {input_path}.')
self.preprocess_from_file_dict(file_dict=file_dict, output_path=output_path, feat=feat, njobs=njobs)
logger.info(f'Saving processed file to {output_path}.')
return
def preprocess_from_file_dict(self, file_dict, output_path, feat, njobs):
os.makedirs(os.path.join(output_path, feat), exist_ok=True)
module = self.dsp_modules[feat]
task = partial(preprocess_one, module=module, output_path=os.path.join(output_path, feat))
with ThreadPool(njobs) as pool:
_ = list(tqdm(pool.imap(task, file_dict.items()), total=len(file_dict), desc=f'Preprocessing '))
def gen_file_dict(self, input_path):
raise NotImplementedError
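# Example (sketch) of a concrete subclass: gen_file_dict is expected to map each
# input path to a basename, since preprocess_one unpacks (input_path, basename)
# pairs. The class name and the .wav filter below are illustrative assumptions.
#
# class WavFolderPreprocessor(BasePreproceccor):
#     def gen_file_dict(self, input_path):
#         return {os.path.join(input_path, f): os.path.splitext(f)[0]
#                 for f in os.listdir(input_path) if f.endswith('.wav')}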
| [((197, 224), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (214, 224), False, 'import logging\n'), ((1582, 1604), 'util.dsp.Dsp', 'Dsp', (['config.feat[feat]'], {}), '(config.feat[feat])\n', (1585, 1604), False, 'from util.dsp import Dsp\n'), ((2074, 2105), 'os.path.join', 'os.path.join', (['output_path', 'feat'], {}), '(output_path, feat)\n', (2086, 2105), False, 'import os\n'), ((2274, 2291), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['njobs'], {}), '(njobs)\n', (2284, 2291), False, 'from multiprocessing.pool import ThreadPool\n'), ((1240, 1284), 'os.path.join', 'os.path.join', (['output_path', 'f"""{basename}.npy"""'], {}), "(output_path, f'{basename}.npy')\n", (1252, 1284), False, 'import os\n'), ((2228, 2259), 'os.path.join', 'os.path.join', (['output_path', 'feat'], {}), '(output_path, feat)\n', (2240, 2259), False, 'import os\n')] |
fjruizruano/SatIntExt | divsum_stats.py | 90b39971ee6ea3d7cfa63fbb906df3df714a5012 | #!/usr/bin/python
import sys
from subprocess import call
print "divsum_count.py ListOfDivsumFiles\n"
try:
files = sys.argv[1]
except:
files = raw_input("Introduce RepeatMasker's list of Divsum files with library size (tab separated): ")
files = open(files).readlines()
to_join = []
header = "Coverage for each repeat class and divergence (Kimura)\n"
results = {}
for line in files:
line = line.split("\t")
file = line[0]
size = int(line[1])
data = open(file).readlines()
matrix_start = data.index(header)
matrix = data[matrix_start+1:]
li= []
names_line = matrix[0]
info = names_line.split()
for fam in info:
li.append([fam])
info_len = len(li)
for line in matrix[1:]:
info = line.split()
for i in range(0,info_len):
li[i].append(info[i])
out = open(file+".counts","w")
out.write("Sequence\tAbundance\n")
stats = open(file+".stats","w")
stats.write("Sequence\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n")
for el in li[1:]:
numbers = el[1:]
numbers = [int(x) for x in numbers]
numbers_prop = [1.0*x/size for x in numbers]
prop_dict = {}
prop_li = []
for prop in range(0,len(numbers_prop)):
prop_dict[prop] = numbers_prop[prop]
prop_li.append(numbers_prop[prop])
prop_dict_sorted = sorted(prop_dict.items(), key=lambda x: x[1], reverse=True)
total = sum(numbers_prop)
top = prop_dict_sorted[0]
top_div = top[0]
top_ab = top[1]
peak = []
if top_div >= 2:
for div in range(top_div-2,top_div+3):
peak.append(prop_dict[div])
else:
for div in range(0,5):
peak.append(prop_dict[div])
sum_peak = sum(peak)
rps = sum_peak/total
divpeak = top_div
out.write(el[0]+"\t"+str(sum(numbers))+"\n")
all_divs = []
for d in li[0][1:]:
all_divs.append(int(d)+0.5)
div_sumproduct = 0
for x,y in zip(all_divs,prop_li):
div_sumproduct += x * y
divergence = div_sumproduct/total
data = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (el[0],str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
stats.write(data)
data2 = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (file, str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
if el[0] in results:
results[el[0]].append(data2)
else:
results[el[0]] = [data2]
out.close()
stats.close()
to_join.append(file+".counts")
out = open("results.txt", "w")
for el in sorted(results):
info = results[el]
out.write("%s\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n" % (el))
for i in info:
out.write(i)
out.write("\n\n\n")
out.close()
call("join_multiple_lists.py %s" % (" ".join(to_join)), shell=True)
| [] |
onyxfish/fever | agatecharts/charts/__init__.py | 8aef0cd4adff7fdde1f5950ffb1d01db9137e3b7 | #!/usr/bin/env python
from agatecharts.charts.bars import Bars
from agatecharts.charts.columns import Columns
from agatecharts.charts.lines import Lines
from agatecharts.charts.scatter import Scatter
| [] |
rossm6/accounts | users/views.py | 74633ce4038806222048d85ef9dfe97a957a6a71 | from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.auth.views import (LoginView, PasswordResetConfirmView,
PasswordResetView)
from django.http import HttpResponse, HttpResponseNotAllowed
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, UpdateView
from users.forms import (SignInForm, SignUpForm, UserPasswordResetForm,
UserProfileForm, UserSetPasswordForm)
from users.mixins import LockDuringEditMixin
from users.models import Lock, UserSession
class SignUp(CreateView):
model = User
form_class = SignUpForm
template_name = "registration/signup.html"
success_url = reverse_lazy("dashboard:dashboard")
class SignIn(LoginView):
form_class = SignInForm
class Profile(LoginRequiredMixin, LockDuringEditMixin, UpdateView):
model = User
form_class = UserProfileForm
template_name = "registration/profile.html"
success_url = reverse_lazy("users:profile")
def get_object(self):
return self.request.user
def form_valid(self, form):
response = super().form_valid(form)
update_session_auth_hash(self.request, self.object) # this will delete the current user session
# and create anew
UserSession.objects.create(user=self.object, session_id=self.request.session.session_key)
return response
class UserPasswordResetView(PasswordResetView):
form_class = UserPasswordResetForm
class UserPasswordResetConfirmView(PasswordResetConfirmView):
form_class = UserSetPasswordForm
def unlock(request, pk):
if request.method == "POST":
lock = Lock.objects.filter(pk=pk).delete()
return HttpResponse('')
return HttpResponseNotAllowed(["POST"])
| [((857, 892), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""dashboard:dashboard"""'], {}), "('dashboard:dashboard')\n", (869, 892), False, 'from django.urls import reverse_lazy\n'), ((1134, 1163), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""users:profile"""'], {}), "('users:profile')\n", (1146, 1163), False, 'from django.urls import reverse_lazy\n'), ((1897, 1929), 'django.http.HttpResponseNotAllowed', 'HttpResponseNotAllowed', (["['POST']"], {}), "(['POST'])\n", (1919, 1929), False, 'from django.http import HttpResponse, HttpResponseNotAllowed\n'), ((1309, 1360), 'django.contrib.auth.update_session_auth_hash', 'update_session_auth_hash', (['self.request', 'self.object'], {}), '(self.request, self.object)\n', (1333, 1360), False, 'from django.contrib.auth import update_session_auth_hash\n'), ((1439, 1533), 'users.models.UserSession.objects.create', 'UserSession.objects.create', ([], {'user': 'self.object', 'session_id': 'self.request.session.session_key'}), '(user=self.object, session_id=self.request.\n session.session_key)\n', (1465, 1533), False, 'from users.models import Lock, UserSession\n'), ((1869, 1885), 'django.http.HttpResponse', 'HttpResponse', (['""""""'], {}), "('')\n", (1881, 1885), False, 'from django.http import HttpResponse, HttpResponseNotAllowed\n'), ((1818, 1844), 'users.models.Lock.objects.filter', 'Lock.objects.filter', ([], {'pk': 'pk'}), '(pk=pk)\n', (1837, 1844), False, 'from users.models import Lock, UserSession\n')] |
adidas/m3d-api | test/core/s3_table_test_base.py | 755d676452e4b10075fa65f9acfdbf30a6ee828e | import os
from test.core.emr_system_unit_test_base import EMRSystemUnitTestBase
from test.core.tconx_helper import TconxHelper
class S3TableTestBase(EMRSystemUnitTestBase):
default_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test101.json"
multi_partition_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test102.json"
single_partition_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test103.json"
def env_setup(
self,
tmpdir,
destination_system,
destination_database,
destination_environment,
destination_table
):
"""
This function builds on top of EMRSystemUnitTestBase.env_setup() and adds a test-specific tconx file.
:param tmpdir: test case specific temporary directory where configuration files will be created.
:param destination_system: destination system code
:param destination_database: destination database code
:param destination_environment: destination environment code
:param destination_table: destination table code
:return: Function will return several parameters:
m3d_config_path: path of the test-specific config.json. Should be passed to M3D API calls.
scon_emr_path: path of the test-specific scon_emr
tconx_path: path of the test-specific tconx
m3d_config_dict: contents of test-specific config.json as dict
scon_emr_dict: contents of test-specific scon_emr as dict
"""
m3d_config_file, scon_emr_file, m3d_config_dict, scon_emr_dict = \
super(S3TableTestBase, self).env_setup(
tmpdir,
destination_system,
destination_database,
destination_environment
)
# tconx specific part
tconx_file = TconxHelper.setup_tconx_from_file(
m3d_config_dict["tags"]["config"],
destination_system,
destination_database,
destination_environment,
destination_table,
S3TableTestBase.default_tconx
)
return m3d_config_file, scon_emr_file, tconx_file, \
m3d_config_dict, scon_emr_dict
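# Example (sketch) of a call from a test method, where the destination_* codes
# are whatever the concrete test case supplies:
#     files = self.env_setup(tmpdir, destination_system, destination_database,
#                            destination_environment, destination_table)
#     m3d_config_file, scon_emr_file, tconx_file, m3d_config_dict, scon_emr_dict = files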
@staticmethod
def assert_one_hql_sent(dump_dir, expected_hql):
generated_files = map(lambda f: os.path.join(dump_dir, f), os.listdir(dump_dir))
hql_files = list(filter(lambda f: os.path.isfile(f) and f.endswith(".hql"), generated_files))
assert len(hql_files) == 1
hql_file = hql_files[0]
with open(hql_file, 'r') as hql_f:
generated_hql = hql_f.read()
generated_hql_processed = generated_hql.strip().lower()
expected_hql_processed = expected_hql.strip().lower()
assert generated_hql_processed == expected_hql_processed
| [((1988, 2181), 'test.core.tconx_helper.TconxHelper.setup_tconx_from_file', 'TconxHelper.setup_tconx_from_file', (["m3d_config_dict['tags']['config']", 'destination_system', 'destination_database', 'destination_environment', 'destination_table', 'S3TableTestBase.default_tconx'], {}), "(m3d_config_dict['tags']['config'],\n destination_system, destination_database, destination_environment,\n destination_table, S3TableTestBase.default_tconx)\n", (2021, 2181), False, 'from test.core.tconx_helper import TconxHelper\n'), ((2500, 2520), 'os.listdir', 'os.listdir', (['dump_dir'], {}), '(dump_dir)\n', (2510, 2520), False, 'import os\n'), ((2473, 2498), 'os.path.join', 'os.path.join', (['dump_dir', 'f'], {}), '(dump_dir, f)\n', (2485, 2498), False, 'import os\n'), ((2564, 2581), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (2578, 2581), False, 'import os\n')] |
BrianWaganerSTL/RocketDBaaS | metrics/serializers.py | d924589188411371842513060a5e08b1be3cdccf | from rest_framework import serializers
from metrics.models import Metrics_Cpu, Metrics_PingServer, Metrics_MountPoint, \
Metrics_CpuLoad, Metrics_PingDb
class Metrics_CpuSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_Cpu
fields = '__all__'
depth = 0
class Metrics_MountPointSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_MountPoint
fields = '__all__'
depth = 0
class Metrics_CpuLoadSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_CpuLoad
fields = '__all__'
depth = 0
class Metrics_PingServerSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_PingServer
fields = '__all__'
depth = 0
class Metrics_PingDbSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_PingDb
fields = '__all__'
depth = 0 | [] |
dmayle/rules_sqlc | sqlc/private/sqlc_toolchain.bzl | c465542827a086994e9427e2c792bbc4355c3e70 | # Copyright 2020 Plezentek, Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//sqlc/private:providers.bzl",
"SQLCRelease",
)
load(
"//sqlc/private/rules_go/lib:platforms.bzl",
"PLATFORMS",
)
def _sqlc_toolchain_impl(ctx):
release = ctx.attr.release[SQLCRelease]
cross_compile = ctx.attr.goos != release.goos or ctx.attr.goarch != release.goarch
return [platform_common.ToolchainInfo(
name = ctx.label.name,
cross_compile = cross_compile,
default_goos = ctx.attr.goos,
default_goarch = ctx.attr.goarch,
actions = struct(),
flags = struct(),
release = release,
)]
sqlc_toolchain = rule(
_sqlc_toolchain_impl,
attrs = {
"goos": attr.string(
mandatory = True,
doc = "Default target OS",
),
"goarch": attr.string(
mandatory = True,
doc = "Default target architecture",
),
"release": attr.label(
mandatory = True,
providers = [SQLCRelease],
cfg = "exec",
doc = "The SQLC release this toolchain is based on",
),
},
doc = "Defines a SQLC toolchain based on a release",
provides = [platform_common.ToolchainInfo],
)
def declare_toolchains(host, release):
host_goos, _, host_goarch = host.partition("_")
for p in PLATFORMS:
toolchain_name = "sqlc_" + p.name
impl_name = toolchain_name + "-impl"
cgo_constraints = (
"@com_plezentek_rules_sqlc//sqlc/toolchain:cgo_off",
"@com_plezentek_rules_sqlc//sqlc/toolchain:cgo_on",
)
constraints = [c for c in p.constraints if c not in cgo_constraints]
sqlc_toolchain(
name = impl_name,
goos = p.goos,
goarch = p.goarch,
release = release,
tags = ["manual"],
visibility = ["//visibility:public"],
)
native.toolchain(
name = toolchain_name,
toolchain_type = "@com_plezentek_rules_sqlc//sqlc:toolchain",
exec_compatible_with = [
"@com_plezentek_rules_sqlc//sqlc/toolchain:" + host_goos,
"@com_plezentek_rules_sqlc//sqlc/toolchain:" + host_goarch,
],
target_compatible_with = constraints,
toolchain = ":" + impl_name,
)
| [] |
nolanzzz/mtmct | configs/tracker_configs/new_test_20e_cam_1_new_short.py | 8bbbc7ff2fa53ab8af424feaac3cf7424b87fff0 | root = {
"general" : {
"display_viewer" : False,
#The visible GPUS will be restricted to the numbers listed here. The pytorch (cuda:0) numeration will start at 0
#This is a trick to get everything onto the wanted gpus because just setting cuda:4 in the function calls will
#not work for mmdetection. There will still be things on gpu cuda:0.
"cuda_visible_devices" : "1",
"save_track_results" : True
},
"data" : {
# To increase the speed while developing an specific interval of all frames can be set.
"selection_interval" : [0,10000],
"source" : {
"base_folder" : "/u40/zhanr110/MTA_ext_short/test",
# "base_folder" : "/Users/nolanzhang/Projects/mtmct/data/MTA_ext_short/test",
"cam_ids" : [1]
}
},
"detector" : {
# "mmdetection_config" : "detectors/mmdetection/configs/faster_rcnn_r50_fpn_1x_gta.py",
"mmdetection_config" : "detectors/mmdetection/configs/mta/faster_rcnn_r50_mta.py",
# "mmdetection_checkpoint_file" : "work_dirs/detector/faster_rcnn_gta22.07_epoch_5.pth",
"mmdetection_checkpoint_file" : "detectors/mmdetection/work_dirs/GtaDataset_30e/epoch_20.pth",
"device" : "cuda:0",
#Remove all detections with a confidence less than min_confidence
"min_confidence" : 0.8,
},
"feature_extractor" : {
"feature_extractor_name" : "abd_net_extractor"
,"reid_strong_extractor": {
"reid_strong_baseline_config": "feature_extractors/reid_strong_baseline/configs/softmax_triplet.yml",
"checkpoint_file": "work_dirs/feature_extractor/strong_reid_baseline/resnet50_model_reid_GTA_softmax_triplet.pth",
"device": "cuda:0,1"
,"visible_device" : "0,1"}
,"abd_net_extractor" : dict(abd_dan=['cam', 'pam'], abd_dan_no_head=False, abd_dim=1024, abd_np=2, adam_beta1=0.9,
adam_beta2=0.999, arch='resnet50', branches=['global', 'abd'], compatibility=False, criterion='htri',
cuhk03_classic_split=False, cuhk03_labeled=False, dan_dan=[], dan_dan_no_head=False, dan_dim=1024,
data_augment=['crop,random-erase'], day_only=False, dropout=0.5, eval_freq=5, evaluate=False,
fixbase=False, fixbase_epoch=10, flip_eval=False, gamma=0.1, global_dim=1024,
global_max_pooling=False, gpu_devices='1', height=384, htri_only=False, label_smooth=True,
lambda_htri=0.1, lambda_xent=1, lr=0.0003, margin=1.2, max_epoch=80, min_height=-1,
momentum=0.9, night_only=False, np_dim=1024, np_max_pooling=False, np_np=2, np_with_global=False,
num_instances=4, of_beta=1e-06, of_position=['before', 'after', 'cam', 'pam', 'intermediate'],
of_start_epoch=23, open_layers=['classifier'], optim='adam', ow_beta=0.001,
pool_tracklet_features='avg', print_freq=10, resume='', rmsprop_alpha=0.99
, load_weights='work_dirs/feature_extractor/abd-net/checkpoint_ep30_non_clean.pth.tar'
# , load_weights='work_dirs/feature_extractor/abd-net/resnet50-19c8e357.pth'
, root='work_dirs/datasets'
, sample_method='evenly'
, save_dir='work_dirs/feature_extractor/abd-net/log/eval-resnet50'
, seed=1, seq_len=15,
sgd_dampening=0, sgd_nesterov=False, shallow_cam=True, source_names=['mta_ext'], split_id=0,
start_epoch=0, start_eval=0, stepsize=[20, 40], target_names=['market1501'],
test_batch_size=100, train_batch_size=64, train_sampler='', use_avai_gpus=False, use_cpu=False,
use_metric_cuhk03=False, use_of=True, use_ow=True, visualize_ranks=False, weight_decay=0.0005,
width=128, workers=4)
},
"tracker" : {
"type" : "DeepSort",
"nn_budget" : 100
}
}
| [] |
cherub96/voc | tests/structures/test_generator.py | 2692d56059e4d4a52768270feaf5179b23609b04 | from ..utils import TranspileTestCase
class GeneratorTests(TranspileTestCase):
def test_simple_generator(self):
self.assertCodeExecution("""
def multiplier(first, second):
y = first * second
yield y
y *= second
yield y
y *= second
yield y
y *= second
yield y
print(list(multiplier(1, 20)))
""")
def test_loop_generator(self):
self.assertCodeExecution("""
def fizz_buzz(start, stop):
for i in range(start, stop):
found = False
if i % 2 == 0:
yield 'fizz'
found = True
if i % 3 == 0:
yield 'buzz'
found = True
if not found:
yield i
print(list(fizz_buzz(1, 20)))
""")
| [] |
hdoupe/OG-USA | ogusa/tax.py | f7e4d600b7a2993c7d1b53e23bfe29cfccaea770 | '''
------------------------------------------------------------------------
Functions for taxes in the steady state and along the transition path.
------------------------------------------------------------------------
'''
# Packages
import numpy as np
from ogusa import utils
'''
------------------------------------------------------------------------
Functions
------------------------------------------------------------------------
'''
def replacement_rate_vals(nssmat, wss, factor_ss, j, p):
'''
Calculates replacement rate values for the social security system.
Args:
nssmat (Numpy array): initial guess at labor supply, size = SxJ
wss (scalar): steady state real wage rate
factor_ss (scalar): scaling factor converting model units to
dollars
j (int): index of lifetime income group
p (OG-USA Specifications object): model parameters
Returns:
theta (Numpy array): social security replacement rate value for
lifetime income group j
'''
if j is not None:
e = p.e[:, j]
else:
e = p.e
# adjust number of calendar years AIME computed from int model periods
equiv_periods = int(round((p.S / 80.0) * p.AIME_num_years)) - 1
if e.ndim == 2:
dim2 = e.shape[1]
else:
dim2 = 1
earnings = (e * (wss * nssmat * factor_ss)).reshape(p.S, dim2)
# get highest earning years for number of years AIME computed from
highest_earn =\
(-1.0 * np.sort(-1.0 * earnings[:p.retire[-1], :],
axis=0))[:equiv_periods]
AIME = highest_earn.sum(0) / ((12.0 * (p.S / 80.0)) * equiv_periods)
PIA = np.zeros(dim2)
# Compute level of replacement using AIME brackets and PIA rates
for j in range(dim2):
if AIME[j] < p.AIME_bkt_1:
PIA[j] = p.PIA_rate_bkt_1 * AIME[j]
elif AIME[j] < p.AIME_bkt_2:
PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
p.PIA_rate_bkt_2 * (AIME[j] - p.AIME_bkt_1))
else:
PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
p.PIA_rate_bkt_2 * (p.AIME_bkt_2 - p.AIME_bkt_1) +
p.PIA_rate_bkt_3 * (AIME[j] - p.AIME_bkt_2))
# Set the maximum monthly replacment rate from SS benefits tables
PIA[PIA > p.PIA_maxpayment] = p.PIA_maxpayment
if p.PIA_minpayment != 0.0:
PIA[PIA < p.PIA_minpayment] = p.PIA_minpayment
theta = (PIA * (12.0 * p.S / 80.0)) / (factor_ss * wss)
return theta
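# Worked sketch of the bracket step with illustrative numbers: if
# PIA_rate_bkt_1 = 0.9 and a group's AIME is 800, below AIME_bkt_1, then
# PIA = 0.9 * 800 = 720 per month, and
# theta = 720 * (12 * S / 80) / (factor_ss * wss) converts it to model units.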
def ETR_wealth(b, h_wealth, m_wealth, p_wealth):
r'''
Calculates the effective tax rate on wealth.
.. math::
T_{j,s,t}^{w} = \frac{h^{w}p_{w}b_{j,s,t}}{h^{w}b_{j,s,t} + m^{w}}
Args:
b (Numpy array): savings
h_wealth (scalar): parameter of wealth tax function
p_wealth (scalar): parameter of wealth tax function
m_wealth (scalar): parameter of wealth tax function
Returns:
tau_w (Numpy array): effective tax rate on wealth, size = SxJ
'''
tau_w = (p_wealth * h_wealth * b) / (h_wealth * b + m_wealth)
return tau_w
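# Worked sketch with illustrative parameters: h_wealth = 1.0, m_wealth = 1.0,
# p_wealth = 0.025 and b = 10 give tau_w = 0.025 * 1 * 10 / (1 * 10 + 1) ~= 0.0227.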
def MTR_wealth(b, h_wealth, m_wealth, p_wealth):
r'''
Calculates the marginal tax rate on wealth from the wealth tax.
.. math::
\frac{\partial T_{j,s,t}^{w}}{\partial b_{j,s,t}} = \frac{h^{w}m^{w}p_{w}}{(b_{j,s,t}h^{w}+m^{w})^{2}}
Args:
b (Numpy array): savings
h_wealth (scalar): parameter of wealth tax function
p_wealth (scalar): parameter of wealth tax function
m_wealth (scalar): parameter of wealth tax function
Returns:
tau_prime (Numpy array): marginal tax rate on wealth, size = SxJ
'''
tau_prime = ((b * h_wealth * m_wealth * p_wealth) /
((b * h_wealth + m_wealth) ** 2) +
ETR_wealth(b, h_wealth, m_wealth, p_wealth))
return tau_prime
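# Continuing the illustrative numbers above (b = 10, h = m = 1, p = 0.025):
# tau_prime = 0.25 / 121 + 0.0227 ~= 0.0248.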
def ETR_income(r, w, b, n, factor, e, etr_params, p):
'''
Calculates effective personal income tax rate.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
tau (Numpy array): effective tax rate on total income
'''
X = (w * e * n) * factor
Y = (r * b) * factor
X2 = X ** 2
Y2 = Y ** 2
income = X + Y
income2 = income ** 2
if p.tax_func_type == 'GS':
phi0 = np.squeeze(etr_params[..., 0])
phi1 = np.squeeze(etr_params[..., 1])
phi2 = np.squeeze(etr_params[..., 2])
tau = ((phi0 * (income - ((income ** -phi1) + phi2) **
(-1 / phi1))) / income)
elif p.tax_func_type == 'DEP_totalinc':
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
max_income = np.squeeze(etr_params[..., 4])
min_income = np.squeeze(etr_params[..., 5])
shift_income = np.squeeze(etr_params[..., 8])
shift = np.squeeze(etr_params[..., 10])
tau_income = (((max_income - min_income) *
(A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income)
tau = tau_income + shift_income + shift
else: # DEP or linear
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
C = np.squeeze(etr_params[..., 2])
D = np.squeeze(etr_params[..., 3])
max_x = np.squeeze(etr_params[..., 4])
min_x = np.squeeze(etr_params[..., 5])
max_y = np.squeeze(etr_params[..., 6])
min_y = np.squeeze(etr_params[..., 7])
shift_x = np.squeeze(etr_params[..., 8])
shift_y = np.squeeze(etr_params[..., 9])
shift = np.squeeze(etr_params[..., 10])
share = np.squeeze(etr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
tau = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
return tau
def MTR_income(r, w, b, n, factor, mtr_capital, e, etr_params,
mtr_params, p):
r'''
Generates the marginal tax rate on labor or capital income for households.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
mtr_capital (bool): whether to compute the marginal tax rate on
capital income or labor income
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
mtr_params (Numpy array): marginal tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
tau (Numpy array): marginal tax rate on income source
'''
X = (w * e * n) * factor
Y = (r * b) * factor
X2 = X ** 2
Y2 = Y ** 2
income = X + Y
income2 = income ** 2
if p.tax_func_type == 'GS':
if p.analytical_mtrs:
phi0 = np.squeeze(etr_params[..., 0])
phi1 = np.squeeze(etr_params[..., 1])
phi2 = np.squeeze(etr_params[..., 2])
else:
phi0 = np.squeeze(mtr_params[..., 0])
phi1 = np.squeeze(mtr_params[..., 1])
phi2 = np.squeeze(mtr_params[..., 2])
tau = (phi0*(1 - (income ** (-phi1 - 1) *
((income ** -phi1) + phi2) **
((-1 - phi1) / phi1))))
elif p.tax_func_type == 'DEP_totalinc':
if p.analytical_mtrs:
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
max_income = np.squeeze(etr_params[..., 4])
min_income = np.squeeze(etr_params[..., 5])
shift_income = np.squeeze(etr_params[..., 8])
shift = np.squeeze(etr_params[..., 10])
d_etr = ((max_income - min_income) * ((2 * A * income + B) /
((A * income2 + B * income + 1) ** 2)))
etr = (((max_income - min_income) *
((A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income) +
shift_income + shift)
tau = (d_etr * income) + (etr)
else:
A = np.squeeze(mtr_params[..., 0])
B = np.squeeze(mtr_params[..., 1])
max_income = np.squeeze(mtr_params[..., 4])
min_income = np.squeeze(mtr_params[..., 5])
shift_income = np.squeeze(mtr_params[..., 8])
shift = np.squeeze(mtr_params[..., 10])
tau_income = (((max_income - min_income) *
(A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income)
tau = tau_income + shift_income + shift
else: # DEP or linear
if p.analytical_mtrs:
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
C = np.squeeze(etr_params[..., 2])
D = np.squeeze(etr_params[..., 3])
max_x = np.squeeze(etr_params[..., 4])
min_x = np.squeeze(etr_params[..., 5])
max_y = np.squeeze(etr_params[..., 6])
min_y = np.squeeze(etr_params[..., 7])
shift_x = np.squeeze(etr_params[..., 8])
shift_y = np.squeeze(etr_params[..., 9])
shift = np.squeeze(etr_params[..., 10])
share = np.squeeze(etr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
etr = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
if mtr_capital:
d_etr = ((1-share) * ((tau_y + shift_y) ** (-share)) *
(max_y - min_y) * ((2 * C * Y + D) /
((C * Y2 + D * Y + 1)
** 2)) *
((tau_x + shift_x) ** share))
tau = d_etr * income + etr
else:
d_etr = (share * ((tau_x + shift_x) ** (share - 1)) *
(max_x - min_x) * ((2 * A * X + B) /
((A * X2 + B * X + 1)
** 2)) *
((tau_y + shift_y) ** (1 - share)))
tau = d_etr * income + etr
else:
A = np.squeeze(mtr_params[..., 0])
B = np.squeeze(mtr_params[..., 1])
C = np.squeeze(mtr_params[..., 2])
D = np.squeeze(mtr_params[..., 3])
max_x = np.squeeze(mtr_params[..., 4])
min_x = np.squeeze(mtr_params[..., 5])
max_y = np.squeeze(mtr_params[..., 6])
min_y = np.squeeze(mtr_params[..., 7])
shift_x = np.squeeze(mtr_params[..., 8])
shift_y = np.squeeze(mtr_params[..., 9])
shift = np.squeeze(mtr_params[..., 10])
share = np.squeeze(mtr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
tau = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
return tau
def get_biz_tax(w, Y, L, K, p, method):
r'''
Finds total business income tax revenue.
.. math::
R_{t}^{b} = \tau_{t}^{b}(Y_{t} - w_{t}L_{t}) - \tau_{t}^{b}\delta_{t}^{\tau}K_{t}^{\tau}
Args:
w (array_like): real wage rate
Y (array_like): aggregate output
L (array_like): aggregate labor demand
K (array_like): aggregate capital demand
p (OG-USA Specifications object): model parameters
method (str): adjusts calculation dimensions based on 'SS' or 'TPI'
Returns:
business_revenue (array_like): aggregate business tax revenue
'''
if method == 'SS':
delta_tau = p.delta_tau[-1]
tau_b = p.tau_b[-1]
else:
delta_tau = p.delta_tau[:p.T]
tau_b = p.tau_b[:p.T]
business_revenue = tau_b * (Y - w * L) - tau_b * delta_tau * K
return business_revenue
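# Worked sketch with illustrative values: tau_b = 0.21, delta_tau = 0.05, Y = 100,
# w * L = 60 and K = 200 give 0.21 * (100 - 60) - 0.21 * 0.05 * 200 = 8.4 - 2.1 = 6.3.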
def net_taxes(r, w, b, n, bq, factor, tr, theta, t, j, shift, method,
e, etr_params, p):
'''
Calculate net taxes paid for each household.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
bq (Numpy array): bequests received
factor (scalar): scaling factor converting model units to
dollars
tr (Numpy array): government transfers to the household
theta (Numpy array): social security replacement rate value for
lifetime income group j
t (int): time period
j (int): index of lifetime income group
shift (bool): whether computing for periods 0--s or 1--(s+1),
=True for 1--(s+1)
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
net_tax (Numpy array): net taxes paid for each household
'''
T_I = income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p)
pension = pension_amount(w, n, theta, t, j, shift, method, e, p)
T_BQ = bequest_tax_liab(r, b, bq, t, j, method, p)
T_W = wealth_tax_liab(r, b, t, j, method, p)
net_tax = T_I - pension + T_BQ + T_W - tr
return net_tax
def income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p):
'''
Calculate income and payroll tax liability for each household
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
T_I (Numpy array): total income and payroll taxes paid for each
household
'''
if j is not None:
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
w = w.reshape(w.shape[0], 1)
else:
if method == 'TPI':
r = utils.to_timepath_shape(r)
w = utils.to_timepath_shape(w)
income = r * b + w * e * n
labor_income = w * e * n
T_I = ETR_income(r, w, b, n, factor, e, etr_params, p) * income
if method == 'SS':
T_P = p.tau_payroll[-1] * labor_income
elif method == 'TPI':
length = w.shape[0]
if len(b.shape) == 1:
T_P = p.tau_payroll[t: t + length] * labor_income
elif len(b.shape) == 2:
T_P = (p.tau_payroll[t: t + length].reshape(length, 1) *
labor_income)
else:
T_P = (p.tau_payroll[t:t + length].reshape(length, 1, 1) *
labor_income)
elif method == 'TPI_scalar':
T_P = p.tau_payroll[0] * labor_income
income_payroll_tax_liab = T_I + T_P
return income_payroll_tax_liab
def pension_amount(w, n, theta, t, j, shift, method, e, p):
'''
Calculate public pension benefit amounts for each household.
Args:
w (array_like): real wage rate
n (Numpy array): labor supply
theta (Numpy array): social security replacement rate value for
lifetime income group j
t (int): time period
j (int): index of lifetime income group
shift (bool): whether computing for periods 0--s or 1--(s+1),
=True for 1--(s+1)
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
p (OG-USA Specifications object): model parameters
Returns:
pension (Numpy array): pension amount for each household
'''
if j is not None:
if method == 'TPI':
if n.ndim == 2:
w = w.reshape(w.shape[0], 1)
else:
if method == 'TPI':
w = utils.to_timepath_shape(w)
pension = np.zeros_like(n)
if method == 'SS':
# Depending on if we are looking at b_s or b_s+1, the
# entry for retirement will change (it shifts back one).
# The shift boolean makes sure we start replacement rates
# at the correct age.
if shift is False:
pension[p.retire[-1]:] = theta * w
else:
pension[p.retire[-1] - 1:] = theta * w
elif method == 'TPI':
length = w.shape[0]
if not shift:
# retireTPI is different from retire, because in TP income
# we are counting backwards with different length lists.
# This will always be the correct location of retirement,
# depending on the shape of the lists.
retireTPI = (p.retire[t: t + length] - p.S)
else:
retireTPI = (p.retire[t: t + length] - 1 - p.S)
if len(n.shape) == 1:
if not shift:
retireTPI = p.retire[t] - p.S
else:
retireTPI = p.retire[t] - 1 - p.S
pension[retireTPI:] = (
theta[j] * p.replacement_rate_adjust[t] * w[retireTPI:])
elif len(n.shape) == 2:
for tt in range(pension.shape[0]):
pension[tt, retireTPI[tt]:] = (
theta * p.replacement_rate_adjust[t + tt] * w[tt])
else:
for tt in range(pension.shape[0]):
pension[tt, retireTPI[tt]:, :] = (
theta.reshape(1, p.J) *
p.replacement_rate_adjust[t + tt] * w[tt])
elif method == 'TPI_scalar':
# The above methods won't work if scalars are used. This option
# is only called by the SS_TPI_firstdoughnutring function in TPI.
pension = theta * p.replacement_rate_adjust[0] * w
return pension
def wealth_tax_liab(r, b, t, j, method, p):
'''
Calculate wealth tax liability for each household.
Args:
r (array_like): real interest rate
b (Numpy array): savings
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
p (OG-USA Specifications object): model parameters
Returns:
T_W (Numpy array): wealth tax liability for each household
'''
if j is not None:
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
else:
if method == 'TPI':
r = utils.to_timepath_shape(r)
if method == 'SS':
T_W = (ETR_wealth(b, p.h_wealth[-1], p.m_wealth[-1],
p.p_wealth[-1]) * b)
elif method == 'TPI':
length = r.shape[0]
if len(b.shape) == 1:
T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
p.m_wealth[t:t + length],
p.p_wealth[t:t + length]) * b)
elif len(b.shape) == 2:
T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
p.m_wealth[t:t + length],
p.p_wealth[t:t + length]) * b)
else:
T_W = (ETR_wealth(
b, p.h_wealth[t:t + length].reshape(length, 1, 1),
p.m_wealth[t:t + length].reshape(length, 1, 1),
p.p_wealth[t:t + length].reshape(length, 1, 1)) * b)
elif method == 'TPI_scalar':
T_W = (ETR_wealth(b, p.h_wealth[0], p.m_wealth[0],
p.p_wealth[0]) * b)
return T_W
def bequest_tax_liab(r, b, bq, t, j, method, p):
'''
Calculate liability due from taxes on bequests for each household.
Args:
r (array_like): real interest rate
b (Numpy array): savings
bq (Numpy array): bequests received
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
p (OG-USA Specifications object): model parameters
Returns:
T_BQ (Numpy array): bequest tax liability for each household
'''
if j is not None:
lambdas = p.lambdas[j]
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
else:
lambdas = np.transpose(p.lambdas)
if method == 'TPI':
r = utils.to_timepath_shape(r)
if method == 'SS':
T_BQ = p.tau_bq[-1] * bq
elif method == 'TPI':
length = r.shape[0]
if len(b.shape) == 1:
T_BQ = p.tau_bq[t:t + length] * bq
elif len(b.shape) == 2:
T_BQ = p.tau_bq[t:t + length].reshape(length, 1) * bq / lambdas
else:
T_BQ = p.tau_bq[t:t + length].reshape(length, 1, 1) * bq
elif method == 'TPI_scalar':
# The above methods won't work if scalars are used. This option
# is only called by the SS_TPI_firstdoughnutring function in TPI.
T_BQ = p.tau_bq[0] * bq
return T_BQ
| [((1684, 1698), 'numpy.zeros', 'np.zeros', (['dim2'], {}), '(dim2)\n', (1692, 1698), True, 'import numpy as np\n'), ((17058, 17074), 'numpy.zeros_like', 'np.zeros_like', (['n'], {}), '(n)\n', (17071, 17074), True, 'import numpy as np\n'), ((4703, 4733), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 0]'], {}), '(etr_params[..., 0])\n', (4713, 4733), True, 'import numpy as np\n'), ((4749, 4779), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 1]'], {}), '(etr_params[..., 1])\n', (4759, 4779), True, 'import numpy as np\n'), ((4795, 4825), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 2]'], {}), '(etr_params[..., 2])\n', (4805, 4825), True, 'import numpy as np\n'), ((21354, 21377), 'numpy.transpose', 'np.transpose', (['p.lambdas'], {}), '(p.lambdas)\n', (21366, 21377), True, 'import numpy as np\n'), ((1509, 1559), 'numpy.sort', 'np.sort', (['(-1.0 * earnings[:p.retire[-1], :])'], {'axis': '(0)'}), '(-1.0 * earnings[:p.retire[-1], :], axis=0)\n', (1516, 1559), True, 'import numpy as np\n'), ((4993, 5023), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 0]'], {}), '(etr_params[..., 0])\n', (5003, 5023), True, 'import numpy as np\n'), ((5036, 5066), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 1]'], {}), '(etr_params[..., 1])\n', (5046, 5066), True, 'import numpy as np\n'), ((5088, 5118), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 4]'], {}), '(etr_params[..., 4])\n', (5098, 5118), True, 'import numpy as np\n'), ((5140, 5170), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 5]'], {}), '(etr_params[..., 5])\n', (5150, 5170), True, 'import numpy as np\n'), ((5194, 5224), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 8]'], {}), '(etr_params[..., 8])\n', (5204, 5224), True, 'import numpy as np\n'), ((5241, 5272), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 10]'], {}), '(etr_params[..., 10])\n', (5251, 5272), True, 'import numpy as np\n'), ((5532, 5562), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 0]'], {}), '(etr_params[..., 0])\n', (5542, 5562), True, 'import numpy as np\n'), ((5575, 5605), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 1]'], {}), '(etr_params[..., 1])\n', (5585, 5605), True, 'import numpy as np\n'), ((5618, 5648), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 2]'], {}), '(etr_params[..., 2])\n', (5628, 5648), True, 'import numpy as np\n'), ((5661, 5691), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 3]'], {}), '(etr_params[..., 3])\n', (5671, 5691), True, 'import numpy as np\n'), ((5708, 5738), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 4]'], {}), '(etr_params[..., 4])\n', (5718, 5738), True, 'import numpy as np\n'), ((5755, 5785), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 5]'], {}), '(etr_params[..., 5])\n', (5765, 5785), True, 'import numpy as np\n'), ((5802, 5832), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 6]'], {}), '(etr_params[..., 6])\n', (5812, 5832), True, 'import numpy as np\n'), ((5849, 5879), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 7]'], {}), '(etr_params[..., 7])\n', (5859, 5879), True, 'import numpy as np\n'), ((5898, 5928), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 8]'], {}), '(etr_params[..., 8])\n', (5908, 5928), True, 'import numpy as np\n'), ((5947, 5977), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 9]'], {}), '(etr_params[..., 9])\n', (5957, 5977), True, 'import numpy as np\n'), ((5994, 6025), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 10]'], {}), '(etr_params[..., 10])\n', (6004, 6025), True, 'import numpy as np\n'), ((6042, 6073), 'numpy.squeeze', 
'np.squeeze', (['etr_params[..., 11]'], {}), '(etr_params[..., 11])\n', (6052, 6073), True, 'import numpy as np\n'), ((7413, 7443), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 0]'], {}), '(etr_params[..., 0])\n', (7423, 7443), True, 'import numpy as np\n'), ((7463, 7493), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 1]'], {}), '(etr_params[..., 1])\n', (7473, 7493), True, 'import numpy as np\n'), ((7513, 7543), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 2]'], {}), '(etr_params[..., 2])\n', (7523, 7543), True, 'import numpy as np\n'), ((7577, 7607), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 0]'], {}), '(mtr_params[..., 0])\n', (7587, 7607), True, 'import numpy as np\n'), ((7627, 7657), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 1]'], {}), '(mtr_params[..., 1])\n', (7637, 7657), True, 'import numpy as np\n'), ((7677, 7707), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 2]'], {}), '(mtr_params[..., 2])\n', (7687, 7707), True, 'import numpy as np\n'), ((15225, 15251), 'ogusa.utils.to_timepath_shape', 'utils.to_timepath_shape', (['r'], {}), '(r)\n', (15248, 15251), False, 'from ogusa import utils\n'), ((15268, 15294), 'ogusa.utils.to_timepath_shape', 'utils.to_timepath_shape', (['w'], {}), '(w)\n', (15291, 15294), False, 'from ogusa import utils\n'), ((17016, 17042), 'ogusa.utils.to_timepath_shape', 'utils.to_timepath_shape', (['w'], {}), '(w)\n', (17039, 17042), False, 'from ogusa import utils\n'), ((19569, 19595), 'ogusa.utils.to_timepath_shape', 'utils.to_timepath_shape', (['r'], {}), '(r)\n', (19592, 19595), False, 'from ogusa import utils\n'), ((21422, 21448), 'ogusa.utils.to_timepath_shape', 'utils.to_timepath_shape', (['r'], {}), '(r)\n', (21445, 21448), False, 'from ogusa import utils\n'), ((7954, 7984), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 0]'], {}), '(etr_params[..., 0])\n', (7964, 7984), True, 'import numpy as np\n'), ((8001, 8031), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 1]'], {}), '(etr_params[..., 1])\n', (8011, 8031), True, 'import numpy as np\n'), ((8057, 8087), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 4]'], {}), '(etr_params[..., 4])\n', (8067, 8087), True, 'import numpy as np\n'), ((8113, 8143), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 5]'], {}), '(etr_params[..., 5])\n', (8123, 8143), True, 'import numpy as np\n'), ((8171, 8201), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 8]'], {}), '(etr_params[..., 8])\n', (8181, 8201), True, 'import numpy as np\n'), ((8222, 8253), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 10]'], {}), '(etr_params[..., 10])\n', (8232, 8253), True, 'import numpy as np\n'), ((8669, 8699), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 0]'], {}), '(mtr_params[..., 0])\n', (8679, 8699), True, 'import numpy as np\n'), ((8716, 8746), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 1]'], {}), '(mtr_params[..., 1])\n', (8726, 8746), True, 'import numpy as np\n'), ((8772, 8802), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 4]'], {}), '(mtr_params[..., 4])\n', (8782, 8802), True, 'import numpy as np\n'), ((8828, 8858), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 5]'], {}), '(mtr_params[..., 5])\n', (8838, 8858), True, 'import numpy as np\n'), ((8886, 8916), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 8]'], {}), '(mtr_params[..., 8])\n', (8896, 8916), True, 'import numpy as np\n'), ((8937, 8968), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 10]'], {}), '(mtr_params[..., 10])\n', (8947, 8968), True, 'import numpy as np\n'), ((9278, 9308), 
'numpy.squeeze', 'np.squeeze', (['etr_params[..., 0]'], {}), '(etr_params[..., 0])\n', (9288, 9308), True, 'import numpy as np\n'), ((9325, 9355), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 1]'], {}), '(etr_params[..., 1])\n', (9335, 9355), True, 'import numpy as np\n'), ((9372, 9402), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 2]'], {}), '(etr_params[..., 2])\n', (9382, 9402), True, 'import numpy as np\n'), ((9419, 9449), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 3]'], {}), '(etr_params[..., 3])\n', (9429, 9449), True, 'import numpy as np\n'), ((9470, 9500), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 4]'], {}), '(etr_params[..., 4])\n', (9480, 9500), True, 'import numpy as np\n'), ((9521, 9551), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 5]'], {}), '(etr_params[..., 5])\n', (9531, 9551), True, 'import numpy as np\n'), ((9572, 9602), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 6]'], {}), '(etr_params[..., 6])\n', (9582, 9602), True, 'import numpy as np\n'), ((9623, 9653), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 7]'], {}), '(etr_params[..., 7])\n', (9633, 9653), True, 'import numpy as np\n'), ((9676, 9706), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 8]'], {}), '(etr_params[..., 8])\n', (9686, 9706), True, 'import numpy as np\n'), ((9729, 9759), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 9]'], {}), '(etr_params[..., 9])\n', (9739, 9759), True, 'import numpy as np\n'), ((9780, 9811), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 10]'], {}), '(etr_params[..., 10])\n', (9790, 9811), True, 'import numpy as np\n'), ((9832, 9863), 'numpy.squeeze', 'np.squeeze', (['etr_params[..., 11]'], {}), '(etr_params[..., 11])\n', (9842, 9863), True, 'import numpy as np\n'), ((10979, 11009), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 0]'], {}), '(mtr_params[..., 0])\n', (10989, 11009), True, 'import numpy as np\n'), ((11026, 11056), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 1]'], {}), '(mtr_params[..., 1])\n', (11036, 11056), True, 'import numpy as np\n'), ((11073, 11103), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 2]'], {}), '(mtr_params[..., 2])\n', (11083, 11103), True, 'import numpy as np\n'), ((11120, 11150), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 3]'], {}), '(mtr_params[..., 3])\n', (11130, 11150), True, 'import numpy as np\n'), ((11171, 11201), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 4]'], {}), '(mtr_params[..., 4])\n', (11181, 11201), True, 'import numpy as np\n'), ((11222, 11252), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 5]'], {}), '(mtr_params[..., 5])\n', (11232, 11252), True, 'import numpy as np\n'), ((11273, 11303), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 6]'], {}), '(mtr_params[..., 6])\n', (11283, 11303), True, 'import numpy as np\n'), ((11324, 11354), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 7]'], {}), '(mtr_params[..., 7])\n', (11334, 11354), True, 'import numpy as np\n'), ((11377, 11407), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 8]'], {}), '(mtr_params[..., 8])\n', (11387, 11407), True, 'import numpy as np\n'), ((11430, 11460), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 9]'], {}), '(mtr_params[..., 9])\n', (11440, 11460), True, 'import numpy as np\n'), ((11481, 11512), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 10]'], {}), '(mtr_params[..., 10])\n', (11491, 11512), True, 'import numpy as np\n'), ((11533, 11564), 'numpy.squeeze', 'np.squeeze', (['mtr_params[..., 11]'], {}), '(mtr_params[..., 11])\n', (11543, 11564), True, 'import 
numpy as np\n')] |
baireutherjonas/muse-for-anything | muse_for_anything/api/v1_api/taxonomy_items.py | a625b4fc6468d74fa12886dc465d5694eed86e04 | """Module containing the taxonomy items API endpoints of the v1 API."""
from datetime import datetime
from sqlalchemy.sql.schema import Sequence
from muse_for_anything.db.models.taxonomies import (
Taxonomy,
TaxonomyItem,
TaxonomyItemRelation,
TaxonomyItemVersion,
)
from marshmallow.utils import INCLUDE
from flask_babel import gettext
from muse_for_anything.api.util import template_url_for
from typing import Any, Callable, Dict, List, Optional, Union, cast
from flask.helpers import url_for
from flask.views import MethodView
from sqlalchemy.sql.expression import asc, desc, literal
from sqlalchemy.orm.query import Query
from sqlalchemy.orm import selectinload
from flask_smorest import abort
from http import HTTPStatus
from .root import API_V1
from ..base_models import (
ApiLink,
ApiResponse,
ChangedApiObject,
ChangedApiObjectSchema,
CursorPage,
CursorPageArgumentsSchema,
CursorPageSchema,
DynamicApiResponseSchema,
NewApiObject,
NewApiObjectSchema,
)
from ...db.db import DB
from ...db.pagination import get_page_info
from ...db.models.namespace import Namespace
from ...db.models.ontology_objects import OntologyObjectType, OntologyObjectTypeVersion
from .models.ontology import (
TaxonomyItemRelationPostSchema,
TaxonomyItemRelationSchema,
TaxonomyItemSchema,
TaxonomySchema,
)
from .namespace_helpers import (
query_params_to_api_key,
)
from .taxonomy_helpers import (
action_links_for_taxonomy_item,
    action_links_for_taxonomy_item_relation,
    action_links_for_taxonomy_item_version,  # assumed helper; used by TaxonomyItemVersionView below
create_action_link_for_taxonomy_item_relation_page,
nav_links_for_taxonomy_item,
    nav_links_for_taxonomy_item_relation,
    nav_links_for_taxonomy_item_version,  # assumed helper; used by TaxonomyItemVersionView below
taxonomy_item_relation_to_api_link,
taxonomy_item_relation_to_api_response,
taxonomy_item_relation_to_taxonomy_item_relation_data,
taxonomy_item_to_api_link,
taxonomy_item_to_api_response,
taxonomy_item_to_taxonomy_item_data,
taxonomy_to_api_response,
taxonomy_to_items_links,
taxonomy_to_taxonomy_data,
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/"
)
class TaxonomyItemView(MethodView):
"""Endpoint for a single taxonomy item."""
def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
def _get_taxonomy_item(
self, namespace: str, taxonomy: str, taxonomy_item: str
) -> TaxonomyItem:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
found_taxonomy_item: Optional[TaxonomyItem] = (
TaxonomyItem.query.options(selectinload(TaxonomyItem.current_ancestors))
.filter(
TaxonomyItem.id == taxonomy_item_id,
TaxonomyItem.taxonomy_id == taxonomy_id,
)
.first()
)
if (
found_taxonomy_item is None
or found_taxonomy_item.taxonomy.namespace_id != namespace_id
):
abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
return found_taxonomy_item # is not None because abort raises exception
def _check_if_taxonomy_modifiable(self, taxonomy: Taxonomy):
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
            # cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
self._check_if_taxonomy_modifiable(taxonomy=taxonomy_item.taxonomy)
if taxonomy_item.deleted_on is not None:
            # cannot modify deleted taxonomy item!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
"""Get a single taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
embedded: List[ApiResponse] = []
for relation in found_taxonomy_item.current_ancestors:
embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_source))
for relation in found_taxonomy_item.current_related:
embedded.append(taxonomy_item_relation_to_api_response(relation))
embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_target))
return ApiResponse(
links=[
ApiLink(
href=url_for(
"api-v1.NamespacesView",
_external=True,
**{"item-count": 50},
sort="name",
),
rel=("first", "page", "collection", "nav"),
resource_type="ont-namespace",
schema=url_for(
"api-v1.ApiSchemaView", schema_id="Namespace", _external=True
),
),
*nav_links_for_taxonomy_item(found_taxonomy_item),
*action_links_for_taxonomy_item(found_taxonomy_item),
],
embedded=embedded,
data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item),
)
@API_V1.arguments(TaxonomyItemSchema())
@API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
def put(self, data, namespace: str, taxonomy: str, taxonomy_item: str):
"""Update a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_modifiable(found_taxonomy_item)
taxonomy_item_version = TaxonomyItemVersion(
taxonomy_item=found_taxonomy_item,
version=found_taxonomy_item.current_version.version + 1,
name=data["name"],
description=data.get("description", ""),
sort_key=data.get("sort_key", 10),
)
found_taxonomy_item.current_version = taxonomy_item_version
DB.session.add(found_taxonomy_item)
DB.session.add(taxonomy_item_version)
DB.session.commit()
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
return ApiResponse(
links=[taxonomy_item_link],
embedded=[taxonomy_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"update",
"put",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def post(self, namespace: str, taxonomy: str, taxonomy_item: str): # restore action
"""Restore a deleted taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
changed_links: List[ApiLink] = []
embedded: List[ApiResponse] = []
# only actually restore when not already restored
if found_taxonomy_item.deleted_on is not None:
# restore taxonomy item
deleted_timestamp = found_taxonomy_item.deleted_on
found_taxonomy_item.deleted_on = None
# also restore relations
ancestors: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.taxonomy_item_target_id == found_taxonomy_item.id,
TaxonomyItemRelation.deleted_on == deleted_timestamp,
).all()
ancestor_ids = set()
relation: TaxonomyItemRelation
for relation in ancestors:
if relation.taxonomy_item_source.deleted_on is not None:
continue # do not restore relations to deleted items
ancestor_ids.add(relation.taxonomy_item_source_id)
relation.deleted_on = None
DB.session.add(relation)
def produces_circle(relation: TaxonomyItemRelation) -> bool:
if relation.taxonomy_item_target_id in ancestor_ids:
return True
for rel in relation.taxonomy_item_target.current_related:
if produces_circle(rel):
return True
return False
children: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.taxonomy_item_source_id == found_taxonomy_item.id,
TaxonomyItemRelation.deleted_on == deleted_timestamp,
).all()
for relation in children:
if relation.taxonomy_item_target.deleted_on is not None:
continue # do not restore relations to deleted items
if produces_circle(relation):
continue
relation.deleted_on = None
DB.session.add(relation)
DB.session.add(found_taxonomy_item)
DB.session.commit()
# add changed items to be embedded into the response
for relation in found_taxonomy_item.current_ancestors:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_source)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_source)
)
for relation in found_taxonomy_item.current_related:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_target)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_target)
)
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
return ApiResponse(
links=[taxonomy_item_link, taxonomy_link, *changed_links],
embedded=[taxonomy_item_data, taxonomy_data, *embedded],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"restore",
"post",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
    def delete(self, namespace: str, taxonomy: str, taxonomy_item: str):  # delete action
"""Delete a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
changed_links: List[ApiLink] = []
embedded: List[ApiResponse] = []
# only actually delete when not already deleted
if found_taxonomy_item.deleted_on is None:
# delete taxonomy item
deleted_timestamp = datetime.utcnow()
found_taxonomy_item.deleted_on = deleted_timestamp
# also delete incoming and outgoing relations to remove them
# from relations of existing items
ancestors = found_taxonomy_item.current_ancestors
for relation in found_taxonomy_item.current_ancestors:
relation.deleted_on = deleted_timestamp
DB.session.add(relation)
related = found_taxonomy_item.current_related
for relation in found_taxonomy_item.current_related:
relation.deleted_on = deleted_timestamp
DB.session.add(relation)
DB.session.add(found_taxonomy_item)
DB.session.commit()
# add changed items to be embedded into the response
for relation in ancestors:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_source)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_source)
)
for relation in related:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_target)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_target)
)
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
return ApiResponse(
links=[taxonomy_item_link, taxonomy_link, *changed_links],
embedded=[taxonomy_item_data, taxonomy_data, *embedded],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/"
)
class TaxonomyItemRelationsView(MethodView):
"""Endpoint for manipulating taxonomy item relations."""
def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
def _get_taxonomy_item(
self, namespace: str, taxonomy: str, taxonomy_item: str
) -> TaxonomyItem:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
found_taxonomy_item: Optional[TaxonomyItem] = TaxonomyItem.query.filter(
TaxonomyItem.id == taxonomy_item_id,
TaxonomyItem.taxonomy_id == taxonomy_id,
).first()
if (
found_taxonomy_item is None
or found_taxonomy_item.taxonomy.namespace_id != namespace_id
):
abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
return found_taxonomy_item # is not None because abort raises exception
def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
            # cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
            # cannot modify deleted taxonomy item!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
def _check_item_circle(
self,
item_target: TaxonomyItem,
item_source: TaxonomyItem,
original_target: Optional[TaxonomyItem] = None,
):
"""Check for a path from target to source which would form a circular dependency. Abort if such a path is found!"""
if original_target is None:
original_target = item_target
relation: TaxonomyItemRelation
for relation in item_target.current_related:
if relation.taxonomy_item_target.deleted_on is not None:
continue # exclude deleted items as targets
if relation.taxonomy_item_target_id == item_source.id:
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Cannot add a relation from %(target)s to %(source)s as it would create a circle!",
target=original_target.name,
source=item_source.name,
),
)
else:
self._check_item_circle(
item_target=relation.taxonomy_item_target,
item_source=item_source,
original_target=original_target,
)
@API_V1.arguments(TaxonomyItemRelationPostSchema())
@API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
def post(
self,
data: Dict[str, str],
namespace: str,
taxonomy: str,
taxonomy_item: str,
):
"""Create a new relation to a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
if namespace != data["namespace_id"] or taxonomy != data["taxonomy_id"]:
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"Cannot create a relation to a taxonomy item of a different taxonomy!"
),
)
found_taxonomy_item = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_modifiable(found_taxonomy_item)
found_taxonomy_item_target = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=data["taxonomy_item_id"]
)
self._check_item_circle(found_taxonomy_item_target, found_taxonomy_item)
relation = TaxonomyItemRelation(
taxonomy_item_source=found_taxonomy_item,
taxonomy_item_target=found_taxonomy_item_target,
)
DB.session.add(relation)
DB.session.commit()
taxonomy_item_relation_link = (
taxonomy_item_relation_to_taxonomy_item_relation_data(relation).self
)
taxonomy_item_relation_data = taxonomy_item_relation_to_api_response(relation)
taxonomy_item_source_link = taxonomy_item_to_api_link(found_taxonomy_item)
taxonomy_item_source_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_item_target_link = taxonomy_item_to_api_link(found_taxonomy_item_target)
taxonomy_item_target_data = taxonomy_item_to_api_response(
found_taxonomy_item_target
)
self_link = create_action_link_for_taxonomy_item_relation_page(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self_link.rel = (*self_link.rel, "ont-taxonomy-item-relation")
self_link.resource_type = "new"
return ApiResponse(
links=[
taxonomy_item_relation_link,
taxonomy_item_source_link,
taxonomy_item_target_link,
],
embedded=[
taxonomy_item_relation_data,
taxonomy_item_source_data,
taxonomy_item_target_data,
],
data=NewApiObject(
self=self_link,
new=taxonomy_item_relation_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/<string:relation>/"
)
class TaxonomyItemRelationView(MethodView):
"""Endpoint for removing taxonomy item relations."""
def _check_path_params(
self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
if not relation or not relation.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"The requested taxonomy item relation id has the wrong format!"
),
)
def _get_taxonomy_item_relation(
self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
) -> TaxonomyItemRelation:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
relation_id = int(relation)
found_taxonomy_item_relation: Optional[
TaxonomyItemRelation
] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.id == relation_id,
TaxonomyItemRelation.taxonomy_item_source_id == taxonomy_item_id,
).first()
if (
found_taxonomy_item_relation is None
or found_taxonomy_item_relation.taxonomy_item_source.taxonomy_id
!= taxonomy_id
or found_taxonomy_item_relation.taxonomy_item_source.taxonomy.namespace_id
!= namespace_id
):
abort(
HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item relation not found.")
)
return found_taxonomy_item_relation # is not None because abort raises exception
def _check_if_modifiable(self, relation: TaxonomyItemRelation):
taxonomy_item = relation.taxonomy_item_source
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
            # cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy item!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
if relation.deleted_on is not None:
# cannot modify deleted item relation!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item relation is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemRelationSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Get a single relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
return ApiResponse(
links=(
*nav_links_for_taxonomy_item_relation(found_relation),
*action_links_for_taxonomy_item_relation(found_relation),
),
data=taxonomy_item_relation_to_taxonomy_item_relation_data(found_relation),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def delete(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Delete an existing relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
self._check_if_modifiable(found_relation)
# only actually delete when not already deleted
if found_relation.deleted_on is None:
# delete taxonomy item relation
found_relation.deleted_on = datetime.utcnow()
DB.session.add(found_relation)
DB.session.commit()
relation_link = taxonomy_item_relation_to_taxonomy_item_relation_data(
found_relation
).self
relation_data = taxonomy_item_relation_to_api_response(found_relation)
source_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_source)
source_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_source
)
target_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_target)
target_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_target
)
return ApiResponse(
links=[relation_link, source_item_link, target_item_link],
embedded=[relation_data, source_item_data, target_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemRelationView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item-relation",
),
resource_type="changed",
),
changed=relation_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/"
)
class TaxonomyItemVersionsView(MethodView):
"""Endpoint for all versions of a taxonomy item."""
def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
"""TODO."""
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/<string:version>/"
)
class TaxonomyItemVersionView(MethodView):
"""Endpoint for a single version of a taxonomy item."""
def _check_path_params(
self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
if not version or not version.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"The requested taxonomy item version has the wrong format!"
),
)
def _get_taxonomy_item_version(
self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
) -> TaxonomyItemVersion:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
version_nr = int(version)
found_taxonomy_item_version: Optional[
TaxonomyItemVersion
] = TaxonomyItemVersion.query.filter(
TaxonomyItemVersion.version == version_nr,
TaxonomyItemVersion.taxonomy_item_id == taxonomy_item_id,
).first()
if (
found_taxonomy_item_version is None
or found_taxonomy_item_version.taxonomy_item.taxonomy_id != taxonomy_id
or found_taxonomy_item_version.taxonomy_item.taxonomy.namespace_id
!= namespace_id
):
abort(
HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item version not found.")
)
return found_taxonomy_item_version # is not None because abort raises exception
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
version: str,
**kwargs: Any
):
"""Get a single taxonomy item version."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
version=version,
)
found_taxonomy_item_version = self._get_taxonomy_item_version(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
version=version,
)
return ApiResponse(
links=[
ApiLink(
href=url_for(
"api-v1.NamespacesView",
_external=True,
**{"item-count": 50},
sort="name",
),
rel=("first", "page", "collection", "nav"),
resource_type="ont-namespace",
schema=url_for(
"api-v1.ApiSchemaView", schema_id="Namespace", _external=True
),
),
*nav_links_for_taxonomy_item_version(found_taxonomy_item_version),
*action_links_for_taxonomy_item_version(found_taxonomy_item_version),
],
data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item_version),
)
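# Quick reference of the endpoints defined above (illustrative only, not part
# of the original module). The prefix in front of these paths depends on where
# the API_V1 blueprint is mounted, which is an assumption here:
#
#   GET/PUT/POST/DELETE  /namespaces/<ns>/taxonomies/<tax>/items/<item>/
#   POST                 /namespaces/<ns>/taxonomies/<tax>/items/<item>/relations/
#   GET/DELETE           /namespaces/<ns>/taxonomies/<tax>/items/<item>/relations/<rel>/
#   GET                  /namespaces/<ns>/taxonomies/<tax>/items/<item>/versions/
#   GET                  /namespaces/<ns>/taxonomies/<tax>/items/<item>/versions/<ver>/
#
# For example, with Flask's test client (app factory and '/api/v1' prefix assumed):
#
#   client = app.test_client()
#   response = client.get('/api/v1/namespaces/1/taxonomies/2/items/3/')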
| [((22415, 22530), 'muse_for_anything.db.models.taxonomies.TaxonomyItemRelation', 'TaxonomyItemRelation', ([], {'taxonomy_item_source': 'found_taxonomy_item', 'taxonomy_item_target': 'found_taxonomy_item_target'}), '(taxonomy_item_source=found_taxonomy_item,\n taxonomy_item_target=found_taxonomy_item_target)\n', (22435, 22530), False, 'from muse_for_anything.db.models.taxonomies import Taxonomy, TaxonomyItem, TaxonomyItemRelation, TaxonomyItemVersion\n'), ((14380, 14397), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (14395, 14397), False, 'from datetime import datetime\n'), ((29591, 29608), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (29606, 29608), False, 'from datetime import datetime\n'), ((18479, 18587), 'muse_for_anything.db.models.taxonomies.TaxonomyItem.query.filter', 'TaxonomyItem.query.filter', (['(TaxonomyItem.id == taxonomy_item_id)', '(TaxonomyItem.taxonomy_id == taxonomy_id)'], {}), '(TaxonomyItem.id == taxonomy_item_id, TaxonomyItem\n .taxonomy_id == taxonomy_id)\n', (18504, 18587), False, 'from muse_for_anything.db.models.taxonomies import Taxonomy, TaxonomyItem, TaxonomyItemRelation, TaxonomyItemVersion\n'), ((25651, 25795), 'muse_for_anything.db.models.taxonomies.TaxonomyItemRelation.query.filter', 'TaxonomyItemRelation.query.filter', (['(TaxonomyItemRelation.id == relation_id)', '(TaxonomyItemRelation.taxonomy_item_source_id == taxonomy_item_id)'], {}), '(TaxonomyItemRelation.id == relation_id, \n TaxonomyItemRelation.taxonomy_item_source_id == taxonomy_item_id)\n', (25684, 25795), False, 'from muse_for_anything.db.models.taxonomies import Taxonomy, TaxonomyItem, TaxonomyItemRelation, TaxonomyItemVersion\n'), ((33116, 33253), 'muse_for_anything.db.models.taxonomies.TaxonomyItemVersion.query.filter', 'TaxonomyItemVersion.query.filter', (['(TaxonomyItemVersion.version == version_nr)', '(TaxonomyItemVersion.taxonomy_item_id == taxonomy_item_id)'], {}), '(TaxonomyItemVersion.version == version_nr,\n TaxonomyItemVersion.taxonomy_item_id == taxonomy_item_id)\n', (33148, 33253), False, 'from muse_for_anything.db.models.taxonomies import Taxonomy, TaxonomyItem, TaxonomyItemRelation, TaxonomyItemVersion\n'), ((2440, 2499), 'flask_babel.gettext', 'gettext', (['"""The requested namespace id has the wrong format!"""'], {}), "('The requested namespace id has the wrong format!')\n", (2447, 2499), False, 'from flask_babel import gettext\n'), ((2649, 2707), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy id has the wrong format!"""'], {}), "('The requested taxonomy id has the wrong format!')\n", (2656, 2707), False, 'from flask_babel import gettext\n'), ((2867, 2930), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy item id has the wrong format!"""'], {}), "('The requested taxonomy item id has the wrong format!')\n", (2874, 2930), False, 'from flask_babel import gettext\n'), ((3685, 3720), 'flask_babel.gettext', 'gettext', (['"""Taxonomy item not found."""'], {}), "('Taxonomy item not found.')\n", (3692, 3720), False, 'from flask_babel import gettext\n'), ((4050, 4123), 'flask_babel.gettext', 'gettext', (['"""Namespace is marked as deleted and cannot be modified further."""'], {}), "('Namespace is marked as deleted and cannot be modified further.')\n", (4057, 4123), False, 'from flask_babel import gettext\n'), ((4348, 4420), 'flask_babel.gettext', 'gettext', (['"""Taxonomy is marked as deleted and cannot be modified further."""'], {}), "('Taxonomy is marked as deleted and cannot be modified further.')\n", (4355, 
4420), False, 'from flask_babel import gettext\n'), ((4791, 4868), 'flask_babel.gettext', 'gettext', (['"""Taxonomy item is marked as deleted and cannot be modified further."""'], {}), "('Taxonomy item is marked as deleted and cannot be modified further.')\n", (4798, 4868), False, 'from flask_babel import gettext\n'), ((9673, 9842), 'muse_for_anything.db.models.taxonomies.TaxonomyItemRelation.query.filter', 'TaxonomyItemRelation.query.filter', (['(TaxonomyItemRelation.taxonomy_item_target_id == found_taxonomy_item.id)', '(TaxonomyItemRelation.deleted_on == deleted_timestamp)'], {}), '(TaxonomyItemRelation.\n taxonomy_item_target_id == found_taxonomy_item.id, TaxonomyItemRelation\n .deleted_on == deleted_timestamp)\n', (9706, 9842), False, 'from muse_for_anything.db.models.taxonomies import Taxonomy, TaxonomyItem, TaxonomyItemRelation, TaxonomyItemVersion\n'), ((10714, 10883), 'muse_for_anything.db.models.taxonomies.TaxonomyItemRelation.query.filter', 'TaxonomyItemRelation.query.filter', (['(TaxonomyItemRelation.taxonomy_item_source_id == found_taxonomy_item.id)', '(TaxonomyItemRelation.deleted_on == deleted_timestamp)'], {}), '(TaxonomyItemRelation.\n taxonomy_item_source_id == found_taxonomy_item.id, TaxonomyItemRelation\n .deleted_on == deleted_timestamp)\n', (10747, 10883), False, 'from muse_for_anything.db.models.taxonomies import Taxonomy, TaxonomyItem, TaxonomyItemRelation, TaxonomyItemVersion\n'), ((17683, 17742), 'flask_babel.gettext', 'gettext', (['"""The requested namespace id has the wrong format!"""'], {}), "('The requested namespace id has the wrong format!')\n", (17690, 17742), False, 'from flask_babel import gettext\n'), ((17892, 17950), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy id has the wrong format!"""'], {}), "('The requested taxonomy id has the wrong format!')\n", (17899, 17950), False, 'from flask_babel import gettext\n'), ((18110, 18173), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy item id has the wrong format!"""'], {}), "('The requested taxonomy item id has the wrong format!')\n", (18117, 18173), False, 'from flask_babel import gettext\n'), ((18812, 18847), 'flask_babel.gettext', 'gettext', (['"""Taxonomy item not found."""'], {}), "('Taxonomy item not found.')\n", (18819, 18847), False, 'from flask_babel import gettext\n'), ((19219, 19292), 'flask_babel.gettext', 'gettext', (['"""Namespace is marked as deleted and cannot be modified further."""'], {}), "('Namespace is marked as deleted and cannot be modified further.')\n", (19226, 19292), False, 'from flask_babel import gettext\n'), ((19517, 19589), 'flask_babel.gettext', 'gettext', (['"""Taxonomy is marked as deleted and cannot be modified further."""'], {}), "('Taxonomy is marked as deleted and cannot be modified further.')\n", (19524, 19589), False, 'from flask_babel import gettext\n'), ((19818, 19895), 'flask_babel.gettext', 'gettext', (['"""Taxonomy item is marked as deleted and cannot be modified further."""'], {}), "('Taxonomy item is marked as deleted and cannot be modified further.')\n", (19825, 19895), False, 'from flask_babel import gettext\n'), ((21815, 21894), 'flask_babel.gettext', 'gettext', (['"""Cannot create a relation to a taxonomy item of a different taxonomy!"""'], {}), "('Cannot create a relation to a taxonomy item of a different taxonomy!')\n", (21822, 21894), False, 'from flask_babel import gettext\n'), ((24488, 24547), 'flask_babel.gettext', 'gettext', (['"""The requested namespace id has the wrong format!"""'], {}), "('The requested namespace id has 
the wrong format!')\n", (24495, 24547), False, 'from flask_babel import gettext\n'), ((24697, 24755), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy id has the wrong format!"""'], {}), "('The requested taxonomy id has the wrong format!')\n", (24704, 24755), False, 'from flask_babel import gettext\n'), ((24915, 24978), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy item id has the wrong format!"""'], {}), "('The requested taxonomy item id has the wrong format!')\n", (24922, 24978), False, 'from flask_babel import gettext\n'), ((25128, 25200), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy item relation id has the wrong format!"""'], {}), "('The requested taxonomy item relation id has the wrong format!')\n", (25135, 25200), False, 'from flask_babel import gettext\n'), ((26192, 26236), 'flask_babel.gettext', 'gettext', (['"""Taxonomy item relation not found."""'], {}), "('Taxonomy item relation not found.')\n", (26199, 26236), False, 'from flask_babel import gettext\n'), ((26687, 26760), 'flask_babel.gettext', 'gettext', (['"""Namespace is marked as deleted and cannot be modified further."""'], {}), "('Namespace is marked as deleted and cannot be modified further.')\n", (26694, 26760), False, 'from flask_babel import gettext\n'), ((26985, 27057), 'flask_babel.gettext', 'gettext', (['"""Taxonomy is marked as deleted and cannot be modified further."""'], {}), "('Taxonomy is marked as deleted and cannot be modified further.')\n", (26992, 27057), False, 'from flask_babel import gettext\n'), ((27291, 27368), 'flask_babel.gettext', 'gettext', (['"""Taxonomy item is marked as deleted and cannot be modified further."""'], {}), "('Taxonomy item is marked as deleted and cannot be modified further.')\n", (27298, 27368), False, 'from flask_babel import gettext\n'), ((27597, 27693), 'flask_babel.gettext', 'gettext', (['"""Taxonomy item relation is marked as deleted and cannot be modified further."""'], {}), "(\n 'Taxonomy item relation is marked as deleted and cannot be modified further.'\n )\n", (27604, 27693), False, 'from flask_babel import gettext\n'), ((31966, 32025), 'flask_babel.gettext', 'gettext', (['"""The requested namespace id has the wrong format!"""'], {}), "('The requested namespace id has the wrong format!')\n", (31973, 32025), False, 'from flask_babel import gettext\n'), ((32175, 32233), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy id has the wrong format!"""'], {}), "('The requested taxonomy id has the wrong format!')\n", (32182, 32233), False, 'from flask_babel import gettext\n'), ((32393, 32456), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy item id has the wrong format!"""'], {}), "('The requested taxonomy item id has the wrong format!')\n", (32400, 32456), False, 'from flask_babel import gettext\n'), ((32604, 32672), 'flask_babel.gettext', 'gettext', (['"""The requested taxonomy item version has the wrong format!"""'], {}), "('The requested taxonomy item version has the wrong format!')\n", (32611, 32672), False, 'from flask_babel import gettext\n'), ((33622, 33665), 'flask_babel.gettext', 'gettext', (['"""Taxonomy item version not found."""'], {}), "('Taxonomy item version not found.')\n", (33629, 33665), False, 'from flask_babel import gettext\n'), ((20708, 20863), 'flask_babel.gettext', 'gettext', (['"""Cannot add a relation from %(target)s to %(source)s as it would create a circle!"""'], {'target': 'original_target.name', 'source': 'item_source.name'}), "(\n 'Cannot add a relation from %(target)s to 
%(source)s as it would create a circle!'\n , target=original_target.name, source=item_source.name)\n", (20715, 20863), False, 'from flask_babel import gettext\n'), ((3277, 3321), 'sqlalchemy.orm.selectinload', 'selectinload', (['TaxonomyItem.current_ancestors'], {}), '(TaxonomyItem.current_ancestors)\n', (3289, 3321), False, 'from sqlalchemy.orm import selectinload\n'), ((5925, 6013), 'flask.helpers.url_for', 'url_for', (['"""api-v1.NamespacesView"""'], {'_external': '(True)', 'sort': '"""name"""'}), "('api-v1.NamespacesView', _external=True, **{'item-count': 50}, sort\n ='name')\n", (5932, 6013), False, 'from flask.helpers import url_for\n'), ((6271, 6341), 'flask.helpers.url_for', 'url_for', (['"""api-v1.ApiSchemaView"""'], {'schema_id': '"""Namespace"""', '_external': '(True)'}), "('api-v1.ApiSchemaView', schema_id='Namespace', _external=True)\n", (6278, 6341), False, 'from flask.helpers import url_for\n'), ((34533, 34621), 'flask.helpers.url_for', 'url_for', (['"""api-v1.NamespacesView"""'], {'_external': '(True)', 'sort': '"""name"""'}), "('api-v1.NamespacesView', _external=True, **{'item-count': 50}, sort\n ='name')\n", (34540, 34621), False, 'from flask.helpers import url_for\n'), ((34879, 34949), 'flask.helpers.url_for', 'url_for', (['"""api-v1.ApiSchemaView"""'], {'schema_id': '"""Namespace"""', '_external': '(True)'}), "('api-v1.ApiSchemaView', schema_id='Namespace', _external=True)\n", (34886, 34949), False, 'from flask.helpers import url_for\n'), ((8112, 8235), 'flask.helpers.url_for', 'url_for', (['"""api-v1.TaxonomyItemView"""'], {'namespace': 'namespace', 'taxonomy': 'taxonomy', 'taxonomy_item': 'taxonomy_item', '_external': '(True)'}), "('api-v1.TaxonomyItemView', namespace=namespace, taxonomy=taxonomy,\n taxonomy_item=taxonomy_item, _external=True)\n", (8119, 8235), False, 'from flask.helpers import url_for\n'), ((13005, 13128), 'flask.helpers.url_for', 'url_for', (['"""api-v1.TaxonomyItemView"""'], {'namespace': 'namespace', 'taxonomy': 'taxonomy', 'taxonomy_item': 'taxonomy_item', '_external': '(True)'}), "('api-v1.TaxonomyItemView', namespace=namespace, taxonomy=taxonomy,\n taxonomy_item=taxonomy_item, _external=True)\n", (13012, 13128), False, 'from flask.helpers import url_for\n'), ((16705, 16828), 'flask.helpers.url_for', 'url_for', (['"""api-v1.TaxonomyItemView"""'], {'namespace': 'namespace', 'taxonomy': 'taxonomy', 'taxonomy_item': 'taxonomy_item', '_external': '(True)'}), "('api-v1.TaxonomyItemView', namespace=namespace, taxonomy=taxonomy,\n taxonomy_item=taxonomy_item, _external=True)\n", (16712, 16828), False, 'from flask.helpers import url_for\n'), ((30562, 30713), 'flask.helpers.url_for', 'url_for', (['"""api-v1.TaxonomyItemRelationView"""'], {'namespace': 'namespace', 'taxonomy': 'taxonomy', 'taxonomy_item': 'taxonomy_item', 'relation': 'relation', '_external': '(True)'}), "('api-v1.TaxonomyItemRelationView', namespace=namespace, taxonomy=\n taxonomy, taxonomy_item=taxonomy_item, relation=relation, _external=True)\n", (30569, 30713), False, 'from flask.helpers import url_for\n')] |
shijiale0609/Python_Data_Analysis | PythonDAdata/3358OS_06_Code/code6/pd_plotting.py | c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load CPU transistor counts and average them per year.
df = pd.read_csv('transcount.csv')
df = df.groupby('year').aggregate(np.mean)
# Load GPU transistor counts and average them per year as well.
gpu = pd.read_csv('gpu_transcount.csv')
gpu = gpu.groupby('year').aggregate(np.mean)
# Outer-join the two tables on the year index and replace missing values with 0.
df = pd.merge(df, gpu, how='outer', left_index=True, right_index=True)
df = df.replace(np.nan, 0)
# Plot on a linear scale, on a log scale, and as a log-log scatter of CPU vs GPU counts.
df.plot()
df.plot(logy=True)
df[df['gpu_trans_count'] > 0].plot(kind='scatter', x='trans_count', y='gpu_trans_count', loglog=True)
plt.show()
| [((78, 107), 'pandas.read_csv', 'pd.read_csv', (['"""transcount.csv"""'], {}), "('transcount.csv')\n", (89, 107), True, 'import pandas as pd\n'), ((158, 191), 'pandas.read_csv', 'pd.read_csv', (['"""gpu_transcount.csv"""'], {}), "('gpu_transcount.csv')\n", (169, 191), True, 'import pandas as pd\n'), ((243, 308), 'pandas.merge', 'pd.merge', (['df', 'gpu'], {'how': '"""outer"""', 'left_index': '(True)', 'right_index': '(True)'}), "(df, gpu, how='outer', left_index=True, right_index=True)\n", (251, 308), True, 'import pandas as pd\n'), ((467, 477), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (475, 477), True, 'import matplotlib.pyplot as plt\n')] |
JakubGutowski/PersonalBlog | source/blog/migrations/0004_postcomments.py | 96122b36486f7e874c013e50d939732a43db309f | # Generated by Django 2.0.5 on 2018-07-02 19:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_blogpost_author'),
]
operations = [
migrations.CreateModel(
name='PostComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nick', models.CharField(max_length=20)),
('comment', models.CharField(max_length=140)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.BlogPost')),
],
),
]
| [((379, 472), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (395, 472), False, 'from django.db import migrations, models\n'), ((497, 528), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (513, 528), False, 'from django.db import migrations, models\n'), ((560, 592), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(140)'}), '(max_length=140)\n', (576, 592), False, 'from django.db import migrations, models\n'), ((621, 708), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""blog.BlogPost"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'blog.BlogPost')\n", (638, 708), False, 'from django.db import migrations, models\n')] |
m-star18/atcoder | submissions/aising2019/a.py | 08e475810516602fa088f87daf1eba590b4e07cc | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n = int(readline())
h = int(readline())
w = int(readline())
# Number of positions where an h x w rectangle fits inside an n x n grid.
print((n - h + 1) * (n - w + 1))
| [((116, 146), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (137, 146), False, 'import sys\n')] |
yoyoberenguer/MultiplayerGameEngine | CreateHalo.py | 1d1a4c0ab40d636322c4e3299cbc84fb57965b31 |
import pygame
from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand
from Textures import HALO_SPRITE12, HALO_SPRITE14, HALO_SPRITE13
__author__ = "Yoann Berenguer"
__credits__ = ["Yoann Berenguer"]
__version__ = "1.0.0"
__maintainer__ = "Yoann Berenguer"
__email__ = "yoyoberenguer@hotmail.com"
class PlayerHalo(pygame.sprite.Sprite):
images = []
containers = None
def __init__(self, texture_name_, object_, timing_, layer_=0):
self.layer = layer_
pygame.sprite.Sprite.__init__(self, self.containers)
if isinstance(object_.gl.All, pygame.sprite.LayeredUpdates):
object_.gl.All.change_layer(self, object_.layer)
self.object = object_
if isinstance(self.images, pygame.Surface):
self.images = [self.images] * 30
self.images_copy = self.images.copy()
self.image = self.images_copy[0]
self.rect = self.image.get_rect(center=object_.rect.center)
self.dt = 0
self.index = 0
self.gl = object_.gl
self.length = len(self.images) - 1
self.blend = 0
self.timing = timing_
self.texture_name = texture_name_
self.id_ = id(self)
self.player_halo_object = Broadcast(self.make_object())
def make_object(self) -> AnimatedSprite:
return AnimatedSprite(frame_=self.gl.FRAME, id_=self.id_, surface_=self.texture_name,
layer_=self.layer, blend_=self.blend, rect_=self.rect,
index_=self.index)
def update(self):
if self.dt > self.timing:
if self.object.rect.colliderect(self.gl.SCREENRECT):
self.image = self.images_copy[self.index]
self.rect = self.image.get_rect(center=self.object.rect.center)
self.index += 1
if self.index > self.length:
self.kill()
return
self.dt = 0
self.player_halo_object.update({'frame': self.gl.FRAME,
'rect': self.rect, 'index': self.index})
else:
self.kill()
return
else:
self.dt += self.gl.TIME_PASSED_SECONDS
self.player_halo_object.queue()
class AsteroidHalo(pygame.sprite.Sprite):
images = []
containers = None
def __init__(self, texture_name_, object_, timing_, layer_=0):
self.layer = layer_
pygame.sprite.Sprite.__init__(self, self.containers)
if isinstance(object_.gl.All, pygame.sprite.LayeredUpdates):
object_.gl.All.change_layer(self, object_.layer)
self.object = object_
if isinstance(self.images, pygame.Surface):
self.images = [self.images] * 30
self.images_copy = self.images.copy()
self.image = self.images_copy[0]
if not id(AsteroidHalo.images) == id(eval(texture_name_)):
raise ValueError("Asteroid image does not match with its surface name.")
self.rect = self.image.get_rect(center=object_.rect.center)
self.dt = 0
self.index = 0
self.gl = object_.gl
self.length = len(self.images) - 1
self.blend = 0
self.timing = timing_
self.texture_name = texture_name_
self.id_ = id(self)
self.asteroidHalo_object = Broadcast(self.make_object())
Broadcast.add_object_id(self.id_)
def delete_object(self) -> DeleteSpriteCommand:
"""
Send a command to kill an object on client side.
        :return: DeleteSpriteCommand object
"""
return DeleteSpriteCommand(frame_=self.gl.FRAME, to_delete_={self.id_: self.texture_name})
def make_object(self) -> AnimatedSprite:
return AnimatedSprite(frame_=self.gl.FRAME, id_=self.id_, surface_=self.texture_name,
layer_=self.layer, blend_=self.blend, rect_=self.rect,
index_=self.index)
def quit(self) -> None:
Broadcast.remove_object_id(self.id_)
obj = Broadcast(self.delete_object())
obj.queue()
self.kill()
def update(self) -> None:
if self.dt > self.timing:
if self.object.rect.colliderect(self.gl.SCREENRECT):
self.image = self.images_copy[self.index]
self.rect = self.image.get_rect(center=self.object.rect.center)
self.index += 1
if self.index > self.length:
self.quit()
return
self.asteroidHalo_object.update(
{'frame': self.gl.FRAME, 'rect': self.rect, 'index': self.index})
self.asteroidHalo_object.queue()
self.dt = 0
else:
self.quit()
return
else:
self.dt += self.gl.TIME_PASSED_SECONDS
| [((527, 579), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self', 'self.containers'], {}), '(self, self.containers)\n', (556, 579), False, 'import pygame\n'), ((1385, 1546), 'NetworkBroadcast.AnimatedSprite', 'AnimatedSprite', ([], {'frame_': 'self.gl.FRAME', 'id_': 'self.id_', 'surface_': 'self.texture_name', 'layer_': 'self.layer', 'blend_': 'self.blend', 'rect_': 'self.rect', 'index_': 'self.index'}), '(frame_=self.gl.FRAME, id_=self.id_, surface_=self.\n texture_name, layer_=self.layer, blend_=self.blend, rect_=self.rect,\n index_=self.index)\n', (1399, 1546), False, 'from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand\n'), ((2591, 2643), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self', 'self.containers'], {}), '(self, self.containers)\n', (2620, 2643), False, 'import pygame\n'), ((3553, 3586), 'NetworkBroadcast.Broadcast.add_object_id', 'Broadcast.add_object_id', (['self.id_'], {}), '(self.id_)\n', (3576, 3586), False, 'from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand\n'), ((3791, 3879), 'NetworkBroadcast.DeleteSpriteCommand', 'DeleteSpriteCommand', ([], {'frame_': 'self.gl.FRAME', 'to_delete_': '{self.id_: self.texture_name}'}), '(frame_=self.gl.FRAME, to_delete_={self.id_: self.\n texture_name})\n', (3810, 3879), False, 'from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand\n'), ((3939, 4100), 'NetworkBroadcast.AnimatedSprite', 'AnimatedSprite', ([], {'frame_': 'self.gl.FRAME', 'id_': 'self.id_', 'surface_': 'self.texture_name', 'layer_': 'self.layer', 'blend_': 'self.blend', 'rect_': 'self.rect', 'index_': 'self.index'}), '(frame_=self.gl.FRAME, id_=self.id_, surface_=self.\n texture_name, layer_=self.layer, blend_=self.blend, rect_=self.rect,\n index_=self.index)\n', (3953, 4100), False, 'from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand\n'), ((4194, 4230), 'NetworkBroadcast.Broadcast.remove_object_id', 'Broadcast.remove_object_id', (['self.id_'], {}), '(self.id_)\n', (4220, 4230), False, 'from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand\n')] |
ShizhuZhang/ontask_b | src/dataops/pandas_db.py | acbf05ff9b18dae0a41c67d1e41774e54a890c40 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import os.path
import subprocess
from collections import OrderedDict
from itertools import izip
import numpy as np
import pandas as pd
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from sqlalchemy import create_engine
from dataops.formula_evaluation import evaluate_node_sql
from ontask import fix_pctg_in_name
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
table_prefix = '__ONTASK_WORKFLOW_TABLE_'
df_table_prefix = table_prefix + '{0}'
upload_table_prefix = table_prefix + 'UPLOAD_{0}'
# Query to count the number of rows in a table
query_count_rows = 'SELECT count(*) from "{0}"'
logger = logging.getLogger(__name__)
# Translation between pandas data type names, and those handled in OnTask
pandas_datatype_names = {
'object': 'string',
'int64': 'integer',
'float64': 'double',
'bool': 'boolean',
'datetime64[ns]': 'datetime'
}
# Translation between SQL data type names, and those handled in OnTask
sql_datatype_names = {
'text': 'string',
'bigint': 'integer',
'double precision': 'double',
'boolean': 'boolean',
'timestamp without time zone': 'datetime'
}
# DB Engine to use with Pandas (required by to_sql, read_sql)
engine = None
def create_db_connection(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
    object is required by the pandas functions to_sql and read_sql.
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: Password to connect with the database
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
return create_engine(database_url, echo=False, paramstyle='format')
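# Illustrative sketch only (not part of the original module): how an engine
# could be obtained for a local PostgreSQL database. Every credential below is
# a placeholder/assumption; note that the driver string is concatenated
# directly after the dialect, so it needs a leading '+'.
def _example_create_db_connection():  # pragma: no cover
    return create_db_connection(
        dialect='postgresql',
        driver='+psycopg2',
        username='ontask',
        password='changeme',
        host='localhost',
        dbname='ontask',
    )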
def create_db_engine(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
    object is required by the pandas functions to_sql and read_sql.
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: Password to connect with the database
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
engine = create_db_connection(dialect, driver, username, password, host,
dbname)
if settings.DEBUG:
print('Creating engine with ', database_url)
return engine
def destroy_db_engine(db_engine):
"""
Method that disposes of the given engine (to guarantee there are no
    connections available).
:param db_engine: Engine to destroy
:return: Nothing
"""
db_engine.dispose()
def pg_restore_table(filename):
"""
Function that given a file produced with a pg_dump, it uploads its
content to the existing database
:param filename: File in pg_dump format to restore
:return:
"""
process = subprocess.Popen(['psql',
'-d',
settings.DATABASES['default']['NAME'],
'-q',
'-f',
filename])
process.wait()
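# Illustrative sketch only (not part of the original module): restoring a
# plain-SQL dump created beforehand with pg_dump, e.g.
#   pg_dump --format=plain -f /tmp/ontask_backup.sql <database_name>
# The file name below is a placeholder.
def _example_pg_restore_table():  # pragma: no cover
    pg_restore_table('/tmp/ontask_backup.sql')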
def delete_all_tables():
"""
Delete all tables related to existing workflows
:return:
"""
cursor = connection.cursor()
table_list = connection.introspection.get_table_list(cursor)
for tinfo in table_list:
if not tinfo.name.startswith(table_prefix):
continue
cursor.execute('DROP TABLE "{0}";'.format(tinfo.name))
# To make sure the table is dropped.
connection.commit()
return
def is_table_in_db(table_name):
cursor = connection.cursor()
return next(
(True for x in connection.introspection.get_table_list(cursor)
if x.name == table_name),
False
)
def is_wf_table_in_db(workflow):
return is_table_in_db(create_table_name(workflow.id))
def create_table_name(pk):
"""
:param pk: Primary Key of a workflow
:return: The unique table name to use to store a workflow data frame
"""
return df_table_prefix.format(pk)
def create_upload_table_name(pk):
"""
:param pk: Primary key of a workflow
:return: The unique table to use to upload a new data frame
"""
return upload_table_prefix.format(pk)
def load_from_db(pk, columns=None, filter_exp=None):
"""
Load the data frame stored for the workflow with the pk
:param pk: Primary key of the workflow
    :param columns: Optional list of columns to load (all if None is given)
:param filter_exp: JSON expression to filter a subset of rows
:return: data frame
"""
return load_table(create_table_name(pk),
columns=columns,
filter_exp=filter_exp)
def load_table(table_name, columns=None, filter_exp=None):
"""
Load a data frame from the SQL DB.
FUTURE WORK:
    Consider storing the dataframes in Redis to reduce load/store time.
The trick is to use a compressed format:
SET: redisConn.set("key", df.to_msgpack(compress='zlib'))
GET: pd.read_msgpack(redisConn.get("key"))
Need to agree on a sensible item name that does not collide with anything
else and a policy to detect a cached dataframe and remove it when the data
changes (difficult to detect? Perhaps df_new.equals(df_current))
If feasible, a write-through system could be easily implemented.
    :param table_name: Table name to read from the DB into a data frame
    :param columns: Optional list of columns to load (all if None is given)
    :param filter_exp: JSON expression to filter a subset of rows
    :return: data_frame or None if it does not exist.
"""
if table_name not in connection.introspection.table_names():
return None
if settings.DEBUG:
print('Loading table ', table_name)
if columns or filter_exp:
# A list of columns or a filter exp is given
query, params = get_filter_query(table_name, columns, filter_exp)
result = pd.read_sql_query(query, engine, params=params)
else:
# No view given, so simply get the whole table
result = pd.read_sql(table_name, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def load_query(query):
"""
Load a data frame from the SQL DB running the given query.
:param query: Query to run in the DB
:return: data_frame or None if it does not exist.
"""
if settings.DEBUG:
print('Loading query ', query)
result = pd.read_sql_query(query, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def load_df_from_csvfile(file, skiprows=0, skipfooter=0):
"""
Given a file object, try to read the content as a CSV file and transform
into a data frame. The skiprows and skipfooter are number of lines to skip
from the top and bottom of the file (see read_csv in pandas).
It also tries to convert as many columns as possible to date/time format
(testing the conversion on every string column).
    :param file: File object to read the CSV content
:param skiprows: Number of lines to skip at the top of the document
:param skipfooter: Number of lines to skip at the bottom of the document
:return: Resulting data frame, or an Exception.
"""
data_frame = pd.read_csv(
file,
index_col=False,
infer_datetime_format=True,
quotechar='"',
skiprows=skiprows,
skipfooter=skipfooter
)
# Strip white space from all string columns and try to convert to
# datetime just in case
for x in list(data_frame.columns):
if data_frame[x].dtype.name == 'object':
# Column is a string! Remove the leading and trailing white
# space
data_frame[x] = data_frame[x].str.strip().fillna(data_frame[x])
# Try the datetime conversion
try:
series = pd.to_datetime(data_frame[x],
infer_datetime_format=True)
# Datetime conversion worked! Update the data_frame
data_frame[x] = series
except (ValueError, TypeError):
pass
return data_frame
def load_df_from_sqlconnection(conn_item, pwd=None):
"""
Load a DF from a SQL connection open with the parameters given in conn_item.
    :param conn_item: SQLConnection object with the connection parameters.
    :param pwd: Password to use for the connection.
    :return: Data frame or raise an exception.
"""
# Get the connection
db_connection = create_db_connection(conn_item.conn_type,
conn_item.conn_driver,
conn_item.db_user,
pwd,
conn_item.db_host,
conn_item.db_name)
# Try to fetch the data
result = pd.read_sql(conn_item.db_table, db_connection)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def store_table(data_frame, table_name):
"""
Store a data frame in the DB
:param data_frame: The data frame to store
:param table_name: The name of the table in the DB
:return: Nothing. Side effect in the DB
"""
with cache.lock(table_name):
        # We overwrite the content and do not create an index
data_frame.to_sql(table_name,
engine,
if_exists='replace',
index=False)
return
def delete_table(pk):
"""Delete the table representing the workflow with the given PK. Due to
the dual use of the database, the command has to be executed directly on
the DB.
"""
try:
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}";'.format(create_table_name(pk)))
connection.commit()
except Exception:
logger.error(
'Error while dropping table {0}'.format(create_table_name(pk))
)
def delete_upload_table(pk):
"""Delete the table used to merge data into the workflow with the given
PK. Due to the dual use of the database, the command has to be executed
directly on the DB.
"""
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}"'.format(create_upload_table_name(pk)))
connection.commit()
def get_table_column_types(table_name):
"""
:param table_name: Table name
:return: List of pairs (column name, SQL type)
"""
cursor = connection.cursor()
cursor.execute("""select column_name, data_type from
INFORMATION_SCHEMA.COLUMNS where table_name = '{0}'""".format(table_name))
return cursor.fetchall()
def df_column_types_rename(table_name):
"""
    :param table_name: Name of the table storing the data frame
:return: List of data type strings translated to the proper values
"""
    column_types = get_table_column_types(table_name)
    # result = [table_name[x].dtype.name for x in list(table_name.columns)]
    # for tname, ntname in pandas_datatype_names.items():
    #     result[:] = [x if x != tname else ntname for x in result]
    return [sql_datatype_names[x] for __, x in column_types]
def df_drop_column(pk, column_name):
"""
Drop a column from the DB table storing a data frame
:param pk: Workflow primary key to obtain table name
:param column_name: Column name
:return: Drops the column from the corresponding DB table
"""
query = 'ALTER TABLE "{0}" DROP COLUMN "{1}"'.format(
create_table_name(pk),
column_name
)
cursor = connection.cursor()
cursor.execute(query)
def get_subframe(pk, cond_filter, column_names=None):
"""
Execute a select query to extract a subset of the dataframe and turn the
resulting query set into a data frame.
:param pk: Workflow primary key
:param cond_filter: Condition object to filter the data (or None)
    :param column_names: Optional list of column names to select
    :return: Data frame with the selected rows and columns
"""
# Get the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Create the DataFrame and set the column names
result = pd.DataFrame.from_records(cursor.fetchall(), coerce_float=True)
result.columns = [c.name for c in cursor.description]
return result
def get_table_cursor(pk, cond_filter, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param cond_filter: Condition object to filter the data (or None)
:param column_names: optional list of columns to select
    :return: Cursor with the result of the executed query
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}" from "{1}"'.format(
'", "'.join(safe_column_names),
create_table_name(pk)
)
else:
query = 'SELECT * from "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter.formula)
if cond_filter:
# The condition may be empty, in which case, nothing is needed.
query += ' WHERE ' + cond_filter
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor
def get_table_data(pk, cond_filter, column_names=None):
# Get first the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Return the data
return cursor.fetchall()
def execute_select_on_table(pk, fields, values, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param fields: List of fields to add to the WHERE clause
:param values: parameters to match the previous fields
:param column_names: optional list of columns to select
    :return: List of tuples with the data rows
"""
# Create the query
if column_names:
safe_column_names = ['"' + fix_pctg_in_name(x) + '"'
for x in column_names]
query = 'SELECT {0}'.format(','.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
cursor = connection.cursor()
if fields:
query += ' WHERE ' + \
' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in fields])
cursor.execute(query, values)
else:
# Execute the query
cursor.execute(query)
# Get the data
return cursor.fetchall()
def get_table_queryset(tablename):
query = 'SELECT * from "{0}";'.format(tablename)
try:
cursor = connection.cursor()
cursor.execute(query)
except Exception:
return None
return cursor.fetchall()
def query_to_dicts(query_string, *query_args):
"""
Run a simple query and produce a generator that returns the results as
a bunch of dictionaries with keys for the column values selected.
"""
cursor = connection.cursor()
cursor.execute(query_string, query_args)
col_names = [desc[0] for desc in cursor.description]
while True:
row = cursor.fetchone()
if row is None:
break
row_dict = OrderedDict(izip(col_names, row))
yield row_dict
return
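# Usage sketch (hypothetical query) for the generator above:
#   for row in query_to_dicts('SELECT "age" FROM "{0}"'.format(create_table_name(1))):
#       print(row['age'])
# Each yielded row is an OrderedDict keyed by the selected column names.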
def update_row(pk, set_fields, set_values, where_fields, where_values):
"""
Given a primary key, pairs (set_field, set_value), and pairs (where_field,
    where_value), update the rows of the table that satisfy all the
    (where_field = where_value) conditions with the assignments given by the
    (set_field, set_value) pairs
:param pk: Primary key to detect workflow
:param set_fields: List of field names to be updated
:param set_values: List of values to update the fields of the previous list
:param where_fields: List of fields used to filter the row in the table
:param where_values: List of values of the previous fields to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}"'.format(create_table_name(pk))
# Add the SET field = value clauses
query += ' SET ' + ', '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in set_fields])
# And finally add the WHERE clause
query += ' WHERE ' + ' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in where_fields])
# Concatenate the values as parameters to the query
parameters = set_values + where_values
# Execute the query
cursor = connection.cursor()
cursor.execute(query, parameters)
connection.commit()
def increase_row_integer(pk, set_field, where_field, where_value):
"""
Given a primary key, a field set_field, and a pair (where_field,
where_value), it increases the field in the appropriate row
:param pk: Primary key to detect workflow
:param set_field: name of the field to be increased
:param where_field: Field used to filter the row in the table
:param where_value: Value of the previous field to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}" SET "{1}" = "{1}" + 1 WHERE "{2}" = %s'.format(
create_table_name(pk),
set_field,
where_field
)
# Execute the query
cursor = connection.cursor()
cursor.execute(query, [where_value])
connection.commit()
def get_table_row_by_key(workflow, cond_filter, kv_pair, column_names=None):
"""
Select the set of elements after filtering and with the key=value pair
:param workflow: workflow object to get to the table
:param cond_filter: Condition object to filter the data (or None)
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
    be unique.
:param column_names: Optional list of column names to select
:return: A dictionary with the (column_name, value) data or None if the
row has not been found
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow.id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# See if the action has a filter or not
if cond_filter is not None:
cond_filter, filter_fields = \
evaluate_node_sql(cond_filter.formula)
query += ' AND (' + cond_filter + ')'
fields = fields + filter_fields
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
qs = cursor.fetchall()
# If there is anything different than one element, return None
if len(qs) != 1:
return None
# Get the only element
qs = qs[0]
# ZIP the values to create a dictionary
return OrderedDict(zip(workflow.get_column_names(), qs))
def get_column_stats_from_df(df_column):
"""
Given a data frame with a single column, return a set of statistics
depending on its type.
:param df_column: data frame with a single column
:return: A dictionary with keys depending on the type of column
     {'min': minimum value (integer, double and datetime),
      'q1': Q1 value (0.25) (integer, double),
      'mean': mean value (integer, double),
      'median': median value (integer, double),
      'q3': Q3 value (0.75) (integer, double),
      'max': maximum value (integer, double and datetime),
      'std': standard deviation (integer, double),
      'counts': dict of value counts (integer, double, string, datetime, Boolean),
      'mode': mode value (integer, double, string, datetime, Boolean)},
     or None if the column has all its values set to NaN
"""
if len(df_column.loc[df_column.notnull()]) == 0:
# The column has no data
return None
# Dictionary to return
result = {
'min': 0,
'q1': 0,
'mean': 0,
'median': 0,
'q3': 0,
'max': 0,
'std': 0,
'mode': None,
'counts': {},
}
data_type = pandas_datatype_names[df_column.dtype.name]
if data_type == 'integer' or data_type == 'double':
quantiles = df_column.quantile([0, .25, .5, .75, 1])
result['min'] = '{0:g}'.format(quantiles[0])
result['q1'] = '{0:g}'.format(quantiles[.25])
result['mean'] = '{0:g}'.format(df_column.mean())
result['median'] = '{0:g}'.format(quantiles[.5])
result['q3'] = '{0:g}'.format(quantiles[.75])
result['max'] = '{0:g}'.format(quantiles[1])
result['std'] = '{0:g}'.format(df_column.std())
result['counts'] = df_column.value_counts().to_dict()
        mode = df_column.mode()
        # mode() returns an empty series when there is no data to aggregate
        result['mode'] = mode[0] if len(mode) > 0 else '--'
return result
def get_filter_query(table_name, column_names, filter_exp):
"""
Given a set of columns and a filter expression, return a pair of SQL query
and params to be executed
:param table_name: Table to query
:param column_names: list of columns to consider or None to consider all
:param filter_exp: Text filter expression
:return: (sql query, sql params)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(table_name)
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if filter_exp:
filter_txt, filter_fields = evaluate_node_sql(filter_exp)
    # Build the query so far appending the filter
if filter_txt:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
if filter_fields:
fields.extend(filter_fields)
return (query, fields)
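# Illustrative result of the helper above (hypothetical filter on column "age"):
#   query, fields = get_filter_query('__ONTASK_WORKFLOW_TABLE_1', ['age'], filter_exp)
#   query  -> 'SELECT "age" FROM "__ONTASK_WORKFLOW_TABLE_1" WHERE <sql from the filter>'
#   fields -> parameters produced by evaluate_node_sql for the filter expression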
def search_table_rows(workflow_id,
cv_tuples=None,
any_join=True,
order_col_name=None,
order_asc=True,
column_names=None,
pre_filter=None):
"""
    Select rows where, for every (column, value) pair, the column contains the
    value (as in LIKE %value%). The pairs are combined with OR if any_join is
    True, or with AND if it is False, and the result is ordered by the given
    column and direction (if given)
    :param workflow_id: Primary key of the workflow to get to the table
    :param cv_tuples: List of (column, value, type) tuples; each value is
    searched for in its column
:param any_join: Boolean encoding if values should be combined with OR (or
AND)
:param order_col_name: Order results by this column
:param order_asc: Order results in ascending values (or descending)
:param column_names: Optional list of column names to select
:param pre_filter: Optional filter condition to pre filter the query set.
the query is built with these terms as requirement AND the cv_tuples.
:return: The resulting query set
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow_id))
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if pre_filter:
filter_txt, filter_fields = evaluate_node_sql(pre_filter)
if cv_tuples:
likes = []
tuple_fields = []
for name, value, data_type in cv_tuples:
# Make sure we escape the name and search as text
name = fix_pctg_in_name(name)
mod_name = '(CAST("{0}" AS TEXT) LIKE %s)'.format(name)
# Create the second part of the query setting column LIKE '%value%'
likes.append(mod_name)
tuple_fields.append('%' + value + '%')
# Combine the search subqueries
if any_join:
tuple_txt = '(' + ' OR '.join(likes) + ')'
else:
tuple_txt = '(' + ' AND '.join(likes) + ')'
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt or cv_tuples:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
fields.extend(filter_fields)
# If there is a pre-filter, the suffix needs to be "AND" with the ones
# just calculated
if filter_txt and cv_tuples:
query += ' AND '
if cv_tuples:
query += tuple_txt
fields.extend(tuple_fields)
# Add the order if needed
if order_col_name:
query += ' ORDER BY "{0}"'.format(fix_pctg_in_name(order_col_name))
if not order_asc:
query += ' DESC'
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
return cursor.fetchall()
def delete_table_row_by_key(workflow_id, kv_pair):
"""
Delete the row in the table attached to a workflow with the given key,
value pairs
    :param workflow_id: Primary key of the workflow to get to the table
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
    be unique.
:return: Drops that row from the table in the DB
"""
# Create the query
query = 'DELETE FROM "{0}"'.format(create_table_name(workflow_id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
def num_rows(pk, cond_filter=None):
"""
Obtain the number of rows of the table storing workflow with given pk
:param pk: Primary key of the table storing the data frame
:param cond_filter: Condition element to filter the query
    :return: Number of rows in the table
"""
return num_rows_by_name(create_table_name(pk), cond_filter)
def num_rows_by_name(table_name, cond_filter=None):
"""
Given a table name, get its number of rows
:param table_name: Table name
:param cond_filter: Condition element used to filter the query
:return: integer
"""
# Initial query with the table name
query = query_count_rows.format(table_name)
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter)
query += ' WHERE ' + cond_filter
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor.fetchone()[0]
def check_wf_df(workflow):
"""
Check the consistency between the information stored in the workflow
and the structure of the underlying dataframe
:param workflow: Workflow object
:return: Boolean stating the result of the check. True: Correct.
"""
# Get the df
df = load_from_db(workflow.id)
# Set values in case there is no df
if df is not None:
dfnrows = df.shape[0]
dfncols = df.shape[1]
df_col_names = list(df.columns)
else:
dfnrows = 0
dfncols = 0
df_col_names = []
# Check 1: Number of rows and columns
if workflow.nrows != dfnrows:
return False
if workflow.ncols != dfncols:
return False
# Identical sets of columns
wf_cols = workflow.columns.all()
if [x.name for x in wf_cols] != df_col_names:
return False
# Identical data types
for n1, n2 in zip(wf_cols, df_col_names):
df_dt = pandas_datatype_names[df[n2].dtype.name]
if n1.data_type == 'boolean' and df_dt == 'string':
# This is the case of a column with Boolean and Nulls
continue
if n1.data_type != df_dt:
return False
return True
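# Illustrative round trip with the helpers above (sketch; assumes the engine has
# been created with create_db_engine and that workflow 1 already has a table):
#   df = load_from_db(1)
#   df['new_column'] = 0
#   store_table(df, create_table_name(1))
#   num_rows(1)  # -> number of rows now stored for workflow 1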
| [((777, 804), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (794, 804), False, 'import logging\n'), ((2261, 2321), 'sqlalchemy.create_engine', 'create_engine', (['database_url'], {'echo': '(False)', 'paramstyle': '"""format"""'}), "(database_url, echo=False, paramstyle='format')\n", (2274, 2321), False, 'from sqlalchemy import create_engine\n'), ((3896, 3993), 'subprocess.Popen', 'subprocess.Popen', (["['psql', '-d', settings.DATABASES['default']['NAME'], '-q', '-f', filename]"], {}), "(['psql', '-d', settings.DATABASES['default']['NAME'], '-q',\n '-f', filename])\n", (3912, 3993), False, 'import subprocess\n'), ((4291, 4310), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4308, 4310), False, 'from django.db import connection\n'), ((4328, 4375), 'django.db.connection.introspection.get_table_list', 'connection.introspection.get_table_list', (['cursor'], {}), '(cursor)\n', (4367, 4375), False, 'from django.db import connection\n'), ((4587, 4606), 'django.db.connection.commit', 'connection.commit', ([], {}), '()\n', (4604, 4606), False, 'from django.db import connection\n'), ((4665, 4684), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4682, 4684), False, 'from django.db import connection\n'), ((7528, 7560), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'engine'], {}), '(query, engine)\n', (7545, 7560), True, 'import pandas as pd\n'), ((8387, 8511), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': '(False)', 'infer_datetime_format': '(True)', 'quotechar': '"""\\""""', 'skiprows': 'skiprows', 'skipfooter': 'skipfooter'}), '(file, index_col=False, infer_datetime_format=True, quotechar=\n \'"\', skiprows=skiprows, skipfooter=skipfooter)\n', (8398, 8511), True, 'import pandas as pd\n'), ((9988, 10034), 'pandas.read_sql', 'pd.read_sql', (['conn_item.db_table', 'db_connection'], {}), '(conn_item.db_table, db_connection)\n', (9999, 10034), True, 'import pandas as pd\n'), ((11361, 11380), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (11378, 11380), False, 'from django.db import connection\n'), ((11461, 11480), 'django.db.connection.commit', 'connection.commit', ([], {}), '()\n', (11478, 11480), False, 'from django.db import connection\n'), ((11637, 11656), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (11654, 11656), False, 'from django.db import connection\n'), ((12794, 12813), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (12811, 12813), False, 'from django.db import connection\n'), ((14640, 14659), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (14657, 14659), False, 'from django.db import connection\n'), ((15785, 15804), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (15802, 15804), False, 'from django.db import connection\n'), ((16588, 16607), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (16605, 16607), False, 'from django.db import connection\n'), ((18249, 18268), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (18266, 18268), False, 'from django.db import connection\n'), ((18311, 18330), 'django.db.connection.commit', 'connection.commit', ([], {}), '()\n', (18328, 18330), False, 'from django.db import connection\n'), ((19093, 19112), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (19110, 19112), False, 'from django.db import connection\n'), ((19158, 19177), 'django.db.connection.commit', 
'connection.commit', ([], {}), '()\n', (19175, 19177), False, 'from django.db import connection\n'), ((20504, 20523), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (20521, 20523), False, 'from django.db import connection\n'), ((27019, 27038), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (27036, 27038), False, 'from django.db import connection\n'), ((27786, 27805), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (27803, 27805), False, 'from django.db import connection\n'), ((28663, 28682), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (28680, 28682), False, 'from django.db import connection\n'), ((6665, 6703), 'django.db.connection.introspection.table_names', 'connection.introspection.table_names', ([], {}), '()\n', (6701, 6703), False, 'from django.db import connection\n'), ((6968, 7015), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'engine'], {'params': 'params'}), '(query, engine, params=params)\n', (6985, 7015), True, 'import pandas as pd\n'), ((7098, 7129), 'pandas.read_sql', 'pd.read_sql', (['table_name', 'engine'], {}), '(table_name, engine)\n', (7109, 7129), True, 'import pandas as pd\n'), ((10404, 10426), 'django.core.cache.cache.lock', 'cache.lock', (['table_name'], {}), '(table_name)\n', (10414, 10426), False, 'from django.core.cache import cache\n'), ((10882, 10901), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (10899, 10901), False, 'from django.db import connection\n'), ((10984, 11003), 'django.db.connection.commit', 'connection.commit', ([], {}), '()\n', (11001, 11003), False, 'from django.db import connection\n'), ((14418, 14456), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['cond_filter.formula'], {}), '(cond_filter.formula)\n', (14435, 14456), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((16243, 16262), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (16260, 16262), False, 'from django.db import connection\n'), ((20157, 20185), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['kv_pair[0]'], {}), '(kv_pair[0])\n', (20173, 20185), False, 'from ontask import fix_pctg_in_name\n'), ((20341, 20379), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['cond_filter.formula'], {}), '(cond_filter.formula)\n', (20358, 20379), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((23627, 23656), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['filter_exp'], {}), '(filter_exp)\n', (23644, 23656), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((25607, 25636), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['pre_filter'], {}), '(pre_filter)\n', (25624, 25636), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((27692, 27720), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['kv_pair[0]'], {}), '(kv_pair[0])\n', (27708, 27720), False, 'from ontask import fix_pctg_in_name\n'), ((28577, 28607), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['cond_filter'], {}), '(cond_filter)\n', (28594, 28607), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((14036, 14055), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (14052, 14055), False, 'from ontask import fix_pctg_in_name\n'), ((16831, 16851), 'itertools.izip', 'izip', (['col_names', 'row'], {}), '(col_names, row)\n', (16835, 16851), False, 'from itertools 
import izip\n'), ((19817, 19836), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (19833, 19836), False, 'from ontask import fix_pctg_in_name\n'), ((23258, 23277), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (23274, 23277), False, 'from ontask import fix_pctg_in_name\n'), ((25218, 25237), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (25234, 25237), False, 'from ontask import fix_pctg_in_name\n'), ((25831, 25853), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['name'], {}), '(name)\n', (25847, 25853), False, 'from ontask import fix_pctg_in_name\n'), ((26900, 26932), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['order_col_name'], {}), '(order_col_name)\n', (26916, 26932), False, 'from ontask import fix_pctg_in_name\n'), ((4725, 4772), 'django.db.connection.introspection.get_table_list', 'connection.introspection.get_table_list', (['cursor'], {}), '(cursor)\n', (4764, 4772), False, 'from django.db import connection\n'), ((9001, 9058), 'pandas.to_datetime', 'pd.to_datetime', (['data_frame[x]'], {'infer_datetime_format': '(True)'}), '(data_frame[x], infer_datetime_format=True)\n', (9015, 9058), True, 'import pandas as pd\n'), ((15469, 15488), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (15485, 15488), False, 'from ontask import fix_pctg_in_name\n'), ((17852, 17871), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (17868, 17871), False, 'from ontask import fix_pctg_in_name\n'), ((18027, 18046), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (18043, 18046), False, 'from ontask import fix_pctg_in_name\n'), ((15902, 15921), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (15918, 15921), False, 'from ontask import fix_pctg_in_name\n')] |
rbsdev/config-client | config/cf.py | 761f39cd8839daba10bf21b98ccdd44d33eaebe8 | from typing import Any, Dict, KeysView
import attr
from config.auth import OAuth2
from config.cfenv import CFenv
from config.spring import ConfigClient
@attr.s(slots=True)
class CF:
cfenv = attr.ib(
type=CFenv, factory=CFenv, validator=attr.validators.instance_of(CFenv),
)
oauth2 = attr.ib(type=OAuth2, default=None)
client = attr.ib(type=ConfigClient, default=None)
def __attrs_post_init__(self) -> None:
if not self.oauth2:
self.oauth2 = OAuth2(
access_token_uri=self.cfenv.configserver_access_token_uri(),
client_id=self.cfenv.configserver_client_id(),
client_secret=self.cfenv.configserver_client_secret(),
)
if not self.client:
self.client = ConfigClient(
address=self.cfenv.configserver_uri(),
app_name=self.cfenv.application_name,
profile=self.cfenv.space_name.lower(),
)
self.oauth2.configure()
@property
def vcap_services(self):
return self.cfenv.vcap_services
@property
def vcap_application(self):
return self.cfenv.vcap_application
def get_config(self) -> None:
header = {"Authorization": f"Bearer {self.oauth2.token}"}
self.client.get_config(headers=header)
@property
def config(self) -> Dict:
return self.client.config
def get_attribute(self, value: str) -> Any:
return self.client.get_attribute(value)
def get_keys(self) -> KeysView:
return self.client.get_keys()
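# Minimal usage sketch (assumes a Cloud Foundry environment where VCAP_SERVICES
# exposes a config-server binding so CFenv can resolve the credentials):
#   cf = CF()
#   cf.get_config()
#   value = cf.get_attribute('spring.cloud.config.uri')  # hypothetical key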
| [((157, 175), 'attr.s', 'attr.s', ([], {'slots': '(True)'}), '(slots=True)\n', (163, 175), False, 'import attr\n'), ((307, 341), 'attr.ib', 'attr.ib', ([], {'type': 'OAuth2', 'default': 'None'}), '(type=OAuth2, default=None)\n', (314, 341), False, 'import attr\n'), ((355, 395), 'attr.ib', 'attr.ib', ([], {'type': 'ConfigClient', 'default': 'None'}), '(type=ConfigClient, default=None)\n', (362, 395), False, 'import attr\n'), ((252, 286), 'attr.validators.instance_of', 'attr.validators.instance_of', (['CFenv'], {}), '(CFenv)\n', (279, 286), False, 'import attr\n')] |
rancp/ducktape-docs | ducktape/template.py | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | # Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import package_is_installed
from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment
import os.path
import inspect
class TemplateRenderer(object):
def render_template(self, template, **kwargs):
"""
Render a template using the context of the current object, optionally with overrides.
:param template: the template to render, a Template or a str
:param kwargs: optional override parameters
:return: the rendered template
"""
        if not hasattr(template, 'render'):
            template = Template(template)
ctx = dict(self.__class__.__dict__)
ctx.update(self.__dict__)
return template.render(ctx, **kwargs)
@staticmethod
def _package_search_path(module_name):
"""
:param module_name: Name of a module
:return: (package, package_search_path) where package is the package containing the module,
and package_search_path is a path relative to the package in which to search for templates.
"""
module_parts = module_name.split(".")
package = module_parts[0]
# Construct path relative to package under which "templates" would be found
directory = ""
for d in module_parts[1: -1]:
directory = os.path.join(directory, d)
return package, os.path.join(directory, "templates")
def render(self, path, **kwargs):
"""
Render a template loaded from a file.
template files referenced in file f should be in a sibling directory of f called "templates".
:param path: path, relative to the search paths, to the template file
:param kwargs: optional override parameters
:return: the rendered template
"""
if not hasattr(self, 'template_loader'):
class_dir = os.path.dirname(inspect.getfile(self.__class__))
module_name = self.__class__.__module__
package, package_search_path = self._package_search_path(module_name)
loaders = []
msg = ""
if os.path.isdir(class_dir):
# FileSystemLoader overrides PackageLoader if the path containing this directory
# is a valid directory. FileSystemLoader throws an error from which ChoiceLoader
# doesn't recover if the directory is invalid
loaders.append(FileSystemLoader(os.path.join(class_dir, 'templates')))
else:
msg += "Will not search in %s for template files since it is not a valid directory. " % class_dir
if package_is_installed(package):
loaders.append(PackageLoader(package, package_search_path))
else:
msg += "Will not search in package %s for template files because it cannot be imported."
if len(loaders) == 0:
# Expect at least one of FileSystemLoader and PackageLoader to be present
raise EnvironmentError(msg)
self.template_loader = ChoiceLoader(loaders)
self.template_env = Environment(loader=self.template_loader, trim_blocks=True, lstrip_blocks=True)
template = self.template_env.get_template(path)
return self.render_template(template, **kwargs)
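# Usage sketch: a hypothetical subclass exposes its attributes to templates.
#   class Service(TemplateRenderer):
#       port = 8080
#   Service().render_template("port={{port}}")  # -> "port=8080"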
| [((1167, 1185), 'jinja2.Template', 'Template', (['template'], {}), '(template)\n', (1175, 1185), False, 'from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment\n'), ((3200, 3229), 'ducktape.utils.util.package_is_installed', 'package_is_installed', (['package'], {}), '(package)\n', (3220, 3229), False, 'from ducktape.utils.util import package_is_installed\n'), ((3635, 3656), 'jinja2.ChoiceLoader', 'ChoiceLoader', (['loaders'], {}), '(loaders)\n', (3647, 3656), False, 'from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment\n'), ((3689, 3767), 'jinja2.Environment', 'Environment', ([], {'loader': 'self.template_loader', 'trim_blocks': '(True)', 'lstrip_blocks': '(True)'}), '(loader=self.template_loader, trim_blocks=True, lstrip_blocks=True)\n', (3700, 3767), False, 'from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment\n'), ((2453, 2484), 'inspect.getfile', 'inspect.getfile', (['self.__class__'], {}), '(self.__class__)\n', (2468, 2484), False, 'import inspect\n'), ((3262, 3305), 'jinja2.PackageLoader', 'PackageLoader', (['package', 'package_search_path'], {}), '(package, package_search_path)\n', (3275, 3305), False, 'from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment\n')] |
AkshayManchanda/Python_Training | day4/homework/q7.py | 5a50472d118ac6d40145bf1dd60f26864bf9fb6c | i=input("Enter a string: ")
list = i.split()
list.sort()
for i in list:
print(i,end=' ')
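# Example: the input "banana apple cherry" prints "apple banana cherry"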
| [] |
staticdev/github-portfolio | src/git_portfolio/use_cases/config_repos.py | 850461eed8160e046ee16664ac3dbc19e3ec0965 | """Config repositories use case."""
from __future__ import annotations
import git_portfolio.config_manager as cm
import git_portfolio.domain.gh_connection_settings as cs
import git_portfolio.responses as res
class ConfigReposUseCase:
"""Gitp config repositories use case."""
def __init__(self, config_manager: cm.ConfigManager) -> None:
"""Initializer."""
self.config_manager = config_manager
def execute(
self, github_config: cs.GhConnectionSettings, selected_repos: list[str]
) -> res.Response:
"""Configuration of git repositories."""
self.config_manager.config.github_access_token = github_config.access_token
self.config_manager.config.github_hostname = github_config.hostname
self.config_manager.config.github_selected_repos = selected_repos
self.config_manager.save_config()
return res.ResponseSuccess("gitp repositories successfully configured.")
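# Usage sketch (hypothetical manager and settings objects):
#   use_case = ConfigReposUseCase(config_manager)
#   response = use_case.execute(github_settings, ["owner/repo-a", "owner/repo-b"])
#   # response is a res.ResponseSuccess carrying the confirmation message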
| [((882, 947), 'git_portfolio.responses.ResponseSuccess', 'res.ResponseSuccess', (['"""gitp repositories successfully configured."""'], {}), "('gitp repositories successfully configured.')\n", (901, 947), True, 'import git_portfolio.responses as res\n')] |
mateuszkowalke/sudoku_game | test/test_logic.py | 800e33a6fe755b493d8e9c3c9a20204af5865148 | import pytest
from ..logic import Board, empty_board, example_board, solved_board
class TestBoard:
def test_create_board(self):
board = Board(example_board)
assert board.tiles == example_board
def test_solve_board(self):
board = Board(example_board)
board.solve()
assert board.tiles == solved_board
def test_check_if_possible(self):
board = Board(example_board)
assert board.check_if_possible(0, 0, 4) == False
assert board.check_if_possible(0, 0, 9) == True
def test_check_solution(self):
board = Board(solved_board)
assert board.check_solution()
def test_new_board(self):
board = Board(empty_board)
board.new_board(example_board)
assert board.tiles == example_board
def test_lock_tiles(self):
board = Board(example_board)
board.lock_tiles()
assert board.check_if_tile_locked(0, 1)
| [] |
jf---/compas | src/compas_rhino/objects/_select.py | cd878ece933013b8ac34e9d42cf6d5c62a5396ee | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import ast
import rhinoscriptsyntax as rs
__all__ = [
'mesh_select_vertex',
'mesh_select_vertices',
'mesh_select_face',
'mesh_select_faces',
'mesh_select_edge',
'mesh_select_edges',
'network_select_node',
'network_select_nodes',
'network_select_edge',
'network_select_edges',
]
def mesh_select_vertex(mesh, message="Select a vertex."):
"""Select a single vertex of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def mesh_select_vertices(mesh, message="Select vertices."):
"""Select multiple vertices of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_face(mesh, message="Select a face."):
"""Select a single face of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
key = ast.literal_eval(key)
return key
return None
def mesh_select_faces(mesh, message="Select faces."):
"""Select multiple faces of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_edge(mesh, message="Select an edge."):
"""Select a single edge of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
tuple of int, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def mesh_select_edges(mesh, message="Select edges."):
"""Select multiple edges of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of tuple of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
def network_select_node(network, message="Select a node."):
"""Select a single node of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
hashable or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def network_select_nodes(network, message="Select nodes."):
"""Select multiple nodes of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def network_select_edge(network, message="Select an edge."):
"""Select a single edge of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
tuple of hashable, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def network_select_edges(network, message="Select edges."):
"""Select multiple edges of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of tuple of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
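# Usage sketch (inside Rhino, assuming the mesh was drawn by compas_rhino so that
# its points/dots carry names of the form '<mesh name>.vertex.<key>' and similar):
#   keys = mesh_select_vertices(mesh)
#   edge = mesh_select_edge(mesh)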
| [((695, 781), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.point | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.point | rs.filter.\n textdot)\n', (707, 781), True, 'import rhinoscriptsyntax as rs\n'), ((1327, 1414), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.point | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.point | rs.filter.\n textdot)\n', (1340, 1414), True, 'import rhinoscriptsyntax as rs\n'), ((2090, 2175), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.mesh | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot\n )\n', (2102, 2175), True, 'import rhinoscriptsyntax as rs\n'), ((2736, 2822), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.mesh | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.mesh | rs.filter.\n textdot)\n', (2749, 2822), True, 'import rhinoscriptsyntax as rs\n'), ((3507, 3593), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.curve | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.curve | rs.filter.\n textdot)\n', (3519, 3593), True, 'import rhinoscriptsyntax as rs\n'), ((4238, 4325), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.curve | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.curve | rs.filter.\n textdot)\n', (4251, 4325), True, 'import rhinoscriptsyntax as rs\n'), ((5112, 5198), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.point | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.point | rs.filter.\n textdot)\n', (5124, 5198), True, 'import rhinoscriptsyntax as rs\n'), ((5756, 5843), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.point | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.point | rs.filter.\n textdot)\n', (5769, 5843), True, 'import rhinoscriptsyntax as rs\n'), ((6551, 6637), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.curve | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.curve | rs.filter.\n textdot)\n', (6563, 6637), True, 'import rhinoscriptsyntax as rs\n'), ((7305, 7392), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.curve | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.curve | rs.filter.\n textdot)\n', (7318, 7392), True, 'import rhinoscriptsyntax as rs\n'), ((846, 865), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (859, 865), True, 'import rhinoscriptsyntax as rs\n'), ((1005, 1026), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (1021, 1026), False, 'import ast\n'), ((2240, 2259), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (2253, 2259), True, 'import rhinoscriptsyntax as rs\n'), ((2396, 2417), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (2412, 2417), False, 'import ast\n'), ((3658, 3677), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (3671, 3677), True, 'import rhinoscriptsyntax 
as rs\n'), ((3850, 3869), 'ast.literal_eval', 'ast.literal_eval', (['u'], {}), '(u)\n', (3866, 3869), False, 'import ast\n'), ((3890, 3909), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (3906, 3909), False, 'import ast\n'), ((5266, 5285), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (5279, 5285), True, 'import rhinoscriptsyntax as rs\n'), ((5423, 5444), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (5439, 5444), False, 'import ast\n'), ((6705, 6724), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (6718, 6724), True, 'import rhinoscriptsyntax as rs\n'), ((6897, 6916), 'ast.literal_eval', 'ast.literal_eval', (['u'], {}), '(u)\n', (6913, 6916), False, 'import ast\n'), ((6937, 6956), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (6953, 6956), False, 'import ast\n'), ((1532, 1551), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (1545, 1551), True, 'import rhinoscriptsyntax as rs\n'), ((2940, 2959), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (2953, 2959), True, 'import rhinoscriptsyntax as rs\n'), ((4443, 4462), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (4456, 4462), True, 'import rhinoscriptsyntax as rs\n'), ((5964, 5983), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (5977, 5983), True, 'import rhinoscriptsyntax as rs\n'), ((7513, 7532), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (7526, 7532), True, 'import rhinoscriptsyntax as rs\n'), ((1752, 1773), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (1768, 1773), False, 'import ast\n'), ((3158, 3179), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (3174, 3179), False, 'import ast\n'), ((4705, 4724), 'ast.literal_eval', 'ast.literal_eval', (['u'], {}), '(u)\n', (4721, 4724), False, 'import ast\n'), ((4753, 4772), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (4769, 4772), False, 'import ast\n'), ((6182, 6203), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (6198, 6203), False, 'import ast\n'), ((7775, 7794), 'ast.literal_eval', 'ast.literal_eval', (['u'], {}), '(u)\n', (7791, 7794), False, 'import ast\n'), ((7823, 7842), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (7839, 7842), False, 'import ast\n')] |
MuchkoM/CalorieMatchBot | handlers/product_add.py | ca26a1f6195079e10dd798ca9e77968438f2aa01 | from telegram import Update
from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters
from db import DBConnector
import re
str_matcher = r"\"(?P<name>.+)\"\s*(?P<fat>\d+)\s*/\s*(?P<protein>\d+)\s*/\s*(?P<carbohydrates>\d+)\s*(?P<kcal>\d+)"
ADD_1 = 0
def add_0(update: Update, _: CallbackContext):
update.message.reply_text('Enter new product in format\n'
'"name" fat/protein/carbohydrates kcal')
return ADD_1
def add_1(update: Update, context: CallbackContext):
db_connect: DBConnector = context.bot_data['db_connect']
result = re.match(str_matcher, update.message.text)
if result:
db_connect.products.insert(result.groupdict())
update.message.reply_text('Product was added')
else:
update.message.reply_text('Message have wrong format')
return ConversationHandler.END
def add_handler(updater: Updater):
"""/product_add - Add product to list known products"""
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('product_add', add_0)],
states={
ADD_1: [MessageHandler(Filters.text & ~Filters.command, add_1)]
},
fallbacks=[]
))
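# Registration sketch (hypothetical bot token):
#   updater = Updater("TOKEN")
#   add_handler(updater)
#   updater.start_polling()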
| [((636, 678), 're.match', 're.match', (['str_matcher', 'update.message.text'], {}), '(str_matcher, update.message.text)\n', (644, 678), False, 'import re\n'), ((1087, 1123), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""product_add"""', 'add_0'], {}), "('product_add', add_0)\n", (1101, 1123), False, 'from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters\n'), ((1163, 1217), 'telegram.ext.MessageHandler', 'MessageHandler', (['(Filters.text & ~Filters.command)', 'add_1'], {}), '(Filters.text & ~Filters.command, add_1)\n', (1177, 1217), False, 'from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters\n')] |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dataset.py | ee45bee6f96cdb6d91184abc16f41bba1546c943 | from mock import patch
import numpy as np
def test_dataset_simple():
from ..dataset import Dataset
data = object()
target = object()
dataset = Dataset(data, target)
assert dataset.data is data
assert dataset.target is target
@patch('nolearn.dataset.np.load')
def test_dataset_with_filenames(load):
from ..dataset import Dataset
data = 'datafile'
target = 'targetfile'
dataset = Dataset(data, target)
assert load.call_count == 2
assert dataset.target is load.return_value
def test_dataset_train_test_split():
from ..dataset import Dataset
data = np.arange(100)
target = np.array([0] * 50 + [1] * 50)
dataset = Dataset(data, target)
assert dataset.split_indices.classes.tolist() == [0, 1]
assert dataset.split_indices.n_train == 75
assert dataset.split_indices.n_test == 25
X_train, X_test, y_train, y_test = dataset.train_test_split()
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
def test_dataset_scale():
from ..dataset import Dataset
data = np.arange(100).astype('float')
target = np.array([0] * 100)
dataset = Dataset(data, target)
dataset.scale()
assert dataset.data[0] == -1.7148160424389376
assert dataset.data[-1] == 1.7148160424389376
| [((255, 287), 'mock.patch', 'patch', (['"""nolearn.dataset.np.load"""'], {}), "('nolearn.dataset.np.load')\n", (260, 287), False, 'from mock import patch\n'), ((610, 624), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (619, 624), True, 'import numpy as np\n'), ((638, 667), 'numpy.array', 'np.array', (['([0] * 50 + [1] * 50)'], {}), '([0] * 50 + [1] * 50)\n', (646, 667), True, 'import numpy as np\n'), ((1121, 1140), 'numpy.array', 'np.array', (['([0] * 100)'], {}), '([0] * 100)\n', (1129, 1140), True, 'import numpy as np\n'), ((1077, 1091), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1086, 1091), True, 'import numpy as np\n')] |
EpicTofuu/Assignment | src/Cipher/MultiLevelCaesarDecrypt.py | 293f99d20e8fa7d688c16a56c48a554bcd3c9e7d | import Cipher.tk
from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode
def MultiDecrypt (message, alphabet, usables = 3, lan = "English", transformations = None, lowestchi = 9999, ogMessage = ""):
    # use None as the default to avoid sharing one mutable list between calls
    if transformations is None:
        transformations = []
    msg = ""
    prev = (9999, (0, 0)) # (chi, key)
for i in range (len(message)):
for k in range (1, len (alphabet)):
msg = EncryptDecryptCoord(message, (i,k), alphabet, Mode.DECRYPT)
chi = GetChiSquared (msg, lan)
if (round (chi, 3) < round (prev[0], 3)):
prev = (chi, (i,k))
# base case
if (prev[0] >= lowestchi):
v = ogMessage
for tr in transformations:
v = EncryptDecryptCoord (v, tr, alphabet, Mode.DECRYPT)
return (v, lowestchi, transformations)
if (len(transformations) == 0): # only set lowest chi on the first run
lowestchi = prev[0]
ogMessage = message
transformations.append (prev[1])
return MultiDecrypt (EncryptDecryptCoord (message, prev[1], alphabet, Mode.DECRYPT), alphabet, usables, lan, transformations, prev[0], ogMessage)
'''
# testing do write it here
a = " abcdefghijklmnopqrstuvwxyz"
p=[]
for c in a:
p.append (c)
print ("starting...")
print (MultiDecrypt ("dtyktckcxlbd", p))
# original 231
''' | [((1013, 1074), 'Cipher.tk.EncryptDecryptCoord', 'EncryptDecryptCoord', (['message', 'prev[1]', 'alphabet', 'Mode.DECRYPT'], {}), '(message, prev[1], alphabet, Mode.DECRYPT)\n', (1032, 1074), False, 'from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode\n'), ((378, 438), 'Cipher.tk.EncryptDecryptCoord', 'EncryptDecryptCoord', (['message', '(i, k)', 'alphabet', 'Mode.DECRYPT'], {}), '(message, (i, k), alphabet, Mode.DECRYPT)\n', (397, 438), False, 'from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode\n'), ((457, 480), 'Cipher.tk.GetChiSquared', 'GetChiSquared', (['msg', 'lan'], {}), '(msg, lan)\n', (470, 480), False, 'from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode\n'), ((703, 753), 'Cipher.tk.EncryptDecryptCoord', 'EncryptDecryptCoord', (['v', 'tr', 'alphabet', 'Mode.DECRYPT'], {}), '(v, tr, alphabet, Mode.DECRYPT)\n', (722, 753), False, 'from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode\n')] |
bunop/cyvcf | scripts/vcf_filter.py | f58860dd06b215b9d9ae80e2b46337fb6ab59139 | #!/usr/bin/env python
import sys
import argparse
import pkg_resources
import vcf
from vcf.parser import _Filter
parser = argparse.ArgumentParser(description='Filter a VCF file',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('input', metavar='input', type=str, nargs=1,
help='File to process (use - for STDIN)')
parser.add_argument('filters', metavar='filter', type=str, nargs='+',
help='Filters to use')
parser.add_argument('--no-short-circuit', action='store_true',
help='Do not stop filter processing on a site if a single filter fails.')
parser.add_argument('--output', action='store', default=sys.stdout,
help='Filename to output (default stdout)')
parser.add_argument('--no-filtered', action='store_true',
help='Remove failed sites')
if __name__ == '__main__':
# TODO: allow filter specification by short name
# TODO: flag that writes filter output into INFO column
# TODO: argument use implies filter use
# TODO: parallelize
# TODO: prevent plugins raising an exception from crashing the script
# dynamically build the list of available filters
filters = {}
filter_help = '\n\navailable filters:'
for p in pkg_resources.iter_entry_points('vcf.filters'):
filt = p.load()
filters[filt.name] = filt
filt.customize_parser(parser)
filter_help += '\n %s:\t%s' % (filt.name, filt.description)
parser.description += filter_help
# parse command line args
args = parser.parse_args()
    inp = vcf.Reader(open(args.input[0]))  # open() rather than the Python 2-only file() builtin
# build filter chain
chain = []
for name in args.filters:
f = filters[name](args)
chain.append(f)
inp.filters[f.filter_name()] = _Filter(f.filter_name(), f.description)
oup = vcf.Writer(args.output, inp)
# apply filters
short_circuit = not args.no_short_circuit
for record in inp:
for filt in chain:
result = filt(record)
if result:
record.add_filter(filt.filter_name())
if short_circuit:
break
if (not args.no_filtered) or (record.FILTER == '.'):
oup.write_record(record)
| [((123, 238), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Filter a VCF file"""', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), "(description='Filter a VCF file', formatter_class=\n argparse.RawDescriptionHelpFormatter)\n", (146, 238), False, 'import argparse\n'), ((1241, 1287), 'pkg_resources.iter_entry_points', 'pkg_resources.iter_entry_points', (['"""vcf.filters"""'], {}), "('vcf.filters')\n", (1272, 1287), False, 'import pkg_resources\n'), ((1815, 1843), 'vcf.Writer', 'vcf.Writer', (['args.output', 'inp'], {}), '(args.output, inp)\n', (1825, 1843), False, 'import vcf\n')] |
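The script above discovers its filters through the 'vcf.filters' entry-point group and relies only on a small duck-typed interface: class-level name and description, a customize_parser() hook, an args-taking constructor, filter_name(), and __call__() returning a truthy value when a record should be marked. A hypothetical plugin shaped that way might look like the sketch below; the --depth-min option and the DP INFO field are illustrative, and PyVCF's own filter base class is not assumed.

class DepthFilter(object):
    name = 'depth'
    description = 'Fail sites whose INFO DP field is below a minimum depth'

    @classmethod
    def customize_parser(cls, parser):
        parser.add_argument('--depth-min', type=int, default=10,
                            help='minimum depth for the depth filter')

    def __init__(self, args):
        self.threshold = args.depth_min

    def filter_name(self):
        return '%s%d' % (self.name, self.threshold)

    def __call__(self, record):
        # a truthy return value makes the driver add this filter's name to the record
        if record.INFO.get('DP', 0) < self.threshold:
            return True

Such a class would be advertised through a setup.py entry point, e.g. entry_points={'vcf.filters': ['depth = mypackage.filters:DepthFilter']}, so that pkg_resources.iter_entry_points('vcf.filters') can find it.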
Muxelmann/home-projects | src/flocker/blueprints/red/__init__.py | 85bd06873174b9c5c6276160988c19b460370db8 | import os
from flask import Blueprint, render_template
def create_bp():
bp_red = Blueprint('red', __name__, url_prefix='/red')
@bp_red.route('/index/')
@bp_red.route('/')
def index():
return render_template('red/index.html')
return bp_red | [((86, 131), 'flask.Blueprint', 'Blueprint', (['"""red"""', '__name__'], {'url_prefix': '"""/red"""'}), "('red', __name__, url_prefix='/red')\n", (95, 131), False, 'from flask import Blueprint, render_template\n'), ((217, 250), 'flask.render_template', 'render_template', (['"""red/index.html"""'], {}), "('red/index.html')\n", (232, 250), False, 'from flask import Blueprint, render_template\n')] |
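For context, a blueprint factory like create_bp() is typically registered on the application elsewhere; a minimal, assumed wiring (not part of this file) could be:

from flask import Flask

def create_app():
    app = Flask(__name__)
    app.register_blueprint(create_bp())  # serves /red/ and /red/index/
    return app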
whoiscc/alphacoders | alphacoders/__init__.py | 685d1e7e02a7276ae0518114b0c6aab58914aab7 | #
from aiohttp.client_exceptions import ClientError
from lxml import html
from pathlib import Path
from asyncio import create_task
from functools import wraps
def start_immediately(task):
@wraps(task)
def wrapper(*args, **kwargs):
return create_task(task(*args, **kwargs))
return wrapper
@start_immediately
async def download_page(client, url):
count = 0
while True:
print(f"(retry = {count}) download url: {url}")
try:
async with client.get(url) as resp:
assert resp.status == 200
return await resp.text()
except ClientError:
pass
finally:
count += 1
@start_immediately
async def download_image(client, url, target_dir, name):
count = 0
while True:
print(f"(retry = {count}) download image: {url} -> {target_dir / name}")
try:
async with client.get(url) as resp:
content = await resp.read()
target_dir.mkdir(exist_ok=True)
(target_dir / name).write_bytes(content)
return
except ClientError:
pass
finally:
count += 1
def download_search(client, keyword, page):
safe_keyword = keyword.replace(" ", "+")
# url = f"https://mobile.alphacoders.com/by-resolution/5?search={safe_keyword}&page={page}"
url = f"https://wall.alphacoders.com/search.php?search={safe_keyword}&page={page}"
return download_page(client, url)
@start_immediately
async def query_image_id(client, keyword=None, page=None, document=None):
if document is None:
assert keyword is not None and page is not None
search = await download_search(client, keyword, page)
document = html.fromstring(search)
a_list = document.xpath('//div[@class="boxgrid"]/a')
href_list = [a.attrib["href"] for a in a_list]
return href_list
def query_page_count(document):
count_string = document.xpath('//ul[@class="pagination"]/li[last() - 1]/a/text()')[
0
]
return int(count_string)
@start_immediately
async def query_image_url(client, detail_path):
url = f"https://wall.alphacoders.com/{detail_path}"
detail = await download_page(client, url)
document = html.fromstring(detail)
image = document.xpath('//div[@class="center img-container-desktop"]/a')[0]
return image.attrib["href"]
@start_immediately
async def download_image_by_id(manager, client, image_id, target_dir):
image_url = await query_image_url(client, image_id)
name = image_url.split("/")[-1]
await download_image(client, image_url, target_dir, name)
manager.complete_count += 1
class SingleTask:
def __init__(self, keyword, limit=None):
self.keyword = keyword
self.limit = limit
self.complete_count = 0
self.triggered = False
async def run(self, client):
assert not self.triggered
self.triggered = True
first_search_doc = html.fromstring(
await download_search(client, self.keyword, 1)
)
page_count = query_page_count(first_search_doc)
download_image_task_list = []
image_count = 0
for page in range(1, page_count + 1):
if page == 1:
partial_list = await query_image_id(client, document=first_search_doc)
else:
partial_list = await query_image_id(
client, keyword=self.keyword, page=page
)
if self.limit is not None:
partial_list = partial_list[: self.limit - image_count]
image_count += len(partial_list)
for image_id in partial_list:
download_image_task_list.append(
download_image_by_id(self, client, image_id, Path(self.keyword))
)
if self.limit is not None and image_count == self.limit:
break
for task in download_image_task_list:
await task
@start_immediately
async def execute_single_task(manager, client):
return await manager.run(client)
| [((196, 207), 'functools.wraps', 'wraps', (['task'], {}), '(task)\n', (201, 207), False, 'from functools import wraps\n'), ((2268, 2291), 'lxml.html.fromstring', 'html.fromstring', (['detail'], {}), '(detail)\n', (2283, 2291), False, 'from lxml import html\n'), ((1762, 1785), 'lxml.html.fromstring', 'html.fromstring', (['search'], {}), '(search)\n', (1777, 1785), False, 'from lxml import html\n'), ((3819, 3837), 'pathlib.Path', 'Path', (['self.keyword'], {}), '(self.keyword)\n', (3823, 3837), False, 'from pathlib import Path\n')] |
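A minimal, assumed entry point for the downloader above; the original project may construct the session and task differently, and the keyword and limit values here are made up.

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as client:
        manager = SingleTask("nature", limit=5)
        await execute_single_task(manager, client)

if __name__ == "__main__":
    asyncio.run(main())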
PeriscopeData/analytics-toolbox | Python/Calculating_Trimmed_Means/calculating_trimmed_means1.py | 83effdee380c33e5eecea29528acf5375fd496fb | # SQL output is imported as a pandas dataframe variable called "df"
# Source: https://stackoverflow.com/questions/19441730/trimmed-mean-with-percentage-limit-in-python
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import tmean, scoreatpercentile
import numpy as np
def trimmean(arr, percent):
lower_limit = scoreatpercentile(arr, percent)
upper_limit = scoreatpercentile(arr, 100-percent)
return tmean(arr, limits=(lower_limit, upper_limit), inclusive=(False, False))
my_result = trimmean(df["amt_paid"].values,10) | [((337, 368), 'scipy.stats.scoreatpercentile', 'scoreatpercentile', (['arr', 'percent'], {}), '(arr, percent)\n', (354, 368), False, 'from scipy.stats import tmean, scoreatpercentile\n'), ((387, 424), 'scipy.stats.scoreatpercentile', 'scoreatpercentile', (['arr', '(100 - percent)'], {}), '(arr, 100 - percent)\n', (404, 424), False, 'from scipy.stats import tmean, scoreatpercentile\n'), ((434, 505), 'scipy.stats.tmean', 'tmean', (['arr'], {'limits': '(lower_limit, upper_limit)', 'inclusive': '(False, False)'}), '(arr, limits=(lower_limit, upper_limit), inclusive=(False, False))\n', (439, 505), False, 'from scipy.stats import tmean, scoreatpercentile\n')] |
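A quick illustration with made-up numbers (not from the Periscope dataframe): trimming at 10% keeps only values strictly between the 10th and 90th percentiles before averaging, so a single huge outlier barely moves the result.

sample = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 1000])
print(trimmean(sample, 10))  # roughly 5.5, the mean of the surviving values 2..9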
amichalski2/WBC-SHAP | scripts/data_extract.py | b69a4a8746aaf7a8dfacfdb4dbd85b4868d73ad0 | import os
import cv2
import random
import numpy as np
from tensorflow.keras.utils import to_categorical
from scripts.consts import class_dict
def get_data(path, split=0.2):
X, y = [], []
for directory in os.listdir(path):
dirpath = os.path.join(path, directory)
print(directory, len(os.listdir(dirpath)))
for file in os.listdir(dirpath):
filepath = os.path.join(dirpath, file)
img = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)
            if img.shape != (360, 363, 3):
                # dsize is (width, height) and the flag must be passed as interpolation=,
                # otherwise OpenCV would treat it as the optional dst argument
                img = cv2.resize(img, (363, 360), interpolation=cv2.INTER_CUBIC)
X.append(img)
y.append(class_dict[directory])
data = list(zip(X, y))
random.shuffle(data)
X, y = zip(*data)
num_train = int((1.0 - split) * len(y))
X_train, X_valid = np.array(X[:num_train]).astype(
'float32'), np.array(X[num_train:]).astype('float32')
y_train, y_valid = np.array(
y[:num_train]).reshape(-1, 1), np.array(y[num_train:]).reshape((-1, 1))
X_train = X_train / 255.0
X_valid = X_valid / 255.0
y_train, y_valid = to_categorical(y_train), to_categorical(y_valid)
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
return X_train, y_train, X_valid, y_valid
| [((216, 232), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (226, 232), False, 'import os\n'), ((716, 736), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (730, 736), False, 'import random\n'), ((253, 282), 'os.path.join', 'os.path.join', (['path', 'directory'], {}), '(path, directory)\n', (265, 282), False, 'import os\n'), ((355, 374), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (365, 374), False, 'import os\n'), ((1121, 1144), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (1135, 1144), False, 'from tensorflow.keras.utils import to_categorical\n'), ((1146, 1169), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_valid'], {}), '(y_valid)\n', (1160, 1169), False, 'from tensorflow.keras.utils import to_categorical\n'), ((400, 427), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (412, 427), False, 'import os\n'), ((446, 488), 'cv2.imread', 'cv2.imread', (['filepath', 'cv2.IMREAD_UNCHANGED'], {}), '(filepath, cv2.IMREAD_UNCHANGED)\n', (456, 488), False, 'import cv2\n'), ((312, 331), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (322, 331), False, 'import os\n'), ((567, 611), 'cv2.resize', 'cv2.resize', (['img', '(360, 363)', 'cv2.INTER_CUBIC'], {}), '(img, (360, 363), cv2.INTER_CUBIC)\n', (577, 611), False, 'import cv2\n'), ((829, 852), 'numpy.array', 'np.array', (['X[:num_train]'], {}), '(X[:num_train])\n', (837, 852), True, 'import numpy as np\n'), ((881, 904), 'numpy.array', 'np.array', (['X[num_train:]'], {}), '(X[num_train:])\n', (889, 904), True, 'import numpy as np\n'), ((946, 969), 'numpy.array', 'np.array', (['y[:num_train]'], {}), '(y[:num_train])\n', (954, 969), True, 'import numpy as np\n'), ((995, 1018), 'numpy.array', 'np.array', (['y[num_train:]'], {}), '(y[num_train:])\n', (1003, 1018), True, 'import numpy as np\n')] |
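A hypothetical call site for get_data(); the "data/" layout (one sub-directory per class, named as in class_dict) is an assumption, not something this file defines.

X_train, y_train, X_valid, y_valid = get_data("data/", split=0.2)
print(X_train.shape, y_train.shape)  # e.g. (N, 360, 363, 3) and (N, num_classes)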
tzumainn/ironic | ironic/tests/unit/drivers/test_base.py | 91680bd450a4b2259d153b6a995a9436a5f82694 | # Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from ironic.common import exception
from ironic.common import raid
from ironic.common import states
from ironic.drivers import base as driver_base
from ironic.drivers.modules import fake
from ironic.tests import base
class FakeVendorInterface(driver_base.VendorInterface):
def get_properties(self):
pass
@driver_base.passthru(['POST'])
def noexception(self):
return "Fake"
@driver_base.driver_passthru(['POST'])
def driver_noexception(self):
return "Fake"
@driver_base.passthru(['POST'])
def ironicexception(self):
raise exception.IronicException("Fake!")
@driver_base.passthru(['POST'])
def normalexception(self):
raise Exception("Fake!")
@driver_base.passthru(['POST'], require_exclusive_lock=False)
def shared_task(self):
return "shared fake"
def validate(self, task, **kwargs):
pass
def driver_validate(self, **kwargs):
pass
class PassthruDecoratorTestCase(base.TestCase):
def setUp(self):
super(PassthruDecoratorTestCase, self).setUp()
self.fvi = FakeVendorInterface()
def test_passthru_noexception(self):
result = self.fvi.noexception()
self.assertEqual("Fake", result)
@mock.patch.object(driver_base, 'LOG', autospec=True)
def test_passthru_ironicexception(self, mock_log):
self.assertRaises(exception.IronicException,
self.fvi.ironicexception, mock.ANY)
mock_log.exception.assert_called_with(
mock.ANY, 'ironicexception')
@mock.patch.object(driver_base, 'LOG', autospec=True)
def test_passthru_nonironicexception(self, mock_log):
self.assertRaises(exception.VendorPassthruException,
self.fvi.normalexception, mock.ANY)
mock_log.exception.assert_called_with(
mock.ANY, 'normalexception')
def test_passthru_shared_task_metadata(self):
self.assertIn('require_exclusive_lock',
self.fvi.shared_task._vendor_metadata[1])
self.assertFalse(
self.fvi.shared_task._vendor_metadata[1]['require_exclusive_lock'])
def test_passthru_exclusive_task_metadata(self):
self.assertIn('require_exclusive_lock',
self.fvi.noexception._vendor_metadata[1])
self.assertTrue(
self.fvi.noexception._vendor_metadata[1]['require_exclusive_lock'])
def test_passthru_check_func_references(self):
inst1 = FakeVendorInterface()
inst2 = FakeVendorInterface()
self.assertNotEqual(inst1.vendor_routes['noexception']['func'],
inst2.vendor_routes['noexception']['func'])
self.assertNotEqual(inst1.driver_routes['driver_noexception']['func'],
inst2.driver_routes['driver_noexception']['func'])
class CleanStepDecoratorTestCase(base.TestCase):
def setUp(self):
super(CleanStepDecoratorTestCase, self).setUp()
method_mock = mock.MagicMock()
del method_mock._is_clean_step
del method_mock._clean_step_priority
del method_mock._clean_step_abortable
del method_mock._clean_step_argsinfo
self.method = method_mock
def test__validate_argsinfo(self):
# None, empty dict
driver_base._validate_argsinfo(None)
driver_base._validate_argsinfo({})
# Only description specified
driver_base._validate_argsinfo({'arg1': {'description': 'desc1'}})
# Multiple args
driver_base._validate_argsinfo({'arg1': {'description': 'desc1',
'required': True},
'arg2': {'description': 'desc2'}})
def test__validate_argsinfo_not_dict(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'argsinfo.+dictionary',
driver_base._validate_argsinfo, 'not-a-dict')
def test__validate_argsinfo_arg_not_dict(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'Argument.+dictionary',
driver_base._validate_argsinfo,
{'arg1': 'not-a-dict'})
def test__validate_argsinfo_arg_empty_dict(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'description',
driver_base._validate_argsinfo,
{'arg1': {}})
def test__validate_argsinfo_arg_missing_description(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'description',
driver_base._validate_argsinfo,
{'arg1': {'required': True}})
def test__validate_argsinfo_arg_description_invalid(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'string',
driver_base._validate_argsinfo,
{'arg1': {'description': True}})
def test__validate_argsinfo_arg_required_invalid(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'Boolean',
driver_base._validate_argsinfo,
{'arg1': {'description': 'desc1',
'required': 'maybe'}})
def test__validate_argsinfo_arg_unknown_key(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'invalid',
driver_base._validate_argsinfo,
{'arg1': {'description': 'desc1',
'unknown': 'bad'}})
def test_clean_step_priority_only(self):
d = driver_base.clean_step(priority=10)
d(self.method)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(10, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertIsNone(self.method._clean_step_argsinfo)
def test_clean_step_all_args(self):
argsinfo = {'arg1': {'description': 'desc1',
'required': True}}
d = driver_base.clean_step(priority=0, abortable=True,
argsinfo=argsinfo)
d(self.method)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertTrue(self.method._clean_step_abortable)
self.assertEqual(argsinfo, self.method._clean_step_argsinfo)
def test_clean_step_bad_priority(self):
d = driver_base.clean_step(priority='hi')
self.assertRaisesRegex(exception.InvalidParameterValue, 'priority',
d, self.method)
self.assertTrue(self.method._is_clean_step)
self.assertFalse(hasattr(self.method, '_clean_step_priority'))
self.assertFalse(hasattr(self.method, '_clean_step_abortable'))
self.assertFalse(hasattr(self.method, '_clean_step_argsinfo'))
def test_clean_step_bad_abortable(self):
d = driver_base.clean_step(priority=0, abortable='blue')
self.assertRaisesRegex(exception.InvalidParameterValue, 'abortable',
d, self.method)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertFalse(hasattr(self.method, '_clean_step_abortable'))
self.assertFalse(hasattr(self.method, '_clean_step_argsinfo'))
@mock.patch.object(driver_base, '_validate_argsinfo', spec_set=True,
autospec=True)
def test_clean_step_bad_argsinfo(self, mock_valid):
mock_valid.side_effect = exception.InvalidParameterValue('bad')
d = driver_base.clean_step(priority=0, argsinfo=100)
self.assertRaises(exception.InvalidParameterValue, d, self.method)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertFalse(hasattr(self.method, '_clean_step_argsinfo'))
class CleanStepTestCase(base.TestCase):
def test_get_and_execute_clean_steps(self):
# Create a fake Driver class, create some clean steps, make sure
# they are listed correctly, and attempt to execute one of them
method_mock = mock.MagicMock(spec_set=[])
method_args_mock = mock.MagicMock(spec_set=[])
task_mock = mock.MagicMock(spec_set=[])
class BaseTestClass(driver_base.BaseInterface):
def get_properties(self):
return {}
def validate(self, task):
pass
class TestClass(BaseTestClass):
interface_type = 'test'
@driver_base.clean_step(priority=0)
def manual_method(self, task):
pass
@driver_base.clean_step(priority=10, abortable=True)
def automated_method(self, task):
method_mock(task)
def not_clean_method(self, task):
pass
class TestClass2(BaseTestClass):
interface_type = 'test2'
@driver_base.clean_step(priority=0)
def manual_method2(self, task):
pass
@driver_base.clean_step(priority=20, abortable=True)
def automated_method2(self, task):
method_mock(task)
def not_clean_method2(self, task):
pass
class TestClass3(BaseTestClass):
interface_type = 'test3'
@driver_base.clean_step(priority=0, abortable=True, argsinfo={
'arg1': {'description': 'desc1',
'required': True}})
def manual_method3(self, task, **kwargs):
method_args_mock(task, **kwargs)
@driver_base.clean_step(priority=15, argsinfo={
'arg10': {'description': 'desc10'}})
def automated_method3(self, task, **kwargs):
pass
def not_clean_method3(self, task):
pass
obj = TestClass()
obj2 = TestClass2()
obj3 = TestClass3()
self.assertEqual(2, len(obj.get_clean_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(10, obj.get_clean_steps(task_mock)[0]['priority'])
self.assertTrue(obj.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test', obj.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method', obj.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj.get_clean_steps(task_mock)[1]['priority'])
self.assertFalse(obj.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual('test', obj.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method', obj.get_clean_steps(
task_mock)[1]['step'])
# Ensure the second obj get different clean steps
self.assertEqual(2, len(obj2.get_clean_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(20, obj2.get_clean_steps(task_mock)[0]['priority'])
self.assertTrue(obj2.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test2', obj2.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method2', obj2.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj2.get_clean_steps(task_mock)[1]['priority'])
self.assertFalse(obj2.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual('test2', obj2.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method2', obj2.get_clean_steps(
task_mock)[1]['step'])
self.assertIsNone(obj2.get_clean_steps(task_mock)[0]['argsinfo'])
# Ensure the third obj has different clean steps
self.assertEqual(2, len(obj3.get_clean_steps(task_mock)))
self.assertEqual(15, obj3.get_clean_steps(task_mock)[0]['priority'])
self.assertFalse(obj3.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test3', obj3.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method3', obj3.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual({'arg10': {'description': 'desc10'}},
obj3.get_clean_steps(task_mock)[0]['argsinfo'])
self.assertEqual(0, obj3.get_clean_steps(task_mock)[1]['priority'])
self.assertTrue(obj3.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual(obj3.interface_type, obj3.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method3', obj3.get_clean_steps(
task_mock)[1]['step'])
self.assertEqual({'arg1': {'description': 'desc1', 'required': True}},
obj3.get_clean_steps(task_mock)[1]['argsinfo'])
# Ensure we can execute the function.
obj.execute_clean_step(task_mock, obj.get_clean_steps(task_mock)[0])
method_mock.assert_called_once_with(task_mock)
args = {'arg1': 'val1'}
clean_step = {'interface': 'test3', 'step': 'manual_method3',
'args': args}
obj3.execute_clean_step(task_mock, clean_step)
method_args_mock.assert_called_once_with(task_mock, **args)
class DeployStepDecoratorTestCase(base.TestCase):
def setUp(self):
super(DeployStepDecoratorTestCase, self).setUp()
method_mock = mock.MagicMock()
del method_mock._is_deploy_step
del method_mock._deploy_step_priority
del method_mock._deploy_step_argsinfo
self.method = method_mock
def test_deploy_step_priority_only(self):
d = driver_base.deploy_step(priority=10)
d(self.method)
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(10, self.method._deploy_step_priority)
self.assertIsNone(self.method._deploy_step_argsinfo)
def test_deploy_step_all_args(self):
argsinfo = {'arg1': {'description': 'desc1',
'required': True}}
d = driver_base.deploy_step(priority=0, argsinfo=argsinfo)
d(self.method)
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(0, self.method._deploy_step_priority)
self.assertEqual(argsinfo, self.method._deploy_step_argsinfo)
def test_deploy_step_bad_priority(self):
d = driver_base.deploy_step(priority='hi')
self.assertRaisesRegex(exception.InvalidParameterValue, 'priority',
d, self.method)
self.assertTrue(self.method._is_deploy_step)
self.assertFalse(hasattr(self.method, '_deploy_step_priority'))
self.assertFalse(hasattr(self.method, '_deploy_step_argsinfo'))
@mock.patch.object(driver_base, '_validate_argsinfo', spec_set=True,
autospec=True)
def test_deploy_step_bad_argsinfo(self, mock_valid):
mock_valid.side_effect = exception.InvalidParameterValue('bad')
d = driver_base.deploy_step(priority=0, argsinfo=100)
self.assertRaises(exception.InvalidParameterValue, d, self.method)
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(0, self.method._deploy_step_priority)
self.assertFalse(hasattr(self.method, '_deploy_step_argsinfo'))
class DeployAndCleanStepDecoratorTestCase(base.TestCase):
def setUp(self):
super(DeployAndCleanStepDecoratorTestCase, self).setUp()
method_mock = mock.MagicMock()
del method_mock._is_deploy_step
del method_mock._deploy_step_priority
del method_mock._deploy_step_argsinfo
del method_mock._is_clean_step
del method_mock._clean_step_priority
del method_mock._clean_step_abortable
del method_mock._clean_step_argsinfo
self.method = method_mock
def test_deploy_and_clean_step_priority_only(self):
dd = driver_base.deploy_step(priority=10)
dc = driver_base.clean_step(priority=11)
dd(dc(self.method))
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(10, self.method._deploy_step_priority)
self.assertIsNone(self.method._deploy_step_argsinfo)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(11, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertIsNone(self.method._clean_step_argsinfo)
def test_deploy_and_clean_step_all_args(self):
dargsinfo = {'arg1': {'description': 'desc1',
'required': True}}
cargsinfo = {'arg2': {'description': 'desc2',
'required': False}}
dd = driver_base.deploy_step(priority=0, argsinfo=dargsinfo)
dc = driver_base.clean_step(priority=0, argsinfo=cargsinfo)
dd(dc(self.method))
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(0, self.method._deploy_step_priority)
self.assertEqual(dargsinfo, self.method._deploy_step_argsinfo)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertEqual(cargsinfo, self.method._clean_step_argsinfo)
def test_clean_and_deploy_step_all_args(self):
# Opposite ordering, should make no difference.
dargsinfo = {'arg1': {'description': 'desc1',
'required': True}}
cargsinfo = {'arg2': {'description': 'desc2',
'required': False}}
dd = driver_base.deploy_step(priority=0, argsinfo=dargsinfo)
dc = driver_base.clean_step(priority=0, argsinfo=cargsinfo)
dc(dd(self.method))
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(0, self.method._deploy_step_priority)
self.assertEqual(dargsinfo, self.method._deploy_step_argsinfo)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertEqual(cargsinfo, self.method._clean_step_argsinfo)
class DeployStepTestCase(base.TestCase):
def test_get_and_execute_deploy_steps(self):
# Create a fake Driver class, create some deploy steps, make sure
# they are listed correctly, and attempt to execute one of them
method_mock = mock.MagicMock(spec_set=[])
method_args_mock = mock.MagicMock(spec_set=[])
task_mock = mock.MagicMock(spec_set=[])
class BaseTestClass(driver_base.BaseInterface):
def get_properties(self):
return {}
def validate(self, task):
pass
class TestClass(BaseTestClass):
interface_type = 'test'
@driver_base.deploy_step(priority=0)
def deploy_zero(self, task):
pass
@driver_base.deploy_step(priority=10)
def deploy_ten(self, task):
method_mock(task)
def not_deploy_method(self, task):
pass
class TestClass2(BaseTestClass):
interface_type = 'test2'
@driver_base.deploy_step(priority=0)
def deploy_zero2(self, task):
pass
@driver_base.deploy_step(priority=20)
def deploy_twenty(self, task):
method_mock(task)
def not_deploy_method2(self, task):
pass
class TestClass3(BaseTestClass):
interface_type = 'test3'
@driver_base.deploy_step(priority=0, argsinfo={
'arg1': {'description': 'desc1',
'required': True}})
def deploy_zero3(self, task, **kwargs):
method_args_mock(task, **kwargs)
@driver_base.deploy_step(priority=15, argsinfo={
'arg10': {'description': 'desc10'}})
def deploy_fifteen(self, task, **kwargs):
pass
def not_deploy_method3(self, task):
pass
obj = TestClass()
obj2 = TestClass2()
obj3 = TestClass3()
self.assertEqual(2, len(obj.get_deploy_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(10, obj.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test', obj.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_ten', obj.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual('test', obj.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero', obj.get_deploy_steps(
task_mock)[1]['step'])
# Ensure the second obj has different deploy steps
self.assertEqual(2, len(obj2.get_deploy_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(20, obj2.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test2', obj2.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_twenty', obj2.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj2.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual('test2', obj2.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero2', obj2.get_deploy_steps(
task_mock)[1]['step'])
self.assertIsNone(obj2.get_deploy_steps(task_mock)[0]['argsinfo'])
# Ensure the third obj has different deploy steps
self.assertEqual(2, len(obj3.get_deploy_steps(task_mock)))
self.assertEqual(15, obj3.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test3', obj3.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_fifteen', obj3.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual({'arg10': {'description': 'desc10'}},
obj3.get_deploy_steps(task_mock)[0]['argsinfo'])
self.assertEqual(0, obj3.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual(obj3.interface_type, obj3.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero3', obj3.get_deploy_steps(
task_mock)[1]['step'])
self.assertEqual({'arg1': {'description': 'desc1', 'required': True}},
obj3.get_deploy_steps(task_mock)[1]['argsinfo'])
# Ensure we can execute the function.
obj.execute_deploy_step(task_mock, obj.get_deploy_steps(task_mock)[0])
method_mock.assert_called_once_with(task_mock)
args = {'arg1': 'val1'}
deploy_step = {'interface': 'test3', 'step': 'deploy_zero3',
'args': args}
obj3.execute_deploy_step(task_mock, deploy_step)
method_args_mock.assert_called_once_with(task_mock, **args)
class MyRAIDInterface(driver_base.RAIDInterface):
def create_configuration(self, task,
create_root_volume=True,
create_nonroot_volumes=True,
delete_existing=True):
pass
def delete_configuration(self, task):
pass
class RAIDInterfaceTestCase(base.TestCase):
@mock.patch.object(driver_base.RAIDInterface, 'validate_raid_config',
autospec=True)
def test_validate(self, validate_raid_config_mock):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config='some_raid_config')
task_mock = mock.MagicMock(node=node_mock)
raid_interface.validate(task_mock)
validate_raid_config_mock.assert_called_once_with(
raid_interface, task_mock, 'some_raid_config')
@mock.patch.object(driver_base.RAIDInterface, 'validate_raid_config',
autospec=True)
def test_validate_no_target_raid_config(self, validate_raid_config_mock):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config={})
task_mock = mock.MagicMock(node=node_mock)
raid_interface.validate(task_mock)
self.assertFalse(validate_raid_config_mock.called)
@mock.patch.object(raid, 'validate_configuration', autospec=True)
def test_validate_raid_config(self, common_validate_mock):
with open(driver_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
raid_schema = json.load(raid_schema_fobj)
raid_interface = MyRAIDInterface()
raid_interface.validate_raid_config('task', 'some_raid_config')
common_validate_mock.assert_called_once_with(
'some_raid_config', raid_schema)
@mock.patch.object(raid, 'get_logical_disk_properties',
autospec=True)
def test_get_logical_disk_properties(self, get_properties_mock):
with open(driver_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
raid_schema = json.load(raid_schema_fobj)
raid_interface = MyRAIDInterface()
raid_interface.get_logical_disk_properties()
get_properties_mock.assert_called_once_with(raid_schema)
@mock.patch.object(MyRAIDInterface, 'create_configuration', autospec=True)
@mock.patch.object(MyRAIDInterface, 'validate_raid_config',
autospec=True)
def test_apply_configuration(self, mock_validate, mock_create):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config=None)
task_mock = mock.MagicMock(node=node_mock)
mock_create.return_value = states.DEPLOYWAIT
raid_config = 'some_raid_config'
result = raid_interface.apply_configuration(task_mock, raid_config)
self.assertEqual(states.DEPLOYWAIT, result)
mock_validate.assert_called_once_with(raid_interface, task_mock,
raid_config)
mock_create.assert_called_once_with(raid_interface, task_mock,
create_root_volume=True,
create_nonroot_volumes=True,
delete_existing=True)
self.assertEqual(raid_config, node_mock.target_raid_config)
@mock.patch.object(MyRAIDInterface, 'create_configuration', autospec=True)
@mock.patch.object(MyRAIDInterface, 'validate_raid_config',
autospec=True)
def test_apply_configuration_delete_existing(self, mock_validate,
mock_create):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config=None)
task_mock = mock.MagicMock(node=node_mock)
mock_create.return_value = states.DEPLOYWAIT
raid_config = 'some_raid_config'
result = raid_interface.apply_configuration(task_mock, raid_config,
delete_existing=True)
self.assertEqual(states.DEPLOYWAIT, result)
mock_validate.assert_called_once_with(raid_interface, task_mock,
raid_config)
mock_create.assert_called_once_with(raid_interface, task_mock,
create_root_volume=True,
create_nonroot_volumes=True,
delete_existing=True)
self.assertEqual(raid_config, node_mock.target_raid_config)
@mock.patch.object(MyRAIDInterface, 'create_configuration', autospec=True)
@mock.patch.object(MyRAIDInterface, 'validate_raid_config',
autospec=True)
def test_apply_configuration_invalid(self, mock_validate, mock_create):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config=None)
task_mock = mock.MagicMock(node=node_mock)
mock_validate.side_effect = exception.InvalidParameterValue('bad')
raid_config = 'some_raid_config'
self.assertRaises(exception.InvalidParameterValue,
raid_interface.apply_configuration, task_mock,
raid_config)
mock_validate.assert_called_once_with(raid_interface, task_mock,
raid_config)
self.assertFalse(mock_create.called)
self.assertIsNone(node_mock.target_raid_config)
class TestDeployInterface(base.TestCase):
@mock.patch.object(driver_base.LOG, 'warning', autospec=True)
def test_warning_on_heartbeat(self, mock_log):
# NOTE(dtantsur): FakeDeploy does not override heartbeat
deploy = fake.FakeDeploy()
deploy.heartbeat(mock.Mock(node=mock.Mock(uuid='uuid',
driver='driver')),
'url', '3.2.0')
self.assertTrue(mock_log.called)
class MyBIOSInterface(driver_base.BIOSInterface):
def get_properties(self):
pass
def validate(self, task):
pass
@driver_base.cache_bios_settings
def apply_configuration(self, task, settings):
return "return_value_apply_configuration"
@driver_base.cache_bios_settings
def factory_reset(self, task):
return "return_value_factory_reset"
def cache_bios_settings(self, task):
pass
class TestBIOSInterface(base.TestCase):
@mock.patch.object(MyBIOSInterface, 'cache_bios_settings', autospec=True)
def test_apply_configuration_wrapper(self, cache_bios_settings_mock):
bios = MyBIOSInterface()
task_mock = mock.MagicMock()
actual = bios.apply_configuration(task_mock, "")
cache_bios_settings_mock.assert_called_once_with(bios, task_mock)
self.assertEqual(actual, "return_value_apply_configuration")
@mock.patch.object(MyBIOSInterface, 'cache_bios_settings', autospec=True)
def test_factory_reset_wrapper(self, cache_bios_settings_mock):
bios = MyBIOSInterface()
task_mock = mock.MagicMock()
actual = bios.factory_reset(task_mock)
cache_bios_settings_mock.assert_called_once_with(bios, task_mock)
self.assertEqual(actual, "return_value_factory_reset")
class TestBootInterface(base.TestCase):
def test_validate_rescue_default_impl(self):
boot = fake.FakeBoot()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
boot.validate_rescue, task_mock)
class TestManagementInterface(base.TestCase):
def test_inject_nmi_default_impl(self):
management = fake.FakeManagement()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
management.inject_nmi, task_mock)
def test_get_supported_boot_modes_default_impl(self):
management = fake.FakeManagement()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
management.get_supported_boot_modes, task_mock)
def test_set_boot_mode_default_impl(self):
management = fake.FakeManagement()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
management.set_boot_mode, task_mock, 'whatever')
def test_get_boot_mode_default_impl(self):
management = fake.FakeManagement()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
management.get_boot_mode, task_mock)
class TestBareDriver(base.TestCase):
def test_class_variables(self):
self.assertEqual(['boot', 'deploy', 'management', 'network', 'power'],
driver_base.BareDriver().core_interfaces)
self.assertEqual(
['bios', 'console', 'inspect', 'raid', 'rescue', 'storage'],
driver_base.BareDriver().optional_interfaces
)
| [((985, 1015), 'ironic.drivers.base.passthru', 'driver_base.passthru', (["['POST']"], {}), "(['POST'])\n", (1005, 1015), True, 'from ironic.drivers import base as driver_base\n'), ((1071, 1108), 'ironic.drivers.base.driver_passthru', 'driver_base.driver_passthru', (["['POST']"], {}), "(['POST'])\n", (1098, 1108), True, 'from ironic.drivers import base as driver_base\n'), ((1171, 1201), 'ironic.drivers.base.passthru', 'driver_base.passthru', (["['POST']"], {}), "(['POST'])\n", (1191, 1201), True, 'from ironic.drivers import base as driver_base\n'), ((1288, 1318), 'ironic.drivers.base.passthru', 'driver_base.passthru', (["['POST']"], {}), "(['POST'])\n", (1308, 1318), True, 'from ironic.drivers import base as driver_base\n'), ((1389, 1449), 'ironic.drivers.base.passthru', 'driver_base.passthru', (["['POST']"], {'require_exclusive_lock': '(False)'}), "(['POST'], require_exclusive_lock=False)\n", (1409, 1449), True, 'from ironic.drivers import base as driver_base\n'), ((1912, 1964), 'mock.patch.object', 'mock.patch.object', (['driver_base', '"""LOG"""'], {'autospec': '(True)'}), "(driver_base, 'LOG', autospec=True)\n", (1929, 1964), False, 'import mock\n'), ((2229, 2281), 'mock.patch.object', 'mock.patch.object', (['driver_base', '"""LOG"""'], {'autospec': '(True)'}), "(driver_base, 'LOG', autospec=True)\n", (2246, 2281), False, 'import mock\n'), ((8378, 8464), 'mock.patch.object', 'mock.patch.object', (['driver_base', '"""_validate_argsinfo"""'], {'spec_set': '(True)', 'autospec': '(True)'}), "(driver_base, '_validate_argsinfo', spec_set=True,\n autospec=True)\n", (8395, 8464), False, 'import mock\n'), ((15890, 15976), 'mock.patch.object', 'mock.patch.object', (['driver_base', '"""_validate_argsinfo"""'], {'spec_set': '(True)', 'autospec': '(True)'}), "(driver_base, '_validate_argsinfo', spec_set=True,\n autospec=True)\n", (15907, 15976), False, 'import mock\n'), ((24661, 24748), 'mock.patch.object', 'mock.patch.object', (['driver_base.RAIDInterface', '"""validate_raid_config"""'], {'autospec': '(True)'}), "(driver_base.RAIDInterface, 'validate_raid_config',\n autospec=True)\n", (24678, 24748), False, 'import mock\n'), ((25161, 25248), 'mock.patch.object', 'mock.patch.object', (['driver_base.RAIDInterface', '"""validate_raid_config"""'], {'autospec': '(True)'}), "(driver_base.RAIDInterface, 'validate_raid_config',\n autospec=True)\n", (25178, 25248), False, 'import mock\n'), ((25608, 25672), 'mock.patch.object', 'mock.patch.object', (['raid', '"""validate_configuration"""'], {'autospec': '(True)'}), "(raid, 'validate_configuration', autospec=True)\n", (25625, 25672), False, 'import mock\n'), ((26088, 26157), 'mock.patch.object', 'mock.patch.object', (['raid', '"""get_logical_disk_properties"""'], {'autospec': '(True)'}), "(raid, 'get_logical_disk_properties', autospec=True)\n", (26105, 26157), False, 'import mock\n'), ((26547, 26620), 'mock.patch.object', 'mock.patch.object', (['MyRAIDInterface', '"""create_configuration"""'], {'autospec': '(True)'}), "(MyRAIDInterface, 'create_configuration', autospec=True)\n", (26564, 26620), False, 'import mock\n'), ((26626, 26699), 'mock.patch.object', 'mock.patch.object', (['MyRAIDInterface', '"""validate_raid_config"""'], {'autospec': '(True)'}), "(MyRAIDInterface, 'validate_raid_config', autospec=True)\n", (26643, 26699), False, 'import mock\n'), ((27654, 27727), 'mock.patch.object', 'mock.patch.object', (['MyRAIDInterface', '"""create_configuration"""'], {'autospec': '(True)'}), "(MyRAIDInterface, 'create_configuration', autospec=True)\n", (27671, 
27727), False, 'import mock\n'), ((27733, 27806), 'mock.patch.object', 'mock.patch.object', (['MyRAIDInterface', '"""validate_raid_config"""'], {'autospec': '(True)'}), "(MyRAIDInterface, 'validate_raid_config', autospec=True)\n", (27750, 27806), False, 'import mock\n'), ((28900, 28973), 'mock.patch.object', 'mock.patch.object', (['MyRAIDInterface', '"""create_configuration"""'], {'autospec': '(True)'}), "(MyRAIDInterface, 'create_configuration', autospec=True)\n", (28917, 28973), False, 'import mock\n'), ((28979, 29052), 'mock.patch.object', 'mock.patch.object', (['MyRAIDInterface', '"""validate_raid_config"""'], {'autospec': '(True)'}), "(MyRAIDInterface, 'validate_raid_config', autospec=True)\n", (28996, 29052), False, 'import mock\n'), ((29876, 29936), 'mock.patch.object', 'mock.patch.object', (['driver_base.LOG', '"""warning"""'], {'autospec': '(True)'}), "(driver_base.LOG, 'warning', autospec=True)\n", (29893, 29936), False, 'import mock\n'), ((30801, 30873), 'mock.patch.object', 'mock.patch.object', (['MyBIOSInterface', '"""cache_bios_settings"""'], {'autospec': '(True)'}), "(MyBIOSInterface, 'cache_bios_settings', autospec=True)\n", (30818, 30873), False, 'import mock\n'), ((31225, 31297), 'mock.patch.object', 'mock.patch.object', (['MyBIOSInterface', '"""cache_bios_settings"""'], {'autospec': '(True)'}), "(MyBIOSInterface, 'cache_bios_settings', autospec=True)\n", (31242, 31297), False, 'import mock\n'), ((1247, 1281), 'ironic.common.exception.IronicException', 'exception.IronicException', (['"""Fake!"""'], {}), "('Fake!')\n", (1272, 1281), False, 'from ironic.common import exception\n'), ((3673, 3689), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3687, 3689), False, 'import mock\n'), ((3974, 4010), 'ironic.drivers.base._validate_argsinfo', 'driver_base._validate_argsinfo', (['None'], {}), '(None)\n', (4004, 4010), True, 'from ironic.drivers import base as driver_base\n'), ((4019, 4053), 'ironic.drivers.base._validate_argsinfo', 'driver_base._validate_argsinfo', (['{}'], {}), '({})\n', (4049, 4053), True, 'from ironic.drivers import base as driver_base\n'), ((4100, 4166), 'ironic.drivers.base._validate_argsinfo', 'driver_base._validate_argsinfo', (["{'arg1': {'description': 'desc1'}}"], {}), "({'arg1': {'description': 'desc1'}})\n", (4130, 4166), True, 'from ironic.drivers import base as driver_base\n'), ((4200, 4322), 'ironic.drivers.base._validate_argsinfo', 'driver_base._validate_argsinfo', (["{'arg1': {'description': 'desc1', 'required': True}, 'arg2': {'description':\n 'desc2'}}"], {}), "({'arg1': {'description': 'desc1', 'required':\n True}, 'arg2': {'description': 'desc2'}})\n", (4230, 4322), True, 'from ironic.drivers import base as driver_base\n'), ((6578, 6613), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(10)'}), '(priority=10)\n', (6600, 6613), True, 'from ironic.drivers import base as driver_base\n'), ((7026, 7095), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(0)', 'abortable': '(True)', 'argsinfo': 'argsinfo'}), '(priority=0, abortable=True, argsinfo=argsinfo)\n', (7048, 7095), True, 'from ironic.drivers import base as driver_base\n'), ((7453, 7490), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '"""hi"""'}), "(priority='hi')\n", (7475, 7490), True, 'from ironic.drivers import base as driver_base\n'), ((7938, 7990), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(0)', 'abortable': '"""blue"""'}), "(priority=0, 
abortable='blue')\n", (7960, 7990), True, 'from ironic.drivers import base as driver_base\n'), ((8573, 8611), 'ironic.common.exception.InvalidParameterValue', 'exception.InvalidParameterValue', (['"""bad"""'], {}), "('bad')\n", (8604, 8611), False, 'from ironic.common import exception\n'), ((8624, 8672), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(0)', 'argsinfo': '(100)'}), '(priority=0, argsinfo=100)\n', (8646, 8672), True, 'from ironic.drivers import base as driver_base\n'), ((9251, 9278), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': '[]'}), '(spec_set=[])\n', (9265, 9278), False, 'import mock\n'), ((9306, 9333), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': '[]'}), '(spec_set=[])\n', (9320, 9333), False, 'import mock\n'), ((9354, 9381), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': '[]'}), '(spec_set=[])\n', (9368, 9381), False, 'import mock\n'), ((14568, 14584), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (14582, 14584), False, 'import mock\n'), ((14810, 14846), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(10)'}), '(priority=10)\n', (14833, 14846), True, 'from ironic.drivers import base as driver_base\n'), ((15203, 15257), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(0)', 'argsinfo': 'argsinfo'}), '(priority=0, argsinfo=argsinfo)\n', (15226, 15257), True, 'from ironic.drivers import base as driver_base\n'), ((15525, 15563), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '"""hi"""'}), "(priority='hi')\n", (15548, 15563), True, 'from ironic.drivers import base as driver_base\n'), ((16086, 16124), 'ironic.common.exception.InvalidParameterValue', 'exception.InvalidParameterValue', (['"""bad"""'], {}), "('bad')\n", (16117, 16124), False, 'from ironic.common import exception\n'), ((16137, 16186), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(0)', 'argsinfo': '(100)'}), '(priority=0, argsinfo=100)\n', (16160, 16186), True, 'from ironic.drivers import base as driver_base\n'), ((16619, 16635), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (16633, 16635), False, 'import mock\n'), ((17047, 17083), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(10)'}), '(priority=10)\n', (17070, 17083), True, 'from ironic.drivers import base as driver_base\n'), ((17097, 17132), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(11)'}), '(priority=11)\n', (17119, 17132), True, 'from ironic.drivers import base as driver_base\n'), ((17846, 17901), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(0)', 'argsinfo': 'dargsinfo'}), '(priority=0, argsinfo=dargsinfo)\n', (17869, 17901), True, 'from ironic.drivers import base as driver_base\n'), ((17915, 17969), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(0)', 'argsinfo': 'cargsinfo'}), '(priority=0, argsinfo=cargsinfo)\n', (17937, 17969), True, 'from ironic.drivers import base as driver_base\n'), ((18757, 18812), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(0)', 'argsinfo': 'dargsinfo'}), '(priority=0, argsinfo=dargsinfo)\n', (18780, 18812), True, 'from ironic.drivers import base as driver_base\n'), ((18826, 18880), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(0)', 'argsinfo': 'cargsinfo'}), '(priority=0, argsinfo=cargsinfo)\n', (18848, 18880), True, 
'from ironic.drivers import base as driver_base\n'), ((19601, 19628), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': '[]'}), '(spec_set=[])\n', (19615, 19628), False, 'import mock\n'), ((19656, 19683), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': '[]'}), '(spec_set=[])\n', (19670, 19683), False, 'import mock\n'), ((19704, 19731), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': '[]'}), '(spec_set=[])\n', (19718, 19731), False, 'import mock\n'), ((24887, 24940), 'mock.MagicMock', 'mock.MagicMock', ([], {'target_raid_config': '"""some_raid_config"""'}), "(target_raid_config='some_raid_config')\n", (24901, 24940), False, 'import mock\n'), ((24961, 24991), 'mock.MagicMock', 'mock.MagicMock', ([], {'node': 'node_mock'}), '(node=node_mock)\n', (24975, 24991), False, 'import mock\n'), ((25409, 25446), 'mock.MagicMock', 'mock.MagicMock', ([], {'target_raid_config': '{}'}), '(target_raid_config={})\n', (25423, 25446), False, 'import mock\n'), ((25467, 25497), 'mock.MagicMock', 'mock.MagicMock', ([], {'node': 'node_mock'}), '(node=node_mock)\n', (25481, 25497), False, 'import mock\n'), ((26854, 26893), 'mock.MagicMock', 'mock.MagicMock', ([], {'target_raid_config': 'None'}), '(target_raid_config=None)\n', (26868, 26893), False, 'import mock\n'), ((26914, 26944), 'mock.MagicMock', 'mock.MagicMock', ([], {'node': 'node_mock'}), '(node=node_mock)\n', (26928, 26944), False, 'import mock\n'), ((28026, 28065), 'mock.MagicMock', 'mock.MagicMock', ([], {'target_raid_config': 'None'}), '(target_raid_config=None)\n', (28040, 28065), False, 'import mock\n'), ((28086, 28116), 'mock.MagicMock', 'mock.MagicMock', ([], {'node': 'node_mock'}), '(node=node_mock)\n', (28100, 28116), False, 'import mock\n'), ((29215, 29254), 'mock.MagicMock', 'mock.MagicMock', ([], {'target_raid_config': 'None'}), '(target_raid_config=None)\n', (29229, 29254), False, 'import mock\n'), ((29275, 29305), 'mock.MagicMock', 'mock.MagicMock', ([], {'node': 'node_mock'}), '(node=node_mock)\n', (29289, 29305), False, 'import mock\n'), ((29342, 29380), 'ironic.common.exception.InvalidParameterValue', 'exception.InvalidParameterValue', (['"""bad"""'], {}), "('bad')\n", (29373, 29380), False, 'from ironic.common import exception\n'), ((30070, 30087), 'ironic.drivers.modules.fake.FakeDeploy', 'fake.FakeDeploy', ([], {}), '()\n', (30085, 30087), False, 'from ironic.drivers.modules import fake\n'), ((31001, 31017), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (31015, 31017), False, 'import mock\n'), ((31419, 31435), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (31433, 31435), False, 'import mock\n'), ((31728, 31743), 'ironic.drivers.modules.fake.FakeBoot', 'fake.FakeBoot', ([], {}), '()\n', (31741, 31743), False, 'from ironic.drivers.modules import fake\n'), ((31764, 31797), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': "['node']"}), "(spec_set=['node'])\n", (31778, 31797), False, 'import mock\n'), ((32036, 32057), 'ironic.drivers.modules.fake.FakeManagement', 'fake.FakeManagement', ([], {}), '()\n', (32055, 32057), False, 'from ironic.drivers.modules import fake\n'), ((32078, 32111), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': "['node']"}), "(spec_set=['node'])\n", (32092, 32111), False, 'import mock\n'), ((32317, 32338), 'ironic.drivers.modules.fake.FakeManagement', 'fake.FakeManagement', ([], {}), '()\n', (32336, 32338), False, 'from ironic.drivers.modules import fake\n'), ((32359, 32392), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': "['node']"}), "(spec_set=['node'])\n", (32373, 
32392), False, 'import mock\n'), ((32601, 32622), 'ironic.drivers.modules.fake.FakeManagement', 'fake.FakeManagement', ([], {}), '()\n', (32620, 32622), False, 'from ironic.drivers.modules import fake\n'), ((32643, 32676), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': "['node']"}), "(spec_set=['node'])\n", (32657, 32676), False, 'import mock\n'), ((32886, 32907), 'ironic.drivers.modules.fake.FakeManagement', 'fake.FakeManagement', ([], {}), '()\n', (32905, 32907), False, 'from ironic.drivers.modules import fake\n'), ((32928, 32961), 'mock.MagicMock', 'mock.MagicMock', ([], {'spec_set': "['node']"}), "(spec_set=['node'])\n", (32942, 32961), False, 'import mock\n'), ((9654, 9688), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(0)'}), '(priority=0)\n', (9676, 9688), True, 'from ironic.drivers import base as driver_base\n'), ((9767, 9818), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(10)', 'abortable': '(True)'}), '(priority=10, abortable=True)\n', (9789, 9818), True, 'from ironic.drivers import base as driver_base\n'), ((10060, 10094), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(0)'}), '(priority=0)\n', (10082, 10094), True, 'from ironic.drivers import base as driver_base\n'), ((10174, 10225), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(20)', 'abortable': '(True)'}), '(priority=20, abortable=True)\n', (10196, 10225), True, 'from ironic.drivers import base as driver_base\n'), ((10469, 10587), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(0)', 'abortable': '(True)', 'argsinfo': "{'arg1': {'description': 'desc1', 'required': True}}"}), "(priority=0, abortable=True, argsinfo={'arg1': {\n 'description': 'desc1', 'required': True}})\n", (10491, 10587), True, 'from ironic.drivers import base as driver_base\n'), ((10782, 10868), 'ironic.drivers.base.clean_step', 'driver_base.clean_step', ([], {'priority': '(15)', 'argsinfo': "{'arg10': {'description': 'desc10'}}"}), "(priority=15, argsinfo={'arg10': {'description':\n 'desc10'}})\n", (10804, 10868), True, 'from ironic.drivers import base as driver_base\n'), ((20004, 20039), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(0)'}), '(priority=0)\n', (20027, 20039), True, 'from ironic.drivers import base as driver_base\n'), ((20116, 20152), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(10)'}), '(priority=10)\n', (20139, 20152), True, 'from ironic.drivers import base as driver_base\n'), ((20389, 20424), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(0)'}), '(priority=0)\n', (20412, 20424), True, 'from ironic.drivers import base as driver_base\n'), ((20502, 20538), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(20)'}), '(priority=20)\n', (20525, 20538), True, 'from ironic.drivers import base as driver_base\n'), ((20779, 20881), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(0)', 'argsinfo': "{'arg1': {'description': 'desc1', 'required': True}}"}), "(priority=0, argsinfo={'arg1': {'description':\n 'desc1', 'required': True}})\n", (20802, 20881), True, 'from ironic.drivers import base as driver_base\n'), ((21077, 21164), 'ironic.drivers.base.deploy_step', 'driver_base.deploy_step', ([], {'priority': '(15)', 'argsinfo': "{'arg10': {'description': 'desc10'}}"}), "(priority=15, argsinfo={'arg10': 
{'description':\n 'desc10'}})\n", (21100, 21164), True, 'from ironic.drivers import base as driver_base\n'), ((25838, 25865), 'json.load', 'json.load', (['raid_schema_fobj'], {}), '(raid_schema_fobj)\n', (25847, 25865), False, 'import json\n'), ((26352, 26379), 'json.load', 'json.load', (['raid_schema_fobj'], {}), '(raid_schema_fobj)\n', (26361, 26379), False, 'import json\n'), ((33270, 33294), 'ironic.drivers.base.BareDriver', 'driver_base.BareDriver', ([], {}), '()\n', (33292, 33294), True, 'from ironic.drivers import base as driver_base\n'), ((33423, 33447), 'ironic.drivers.base.BareDriver', 'driver_base.BareDriver', ([], {}), '()\n', (33445, 33447), True, 'from ironic.drivers import base as driver_base\n'), ((30128, 30167), 'mock.Mock', 'mock.Mock', ([], {'uuid': '"""uuid"""', 'driver': '"""driver"""'}), "(uuid='uuid', driver='driver')\n", (30137, 30167), False, 'import mock\n')] |
valerymelou/opentimesheet-server | opentimesheet/profiles/tests/test_models.py | 0da97ebb3c3e59962132d1bc5e83e1d727f7331b | import pytest
from opentimesheet.core.tests import TenantTestCase
@pytest.mark.usefixtures("profile")
class TestProfile(TenantTestCase):
def test__str__(self):
assert (
self.profile.first_name + " " + self.profile.last_name
== self.profile.__str__()
)
| [((70, 104), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""profile"""'], {}), "('profile')\n", (93, 104), False, 'import pytest\n')] |
chuckie82/ami | ami/flowchart/library/Display.py | 7adb72c709afe4c1af53ef7f0d2b0e3639c63bf3 | from ami.flowchart.library.DisplayWidgets import ScalarWidget, ScatterWidget, WaveformWidget, \
ImageWidget, ObjectWidget, LineWidget, TimeWidget, HistogramWidget, \
Histogram2DWidget
from ami.flowchart.library.common import CtrlNode
from amitypes import Array1d, Array2d
from typing import Any
import ami.graph_nodes as gn
class ScalarViewer(CtrlNode):
"""
ScalarViewer displays the value of a scalar.
"""
nodeName = "ScalarViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"In": {"io": "in", "ttype": float}},
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ScalarWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ScalarWidget', 'terms': terms, 'topics': topics}
class WaveformViewer(CtrlNode):
"""
WaveformViewer displays 1D arrays.
"""
nodeName = "WaveformViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'WaveformWidget', 'terms': terms, 'topics': topics}
class ImageViewer(CtrlNode):
"""
ImageViewer displays 2D arrays.
"""
nodeName = "ImageViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Array2d}}, viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ImageWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ImageWidget', 'terms': terms, 'topics': topics}
class ObjectViewer(CtrlNode):
"""
ObjectViewer displays string representation of a python object.
"""
nodeName = "ObjectViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Any}}, viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ObjectWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ObjectWidget', 'terms': terms, 'topics': topics}
class Histogram(CtrlNode):
"""
Histogram plots a histogram created from Binning.
"""
nodeName = "Histogram"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"Bins": {"io": "in", "ttype": Array1d},
"Counts": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, HistogramWidget, **kwargs)
def addInput(self, **args):
self.addTerminal(name="Bins", io='in', ttype=Array1d, **args)
self.addTerminal(name="Counts", io='in', ttype=Array1d, **args)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'HistogramWidget', 'terms': terms, 'topics': topics}
class Histogram2D(CtrlNode):
"""
Histogram2D plots a 2d histogram created from Binning2D.
"""
nodeName = "Histogram2D"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"XBins": {"io": "in", "ttype": Array1d},
"YBins": {"io": "in", "ttype": Array1d},
"Counts": {"io": "in", "ttype": Array2d}},
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, Histogram2DWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'Histogram2DWidget', 'terms': terms, 'topics': topics}
class ScatterPlot(CtrlNode):
"""
Scatter Plot collects two scalars and plots them against each other.
"""
nodeName = "ScatterPlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 100, 'min': 1}),
('Unique', 'check')]
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ScatterWidget, **kwargs)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def addInput(self, **args):
self.addTerminal(name="X", io='in', ttype=float, **args)
self.addTerminal(name="Y", io='in', ttype=float, **args)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
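        # Two-stage pipeline: a RollingBuffer node keeps the most recent
        # "Num Points" (X, Y) samples, and a Map node transposes the buffered
        # pairs into separate X and Y sequences (zip(*a)) for the scatter plot.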
nodes = [gn.RollingBuffer(name=self.name()+"_buffer",
N=self.values['Num Points'], unique=self.values['Unique'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation",
inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a),
**kwargs)]
return nodes
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ScatterWidget', 'terms': terms, 'topics': topics}
class ScalarPlot(CtrlNode):
"""
Scalar Plot collects scalars and plots them.
"""
nodeName = "ScalarPlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 100, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def addInput(self, **args):
self.addTerminal(name="Y", io='in', ttype=float, **args)
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
if len(inputs.values()) > 1:
node = [gn.RollingBuffer(name=self.name()+"_buffer", N=self.values['Num Points'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation", inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a), **kwargs)]
else:
node = gn.RollingBuffer(name=self.name(), N=self.values['Num Points'],
inputs=inputs, outputs=outputs, **kwargs)
return node
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'WaveformWidget', 'terms': terms, 'topics': topics}
class LinePlot(CtrlNode):
"""
Line Plot plots arrays.
"""
nodeName = "LinePlot"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": Array1d},
"Y": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, LineWidget, **kwargs)
def addInput(self, **args):
group = self.nextGroupName()
self.addTerminal(name="X", io='in', ttype=Array1d, group=group, **args)
self.addTerminal(name="Y", io='in', ttype=Array1d, group=group, **args)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'LineWidget', 'terms': terms, 'topics': topics}
class TimePlot(CtrlNode):
"""
Plot a number against time of day.
"""
nodeName = "TimePlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 1000, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, TimeWidget, **kwargs)
def addInput(self, **args):
self.addTerminal(name="X", io='in', ttype=float, **args)
self.addTerminal(name="Y", io='in', ttype=float, **args)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
nodes = [gn.RollingBuffer(name=self.name()+"_buffer", N=self.values['Num Points'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation", inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a), **kwargs)]
return nodes
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'TimeWidget', 'terms': terms, 'topics': topics}
| [] |
ShujaKhalid/deep-rl | deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | '''OpenGL extension ARB.transform_feedback_instanced
This module customises the behaviour of the
OpenGL.raw.GL.ARB.transform_feedback_instanced to provide a more
Python-friendly API
Overview (from the spec)
Multiple instances of geometry may be specified to the GL by calling
functions such as DrawArraysInstanced and DrawElementsInstanced. Further,
the results of a transform feedback operation may be returned to the GL
by calling DrawTransformFeedback, or DrawTransformFeedbackStream. However,
it is not presently possible to draw multiple instances of data
transform feedback without using a query and the resulting round trip from
server to client.
This extension adds functionality to draw multiple instances of the result
of a transform feedback operation.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/transform_feedback_instanced.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback_instanced import *
from OpenGL.raw.GL.ARB.transform_feedback_instanced import _EXTENSION_NAME
def glInitTransformFeedbackInstancedARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
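
# Illustrative usage sketch (an assumption, not part of the generated wrapper):
# with a current GL context one would typically guard the instanced draw call
# exposed by the raw binding, e.g.
#
#     if glInitTransformFeedbackInstancedARB():
#         glDrawTransformFeedbackInstanced(GL_TRIANGLES, xfb_id, instance_count)
#
# where xfb_id is a transform feedback object name and instance_count is the
# number of instances to draw.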
### END AUTOGENERATED SECTION | [((1359, 1401), 'OpenGL.extensions.hasGLExtension', 'extensions.hasGLExtension', (['_EXTENSION_NAME'], {}), '(_EXTENSION_NAME)\n', (1384, 1401), False, 'from OpenGL import extensions\n')] |
xbabka01/retdec-regression-tests | features/cpp/simple/test.py | 1ac40cca5165740364e6f7fb72b20820eac9bc7c | from regression_tests import *
class TestBase(Test):
def test_for_main(self):
assert self.out_c.has_funcs('main') or self.out_c.has_funcs('entry_point')
def test_check_main_is_not_ctor_or_dtor(self):
for c in self.out_config.classes:
assert "main" not in c.constructors
assert "main" not in c.destructors
class TestAll(TestBase):
settings = TestSettings(
input=files_in_dir('inputs/symbols'),
args='-k'
)
def test_for_string(self):
# printf() is used -> '\n' at the end of the string
# puts() is used -> no '\n' at the end of the string
assert self.out_c.has_string_literal_matching( r'ClassA::ClassA(\\n)?' )
assert self.out_c.has_string_literal_matching( r'%i %i(\\n)?' )
assert self.out_c.has_string_literal_matching( r'~ClassA::ClassA(\\n)?' )
def test_for_vtables(self):
assert self.out_config.vtable_count == 1
vtable = self.out_config.vtables[0]
assert vtable.item_count == 1
assert "doSomething" in vtable.items[0].target_name
def test_for_classes(self):
assert self.out_config.classes_count == 1
c = self.out_config.classes[0]
assert len(c.constructors) == 2
assert len(c.destructors) == 2
assert len(c.virtualMethods) == 1
class TestAllStripped(TestBase):
settings = TestSettings(
input=files_in_dir('inputs/stripped'),
args='-k'
)
def test_for_vtables(self):
assert self.out_config.vtable_count == 1
vtable = self.out_config.vtables[0]
assert vtable.item_count == 1
assert vtable.items[0].target_name # there is some (!empty) function name
def test_for_classes(self):
assert self.out_config.classes_count == 1
c = self.out_config.classes[0]
assert len(c.virtualMethods) == 1
assert len(c.constructors) == 2
assert len(c.destructors) == 2
class TestMsvc(TestBase):
settings = TestSettings(
input='inputs/msvc/simple-msvc-release.ex',
args='-k'
)
settings_d = TestSettings(
input='inputs/msvc/simple-msvc-debug.ex',
args='-k'
)
def test_for_string(self):
assert self.out_c.has_string_literal( 'ClassA::ClassA\\n' )
assert self.out_c.has_string_literal( '~ClassA::ClassA\\n' )
assert self.out_c.has_string_literal( '%i %i\\n' )
def test_for_vtables(self):
assert self.out_config.vtable_count == 2
vtable1 = self.out_config.vtables[0]
assert vtable1.item_count == 1
vtable2 = self.out_config.vtables[0]
assert vtable2.item_count == 1
| [] |
windar427/find_alpha | src/experiment.py | dbca4e677c6cdc144f20f6259c07291b5d3e6eed | from .lib.DownloadData import DownloadData
| [] |
songchenwen/icloud-drive-docker | src/__init__.py | 7188dfbcc34e29ddbeeb1324c62ea77bed8f0576 | __author__ = 'Mandar Patil (mandarons@pm.me)'
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
| [((64, 126), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (87, 126), False, 'import warnings\n')] |
rafael-torraca/delivery | test_basico.py | 298db3c5d74938dc34687e7b65ee72a847e4deeb |
def test_one_plus_one_is_two():
    assert 1 + 1 == 2  # assert expects the expression to be true; if it is false, the test fails
def test_negative_1_plus_1_is_3():
assert 1 + 1 == 3
| [] |
rohernandezz/coldtype | setup.py | 724234fce454699a469d17b6c78ae50fa8138169 | import setuptools
long_description = """
# Coldtype
### Programmatic display typography
More info available at: [coldtype.goodhertz.com](https://coldtype.goodhertz.com)
"""
setuptools.setup(
name="coldtype",
version="0.6.6",
author="Rob Stenson / Goodhertz",
author_email="rob@goodhertz.com",
description="Functions for manual vectorized typesetting",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/goodhertz/coldtype",
#package_dir={"": "coldtype"},
packages=[
"coldtype",
"coldtype.sh",
"coldtype.fx",
"coldtype.img",
"coldtype.time",
"coldtype.midi",
"coldtype.pens",
"coldtype.text",
"coldtype.grid",
"coldtype.color",
"coldtype.capture",
"coldtype.blender",
"coldtype.geometry",
"coldtype.time.nle",
"coldtype.renderer",
"coldtype.webserver",
"coldtype.renderable",
"coldtype.fontgoggles",
"coldtype.interpolation",
"coldtype.renderer.winman",
"coldtype.fontgoggles.font",
"coldtype.fontgoggles.misc",
"coldtype.fontgoggles.compile",
],
include_package_data=True,
package_data={
"": [
"webserver/webviewer.html",
"demo/RecMono-CasualItalic.ttf",
"demo/ColdtypeObviously-VF.ttf",
"demo/MutatorSans.ttf",
"demo/demo.py",
"demo/midi.py",
"demo/blank.py",
"demo/boiler.py",
"renderer/picklejar.py",
"renderer/.coldtype.py"
],
},
entry_points={
'console_scripts': [
'coldtype = coldtype.renderer:main'
],
},
extras_require={
"skia": [
"skia-python>=86.0",
],
"viewer": [
"glfw",
"PyOpenGL",
"PyOpenGL-accelerate",
"skia-python>=86.0",
"skia-pathops", # can this be taken from skia-python?
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
"noise",
"ufo2ft",
"numpy",
],
"webviewer": [
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
],
"experimental": [
"pynput",
"rtmidi",
"noise",
],
"c": [
"srt",
"noise",
],
"unicode": [
"unicodedata2"
],
"blender": [
"skia-pathops"
],
"notebook": [
"skia-pathops",
"skia-python",
]
},
install_requires=[
"lxml",
"fonttools[ufo]",
"fontPens",
"fontParts",
"more-itertools",
"easing-functions",
"timecode",
"mido",
"defcon",
"freetype-py",
"uharfbuzz>=0.14.0",
"python-bidi"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| [((177, 2174), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""coldtype"""', 'version': '"""0.6.6"""', 'author': '"""Rob Stenson / Goodhertz"""', 'author_email': '"""rob@goodhertz.com"""', 'description': '"""Functions for manual vectorized typesetting"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/goodhertz/coldtype"""', 'packages': "['coldtype', 'coldtype.sh', 'coldtype.fx', 'coldtype.img', 'coldtype.time',\n 'coldtype.midi', 'coldtype.pens', 'coldtype.text', 'coldtype.grid',\n 'coldtype.color', 'coldtype.capture', 'coldtype.blender',\n 'coldtype.geometry', 'coldtype.time.nle', 'coldtype.renderer',\n 'coldtype.webserver', 'coldtype.renderable', 'coldtype.fontgoggles',\n 'coldtype.interpolation', 'coldtype.renderer.winman',\n 'coldtype.fontgoggles.font', 'coldtype.fontgoggles.misc',\n 'coldtype.fontgoggles.compile']", 'include_package_data': '(True)', 'package_data': "{'': ['webserver/webviewer.html', 'demo/RecMono-CasualItalic.ttf',\n 'demo/ColdtypeObviously-VF.ttf', 'demo/MutatorSans.ttf', 'demo/demo.py',\n 'demo/midi.py', 'demo/blank.py', 'demo/boiler.py',\n 'renderer/picklejar.py', 'renderer/.coldtype.py']}", 'entry_points': "{'console_scripts': ['coldtype = coldtype.renderer:main']}", 'extras_require': "{'skia': ['skia-python>=86.0'], 'viewer': ['glfw', 'PyOpenGL',\n 'PyOpenGL-accelerate', 'skia-python>=86.0', 'skia-pathops',\n 'SimpleWebSocketServer', 'watchdog<2.0.0', 'noise', 'ufo2ft', 'numpy'],\n 'webviewer': ['SimpleWebSocketServer', 'watchdog<2.0.0'],\n 'experimental': ['pynput', 'rtmidi', 'noise'], 'c': ['srt', 'noise'],\n 'unicode': ['unicodedata2'], 'blender': ['skia-pathops'], 'notebook': [\n 'skia-pathops', 'skia-python']}", 'install_requires': "['lxml', 'fonttools[ufo]', 'fontPens', 'fontParts', 'more-itertools',\n 'easing-functions', 'timecode', 'mido', 'defcon', 'freetype-py',\n 'uharfbuzz>=0.14.0', 'python-bidi']", 'classifiers': "['Programming Language :: Python :: 3', 'Operating System :: OS Independent']"}), "(name='coldtype', version='0.6.6', author=\n 'Rob Stenson / Goodhertz', author_email='rob@goodhertz.com',\n description='Functions for manual vectorized typesetting',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/goodhertz/coldtype', packages=\n ['coldtype', 'coldtype.sh', 'coldtype.fx', 'coldtype.img',\n 'coldtype.time', 'coldtype.midi', 'coldtype.pens', 'coldtype.text',\n 'coldtype.grid', 'coldtype.color', 'coldtype.capture',\n 'coldtype.blender', 'coldtype.geometry', 'coldtype.time.nle',\n 'coldtype.renderer', 'coldtype.webserver', 'coldtype.renderable',\n 'coldtype.fontgoggles', 'coldtype.interpolation',\n 'coldtype.renderer.winman', 'coldtype.fontgoggles.font',\n 'coldtype.fontgoggles.misc', 'coldtype.fontgoggles.compile'],\n include_package_data=True, package_data={'': [\n 'webserver/webviewer.html', 'demo/RecMono-CasualItalic.ttf',\n 'demo/ColdtypeObviously-VF.ttf', 'demo/MutatorSans.ttf', 'demo/demo.py',\n 'demo/midi.py', 'demo/blank.py', 'demo/boiler.py',\n 'renderer/picklejar.py', 'renderer/.coldtype.py']}, entry_points={\n 'console_scripts': ['coldtype = coldtype.renderer:main']},\n extras_require={'skia': ['skia-python>=86.0'], 'viewer': ['glfw',\n 'PyOpenGL', 'PyOpenGL-accelerate', 'skia-python>=86.0', 'skia-pathops',\n 'SimpleWebSocketServer', 'watchdog<2.0.0', 'noise', 'ufo2ft', 'numpy'],\n 'webviewer': ['SimpleWebSocketServer', 'watchdog<2.0.0'],\n 'experimental': ['pynput', 'rtmidi', 
'noise'], 'c': ['srt', 'noise'],\n 'unicode': ['unicodedata2'], 'blender': ['skia-pathops'], 'notebook': [\n 'skia-pathops', 'skia-python']}, install_requires=['lxml',\n 'fonttools[ufo]', 'fontPens', 'fontParts', 'more-itertools',\n 'easing-functions', 'timecode', 'mido', 'defcon', 'freetype-py',\n 'uharfbuzz>=0.14.0', 'python-bidi'], classifiers=[\n 'Programming Language :: Python :: 3',\n 'Operating System :: OS Independent'])\n", (193, 2174), False, 'import setuptools\n')] |
xdedss/SuccessiveConvexification | GFOLD_problem.py | 8b330b64a31f546ce92c1e34036c212484cbae5e | # -*- coding: utf-8 -*-
# GFOLD_static_p3p4
min_=min
from cvxpy import *
import cvxpy_codegen as cpg
from time import time
import numpy as np
import sys
import GFOLD_params
''' As defined in the paper...
PROBLEM 3: Minimum Landing Error (tf roughly solved)
MINIMIZE : norm of landing error vector
SUBJ TO :
0) initial conditions satisfied (position, velocity)
1) final conditions satisfied (altitude, velocity)
2) dynamics always satisfied
3) x stays in cone at all times
4) relaxed convexified mass and thrust constraints
5) thrust pointing constraint
6) sub-surface flight constraint
PROBLEM 4: Minimum Fuel Use
MAXIMIZE : landing mass, opt variables are dynamical and
SUBJ TO :
0) same constraints as p1, plus:
1) landing point must be equal or better than that found by p1
'''
def solve(params, params_super = None, codegen = False, verbose=False):
#super params
if (params_super == None):
params_super = GFOLD_params.SuperParams() # default
N = params_super.N
    # optimization variables
x =Variable(6,N,name='var_x') # state vector (3position,3velocity)
u =Variable(3,N,name='var_u') # u = Tc/mass because Tc[:,n]/m[n] is not allowed by DCP
z= Variable(1,N,name='var_z') # z = ln(mass)
s= Variable(1,N,name='var_s') # thrust slack parameter
# Parameters
x0 = Parameter(6, 1, name="x0")
xf = Parameter(6, 1, name="xf")
z0_term_inv = Parameter(1, N, name="z0_term_inv", sign='positive')
z0_term_log = Parameter(1, N, name="z0_term_log")
g = Parameter(3, 1, name="g_vec")
p_cs_cos = Parameter(1, N, name='p_cs_cos')
sparse_params = Parameter(7, 1, name="sparse_params", sign='positive')
m_wet_log = Parameter(2, 1, name='m_wet_log')
if (not codegen):
x0.value = params.x0.reshape(6, 1)
xf.value = params.xf.reshape(6, 1)
z0_term_inv.value = params.z0_term_inv.reshape(1, N)
z0_term_log.value = params.z0_term_log.reshape(1, N)
g.value = params.g.reshape(3, 1)
p_cs_cos.value = params.p_cs_cos.reshape(1, N)
m_wet_log.value = [params.m_wet_log, 0]
sparse_params.value = np.array([
params.alpha_dt,
params.G_max,
params.V_max,
params.y_gs_cot,
params.r1,
params.r2,
params.tf
]).reshape(7, 1)
alpha_dt, G_max, V_max, y_gs_cot, r1, r2, tf_ = sparse_params
dt = tf_ * (1/N) # Integration dt
# constraints
con = []
con += [x[0:3,0] == x0[0:3]] # initial pos
con += [x[3:6,0] == x0[3:6]] # initial vel
con += [x[0:3,N-1] == xf[0:3]] # final pos
con += [x[3:6,N-1]== xf[3:6]] # final vel
con += [s[0,N-1] == 0] # thrust at the end must be zero
con += [u[:,0] == s[0,0]*np.array([1,0,0])] # thrust direction starts straight
con += [u[:,N-1] == s[0,N-1]*np.array([1,0,0])] # and ends straight
con += [z[0,0] == m_wet_log[0,0]] # convexified (7)
for n in range(0,N-1):
#dynamics
con += [x[3:6,n+1] == x[3:6,n] + (dt*0.5)*((u[:,n]+g[:,0]) + (u[:,n+1]+g[:,0]))]
con += [x[0:3,n+1] == x[0:3,n] + (dt*0.5)*(x[3:6,n+1]+x[3:6,n])]
# glideslope cone
con += [ norm( (x[0:3,n])[1:3] ) - y_gs_cot*(x[0,n]) <= 0 ]
con += [ norm(x[3:6,n]) <= V_max ] # velocity
#con += [norm(u[:,n+1]-u[:,n]) <= dt*T_max/m_dry * 3]
con += [z[0,n+1] == z[0,n] - (alpha_dt*0.5)*(s[0,n] + s[0,n+1])] # mass decreases
con += [norm(u[:,n]) <= s[0,n]] # limit thrust magnitude & also therefore, mass
# Thrust pointing constraint
con += [ u[0,n] >= p_cs_cos[0,n]*s[0,n] ]
if n > 0:
#z0_term = m_wet - alpha * r2 * (n) * dt # see ref [2], eq 34,35,36
#z0 = log(z0_term)
z0 = z0_term_log[0,n]
mu_1 = r1*(z0_term_inv[0,n])
mu_2 = r2*(z0_term_inv[0,n])
            # corrects a point where the original project did not match the paper
            # illustration: https://www.desmos.com/calculator/wtcfgnepe1
con += [s[0,n] >= mu_1 * (1 - (z[0,n] - z0) + (z[0,n] - z0)**2 *0.5)] # lower thrust bound
con += [s[0,n] <= mu_2 * (1 - (z[0,n] - z0))] # upper thrust bound
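            # Note: the exact bounds r1*exp(-z) <= s <= r2*exp(-z) are non-convex in z;
            # following the GFOLD convexification (ref [2], eq 34-36), exp(-(z - z0)) is
            # replaced by its second-order Taylor expansion for the lower bound and a
            # first-order one for the upper bound, keeping both constraints convex.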
#Objective
objective = Minimize(-z[0,N-1])
problem=Problem(objective, con)
if codegen:
cpg.codegen(problem, codegen_path)
else:
obj_opt = problem.solve(solver=ECOS, verbose=verbose)
return (
obj_opt,
np.array(x.value), # r,v
np.array(u.value), # u (acceleration)
np.exp(np.array(z.value)) # mass
) if type(x.value) != type(None) else (None, None, None, None)
if __name__ == '__main__':
if (len(sys.argv) > 2 and sys.argv[1] == 'codegen'):
codegen_path = sys.argv[2]
solve(None, None, True)
else:
print("invalid input")
print(sys.argv)
| [((1050, 1076), 'GFOLD_params.SuperParams', 'GFOLD_params.SuperParams', ([], {}), '()\n', (1074, 1076), False, 'import GFOLD_params\n'), ((4489, 4523), 'cvxpy_codegen.codegen', 'cpg.codegen', (['problem', 'codegen_path'], {}), '(problem, codegen_path)\n', (4500, 4523), True, 'import cvxpy_codegen as cpg\n'), ((2230, 2339), 'numpy.array', 'np.array', (['[params.alpha_dt, params.G_max, params.V_max, params.y_gs_cot, params.r1,\n params.r2, params.tf]'], {}), '([params.alpha_dt, params.G_max, params.V_max, params.y_gs_cot,\n params.r1, params.r2, params.tf])\n', (2238, 2339), True, 'import numpy as np\n'), ((2890, 2909), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2898, 2909), True, 'import numpy as np\n'), ((2977, 2996), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2985, 2996), True, 'import numpy as np\n'), ((4646, 4663), 'numpy.array', 'np.array', (['x.value'], {}), '(x.value)\n', (4654, 4663), True, 'import numpy as np\n'), ((4683, 4700), 'numpy.array', 'np.array', (['u.value'], {}), '(u.value)\n', (4691, 4700), True, 'import numpy as np\n'), ((4740, 4757), 'numpy.array', 'np.array', (['z.value'], {}), '(z.value)\n', (4748, 4757), True, 'import numpy as np\n')] |
SarienFates/MMRandomizer | Hints.py | 7c677140d83e94167fecee35e8c25216a51bdd56 | import io
import hashlib
import logging
import os
import struct
import random
from HintList import getHint, getHintGroup, Hint
from Utils import local_path
#builds out general hints based on location and whether an item is required or not
def buildGossipHints(world, rom):
stoneAddresses = [0x938e4c, 0x938EA8, 0x938F04, 0x938F60, 0x938FBC, 0x939018, 0x939074, 0x9390D0, 0x93912C, 0x939188,
0x9391E4, 0x939240, 0x93929C, 0x9392F8, 0x939354, 0x9393B0, 0x93940C, 0x939468, 0x9394C4, 0x939520,
0x93957C, 0x9395D8, 0x939634, 0x939690, 0x9396EC, 0x939748, 0x9397A4, 0x939800, 0x93985C, 0x9398B8,
0x939914, 0x939970] #address for gossip stone text boxes, byte limit is 92
alwaysLocations = getHintGroup('alwaysLocation')#These location will always have a hint somewhere in the world.
sometimesSpace = (int((len(stoneAddresses) - len(alwaysLocations)*2)/2))
sometimesLocations = getHintGroup('location')#A random selection of these locations will be in the hint pool.
random.shuffle(sometimesLocations)
sometimesLocations = sometimesLocations[0:sometimesSpace]
hintList = alwaysLocations
hintList.extend(alwaysLocations)
hintList.extend(sometimesLocations)
locationData = []
for hint in hintList:
for locationWorld in world.get_locations():
if hint.name == locationWorld.name:
locationData.extend([locationWorld])
#hopefully fixes weird VC error where the last character from a previous text box would sometimes spill over into the next box.
for address in range(stoneAddresses[0], 0x9399D8):
rom.write_byte(address, 0x08)
#shuffles the stone addresses for randomization, always locations will be placed first and twice
random.shuffle(stoneAddresses)
#loops through shuffled locations and addresses and builds hint.
while locationData:
currentLoc = locationData.pop(0)
Block_code = getBytes((getHint(currentLoc.name).text))
if currentLoc.item.type == 'Map' or currentLoc.item.type == 'Compass' or currentLoc.item.type == 'BossKey' or currentLoc.item.type == 'SmallKey':
Block_code.extend(getBytes((getHint(currentLoc.item.type).text)))
else:
Block_code.extend(getBytes((getHint(currentLoc.item.name).text)))
endText(Block_code)
if len(Block_code) > 92:
print('Too many characters in hint')
Block_code = getBytes("I am Error.")
Block_code.extend(getBytes(currentLoc.name))
Block_code.extend(getBytes('&'))
Block_code.extend(getBytes(currentLoc.item.name))
rom.write_bytes(stoneAddresses.pop(0), Block_code)
junkHints = getHintGroup('junkHint')
random.shuffle(junkHints)
while stoneAddresses:
junkHint = junkHints.pop()
Block_code = getBytes(junkHint.text)
endText(Block_code)
rom.write_bytes(stoneAddresses.pop(0), Block_code)
return rom
# builds boss reward text that is displayed at the temple of time altar for child and adult, pull based off of item in a fixed order.
def buildBossRewardHints(world, rom):
bossRewardsSpiritualStones = ['Kokiri Emerald', 'Goron Ruby', 'Zora Sapphire']
bossRewardsMedallions = ['Forest Medallion', 'Fire Medallion', 'Water Medallion', 'Shadow Medallion', 'Spirit Medallion', 'Light Medallion']
# text that appears at altar as a child.
Block_code = []
Block_code = getBytes(getHint('Spiritual Stone Text Start').text)
for reward in bossRewardsSpiritualStones:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Spiritual Stone Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95ED95, Block_code)
# text that appears at altar as an adult.
Block_code = []
for reward in bossRewardsMedallions:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Medallion Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95DB94, Block_code)
return rom
# pulls text string from hintlist for reward after sending the location to hintlist.
def buildBossString(Block_code, reward, world):
for location in world.get_locations():
if location.item.name == reward:
Block_code.extend([0x08])
Block_code.extend(getBytes(getHint(location.name).text))
return Block_code
# alternates through color set commands in child and adult boss reward hint strings setting the colors at the start of the string to correspond with the reward found at the location.
# skips over color commands at the end of stings to set color back to white.
def setRewardColor(Block_code):
rewardColors = [0x42, 0x41, 0x43, 0x45, 0x46, 0x44]
colorWhite = True
for i, byte in enumerate(Block_code):
if byte == 0x05 and colorWhite:
Block_code[i + 1] = rewardColors.pop(0)
colorWhite = False
elif byte == 0x05 and not colorWhite:
colorWhite = True
return Block_code
#sets the end of text byte in the text box.
def endText(byteArray):
return byteArray.extend([0x02])
# reads array of characters and converts them to an array of bytes.
def getBytes(string):
byteCode = []
for char in string:
if char == '^':
byteCode.extend([0x04])#box break
elif char == '&':
byteCode.extend([0x01])#new line
elif char == '@':
byteCode.extend([0x0F])#print player name
elif char == '#':
byteCode.extend([0x05, 0x40]) #sets color to white
else:
char = char.encode('utf-8')
char = char.hex()
byte = int('0x' + char, 16)
byteCode.extend([byte])
return byteCode
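
# Worked example (illustrative): getBytes('A&B') returns [0x41, 0x01, 0x42], where
# '&' maps to the in-game newline control code; endText() then appends the
# 0x02 end-of-text byte.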
| [((780, 810), 'HintList.getHintGroup', 'getHintGroup', (['"""alwaysLocation"""'], {}), "('alwaysLocation')\n", (792, 810), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((984, 1008), 'HintList.getHintGroup', 'getHintGroup', (['"""location"""'], {}), "('location')\n", (996, 1008), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((1078, 1112), 'random.shuffle', 'random.shuffle', (['sometimesLocations'], {}), '(sometimesLocations)\n', (1092, 1112), False, 'import random\n'), ((1844, 1874), 'random.shuffle', 'random.shuffle', (['stoneAddresses'], {}), '(stoneAddresses)\n', (1858, 1874), False, 'import random\n'), ((2824, 2848), 'HintList.getHintGroup', 'getHintGroup', (['"""junkHint"""'], {}), "('junkHint')\n", (2836, 2848), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((2854, 2879), 'random.shuffle', 'random.shuffle', (['junkHints'], {}), '(junkHints)\n', (2868, 2879), False, 'import random\n'), ((3606, 3643), 'HintList.getHint', 'getHint', (['"""Spiritual Stone Text Start"""'], {}), "('Spiritual Stone Text Start')\n", (3613, 3643), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((2046, 2070), 'HintList.getHint', 'getHint', (['currentLoc.name'], {}), '(currentLoc.name)\n', (2053, 2070), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((3828, 3863), 'HintList.getHint', 'getHint', (['"""Spiritual Stone Text End"""'], {}), "('Spiritual Stone Text End')\n", (3835, 3863), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((4217, 4246), 'HintList.getHint', 'getHint', (['"""Medallion Text End"""'], {}), "('Medallion Text End')\n", (4224, 4246), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((2274, 2303), 'HintList.getHint', 'getHint', (['currentLoc.item.type'], {}), '(currentLoc.item.type)\n', (2281, 2303), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((2368, 2397), 'HintList.getHint', 'getHint', (['currentLoc.item.name'], {}), '(currentLoc.item.name)\n', (2375, 2397), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((4677, 4699), 'HintList.getHint', 'getHint', (['location.name'], {}), '(location.name)\n', (4684, 4699), False, 'from HintList import getHint, getHintGroup, Hint\n')] |
Jhoselyn-Carballo/computacion_para_ingenieria | examen_2/p2/p2.py | 4b5ed7d4aa0017fb4993ccfdcc9fcef0fb5b3898 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 17 09:10:05 2022
@author: JHOSS
"""
from tkinter import *
def contador(accion, contador):
    if accion == 'countUp':
        contador = contador + 1
    elif accion == 'countDown':
        contador = contador - 1
    elif accion == 'reset':
        contador = 0
    return contador
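
# Illustrative usage (not part of the original exam file):
#   contador('countUp', 0)   -> 1
#   contador('countDown', 3) -> 2
#   contador('reset', 5)     -> 0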
| [] |
ndepal/bokeh | bokeh/models/tests/test_callbacks.py | 1b514f28fe40eeb71954eac0c113b2debdb2eda9 | from pytest import raises
from bokeh.models import CustomJS, Slider
def test_js_callback():
slider = Slider()
cb = CustomJS(code="foo();", args=dict(x=slider))
assert 'foo()' in cb.code
assert cb.args['x'] is slider
cb = CustomJS(code="foo();", args=dict(x=3))
assert 'foo()' in cb.code
assert cb.args['x'] is 3
with raises(AttributeError): # kwargs not supported
CustomJS(code="foo();", x=slider)
def test_py_callback():
slider = Slider()
foo = None # fool pyflakes
def cb(x=slider):
foo()
cb = CustomJS.from_py_func(cb)
assert 'foo()' in cb.code
assert cb.args['x'] is slider
def cb(x=4):
foo()
cb = CustomJS.from_py_func(cb)
assert 'foo()' in cb.code
assert cb.args['x'] is 4
| [((107, 115), 'bokeh.models.Slider', 'Slider', ([], {}), '()\n', (113, 115), False, 'from bokeh.models import CustomJS, Slider\n'), ((483, 491), 'bokeh.models.Slider', 'Slider', ([], {}), '()\n', (489, 491), False, 'from bokeh.models import CustomJS, Slider\n'), ((570, 595), 'bokeh.models.CustomJS.from_py_func', 'CustomJS.from_py_func', (['cb'], {}), '(cb)\n', (591, 595), False, 'from bokeh.models import CustomJS, Slider\n'), ((701, 726), 'bokeh.models.CustomJS.from_py_func', 'CustomJS.from_py_func', (['cb'], {}), '(cb)\n', (722, 726), False, 'from bokeh.models import CustomJS, Slider\n'), ((354, 376), 'pytest.raises', 'raises', (['AttributeError'], {}), '(AttributeError)\n', (360, 376), False, 'from pytest import raises\n'), ((410, 443), 'bokeh.models.CustomJS', 'CustomJS', ([], {'code': '"""foo();"""', 'x': 'slider'}), "(code='foo();', x=slider)\n", (418, 443), False, 'from bokeh.models import CustomJS, Slider\n')] |
martindurant/awkward-1.0 | tests/test_0150-attributeerrors.py | a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38 | # BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
class Dummy(awkward1.Record):
@property
def broken(self):
raise AttributeError("I'm broken!")
def test():
behavior = {}
behavior["Dummy"] = Dummy
array = awkward1.Array([{"x": 1}, {"x": 2}, {"x": 3}], behavior=behavior)
array.layout.setparameter("__record__", "Dummy")
with pytest.raises(AttributeError) as err:
array[1].broken
assert str(err.value) == "I'm broken!" # not "no field named 'broken'"
| [((371, 436), 'awkward1.Array', 'awkward1.Array', (["[{'x': 1}, {'x': 2}, {'x': 3}]"], {'behavior': 'behavior'}), "([{'x': 1}, {'x': 2}, {'x': 3}], behavior=behavior)\n", (385, 436), False, 'import awkward1\n'), ((500, 529), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (513, 529), False, 'import pytest\n')] |
umd-lib/solr-irroc | scripts/preprocess.py | 860be84ea1847cbb96c1a7a70b03f59dc6e0366b | #!/user/bin/env python3
# -*- coding: utf8 -*-
#===================================================#
# cleanup.py #
# Joshua Westgard #
# 2015-08-13 #
# #
# Data preprocessing script for IRRoC DB #
# Usage: python3 cleanup.py [in.csv] [out.csv] #
#===================================================#
import sys, csv, re
infields = ['id', 'str_resource', 'str_description', 'website', 'meta_title',
'meta_description', 'stage_list', 'task_list']
outfields = infields + ['stage_list_facet', 'task_list_facet']
with open(sys.argv[1], 'r') as infile, open(sys.argv[2], 'w') as outfile:
# skip header row in order to use own fieldnames
next(infile)
# instantiate the reader and writer objects
dr = csv.DictReader(infile, fieldnames=infields)
dw = csv.DictWriter(outfile, fieldnames=outfields)
dw.writeheader()
exp = re.compile(r'\d+::([^\b])')
# loop over the input file, writing results to output file
for row in dr:
# remove hash marks from URL
m = re.search('#(.+)#', row['website'])
if m:
row['website'] = m.group(1)
# remove spaces from all multivalued fields
row['stage_list_facet'] = row['stage_list'].replace('; ', ';')
row['task_list_facet'] = row['task_list'].replace('; ', ';')
row['meta_description'] = row['meta_description'].replace(', ', ',')
# create stage_list_facet and task_list_facet cols and strip numbers
row['stage_list'] = re.sub(exp, r'\1', row['stage_list_facet'])
row['task_list'] = re.sub(exp, r'\1', row['task_list_facet'])
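        # e.g. (illustrative) '1::Plan;2::Publish' becomes 'Plan;Publish'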
# write row
dw.writerow(row)
| [] |
appressoas/ievv_opensource | ievv_opensource/demo/batchframeworkdemo/apps.py | 63e87827952ddc8f6f86145b79478ef21d6a0990 | from django.apps import AppConfig
from ievv_opensource import ievv_batchframework
from ievv_opensource.ievv_batchframework import batchregistry
class HelloWorldAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('Hello world! %r', self.kwargs)
class HelloWorldAsyncAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('\n\n\n\n\n\n\n\nHello world, async! %r\n\n\n\n\n', self.kwargs)
class BatchFrameworkDemoAppConfig(AppConfig):
name = 'ievv_opensource.demo.batchframeworkdemo'
verbose_name = "IEVV Batchframework demo"
def ready(self):
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld',
mode=batchregistry.ActionGroup.MODE_SYNCHRONOUS,
actions=[
HelloWorldAction
]))
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld_async',
mode=batchregistry.ActionGroup.MODE_ASYNCHRONOUS,
actions=[
HelloWorldAsyncAction
]
)
)
| [((695, 840), 'ievv_opensource.ievv_batchframework.batchregistry.ActionGroup', 'batchregistry.ActionGroup', ([], {'name': '"""batchframeworkdemo_helloworld"""', 'mode': 'batchregistry.ActionGroup.MODE_SYNCHRONOUS', 'actions': '[HelloWorldAction]'}), "(name='batchframeworkdemo_helloworld', mode=\n batchregistry.ActionGroup.MODE_SYNCHRONOUS, actions=[HelloWorldAction])\n", (720, 840), False, 'from ievv_opensource.ievv_batchframework import batchregistry\n'), ((999, 1161), 'ievv_opensource.ievv_batchframework.batchregistry.ActionGroup', 'batchregistry.ActionGroup', ([], {'name': '"""batchframeworkdemo_helloworld_async"""', 'mode': 'batchregistry.ActionGroup.MODE_ASYNCHRONOUS', 'actions': '[HelloWorldAsyncAction]'}), "(name='batchframeworkdemo_helloworld_async', mode=\n batchregistry.ActionGroup.MODE_ASYNCHRONOUS, actions=[\n HelloWorldAsyncAction])\n", (1024, 1161), False, 'from ievv_opensource.ievv_batchframework import batchregistry\n'), ((628, 665), 'ievv_opensource.ievv_batchframework.batchregistry.Registry.get_instance', 'batchregistry.Registry.get_instance', ([], {}), '()\n', (663, 665), False, 'from ievv_opensource.ievv_batchframework import batchregistry\n'), ((932, 969), 'ievv_opensource.ievv_batchframework.batchregistry.Registry.get_instance', 'batchregistry.Registry.get_instance', ([], {}), '()\n', (967, 969), False, 'from ievv_opensource.ievv_batchframework import batchregistry\n')] |
GODVIX/fastmoe | fmoe/gates/utils.py | 7f6463f0367205a1e95139c6d7e930be6e7fa746 | r"""
Utilities that may be used in the gates
"""
import torch
from fmoe.functions import count_by_gate
import fmoe_cuda as fmoe_native
def limit_by_capacity(topk_idx, num_expert, world_size, capacity):
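    # Rough summary (inferred from the calls below): broadcast the scalar capacity to a
    # per-expert limit, count how many tokens are routed to each expert locally (lec)
    # and globally (gec), clamp the global counts to the capacity via the native
    # kernel, and finally prune over-capacity assignments from topk_idx.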
capacity = torch.ones(num_expert, dtype=torch.int32,
device=topk_idx.device) * capacity
pos, lec, gec = count_by_gate(topk_idx, num_expert, world_size,
require_pos=False)
new_gec, = fmoe_native.limit_by_capacity(gec, capacity,
num_expert, world_size)
if world_size > 1:
new_lec, = fmoe_native.expert_exchange(new_gec, num_expert, world_size)
else:
new_lec = new_gec
fmoe_native.prune_gate_by_capacity(topk_idx,
new_lec.to(torch.int32), num_expert, world_size)
return new_lec, new_gec
| [((329, 395), 'fmoe.functions.count_by_gate', 'count_by_gate', (['topk_idx', 'num_expert', 'world_size'], {'require_pos': '(False)'}), '(topk_idx, num_expert, world_size, require_pos=False)\n', (342, 395), False, 'from fmoe.functions import count_by_gate\n'), ((423, 491), 'fmoe_cuda.limit_by_capacity', 'fmoe_native.limit_by_capacity', (['gec', 'capacity', 'num_expert', 'world_size'], {}), '(gec, capacity, num_expert, world_size)\n', (452, 491), True, 'import fmoe_cuda as fmoe_native\n'), ((219, 284), 'torch.ones', 'torch.ones', (['num_expert'], {'dtype': 'torch.int32', 'device': 'topk_idx.device'}), '(num_expert, dtype=torch.int32, device=topk_idx.device)\n', (229, 284), False, 'import torch\n'), ((546, 606), 'fmoe_cuda.expert_exchange', 'fmoe_native.expert_exchange', (['new_gec', 'num_expert', 'world_size'], {}), '(new_gec, num_expert, world_size)\n', (573, 606), True, 'import fmoe_cuda as fmoe_native\n')] |
DeppMeng/DANNet | evaluate.py | 831eb70d44a4a0b6f6f57ca2014521fc64d1906c | import os
import torch
import numpy as np
from PIL import Image
import torch.nn as nn
from torch.utils import data
from network import *
from dataset.zurich_night_dataset import zurich_night_DataSet
from configs.test_config import get_arguments
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = torch.device("cuda")
args = get_arguments()
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.model == 'PSPNet':
model = PSPNet(num_classes=args.num_classes)
if args.model == 'DeepLab':
model = Deeplab(num_classes=args.num_classes)
if args.model == 'RefineNet':
model = RefineNet(num_classes=args.num_classes, imagenet=False)
saved_state_dict = torch.load(args.restore_from)
model_dict = model.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
model.load_state_dict(saved_state_dict)
lightnet = LightNet()
saved_state_dict = torch.load(args.restore_from_light)
model_dict = lightnet.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
lightnet.load_state_dict(saved_state_dict)
model = model.to(device)
lightnet = lightnet.to(device)
model.eval()
lightnet.eval()
testloader = data.DataLoader(zurich_night_DataSet(args.data_dir, args.data_list, set=args.set))
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
weights = torch.log(torch.FloatTensor(
[0.36869696, 0.06084986, 0.22824049, 0.00655399, 0.00877272, 0.01227341, 0.00207795, 0.0055127, 0.15928651,
0.01157818, 0.04018982, 0.01218957, 0.00135122, 0.06994545, 0.00267456, 0.00235192, 0.00232904, 0.00098658,
0.00413907])).cuda()
weights = (torch.mean(weights) - weights) / torch.std(weights) * args.std + 1.0
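    # The tensor above holds the log of per-class frequencies; standardizing it this
    # way gives rarer classes weights above 1.0, and these per-class weights are
    # multiplied into the class scores (output2) before upsampling below.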
for index, batch in enumerate(testloader):
if index % 10 == 0:
print('%d processd' % index)
image, name = batch
image = image.to(device)
with torch.no_grad():
r = lightnet(image)
enhancement = image + r
if args.model == 'RefineNet':
output2 = model(enhancement)
else:
_, output2 = model(enhancement)
weights_prob = weights.expand(output2.size()[0], output2.size()[3], output2.size()[2], 19)
weights_prob = weights_prob.transpose(1, 3)
output2 = output2 * weights_prob
output = interp(output2).cpu().data[0].numpy()
output = output.transpose(1,2,0)
output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
output_col = colorize_mask(output)
output = Image.fromarray(output)
###### get the enhanced image
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
enhancement = enhancement.cpu().data[0].numpy().transpose(1,2,0)
enhancement = enhancement * mean_std[1] + mean_std[0]
enhancement = (enhancement - enhancement.min()) / (enhancement.max()-enhancement.min())
enhancement = enhancement[:, :, ::-1] * 255 # change to BGR
enhancement = Image.fromarray(enhancement.astype(np.uint8))
###### get the light
light = r.cpu().data[0].numpy().transpose(1, 2, 0)
light = (light-light.min()) / (light.max() - light.min())
light = light[:, :, ::-1] * 255 # change to BGR
light = Image.fromarray(light.astype(np.uint8))
name = name[0].split('/')[-1]
output.save('%s/%s' % (args.save, name))
output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
enhancement.save('%s/%s_enhancement.png' % (args.save, name.split('.')[0]))
light.save('%s/%s_light.png' % (args.save, name.split('.')[0]))
if __name__ == '__main__':
main()
| [((831, 851), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (843, 851), False, 'import torch\n'), ((865, 880), 'configs.test_config.get_arguments', 'get_arguments', ([], {}), '()\n', (878, 880), False, 'from configs.test_config import get_arguments\n'), ((1251, 1280), 'torch.load', 'torch.load', (['args.restore_from'], {}), '(args.restore_from)\n', (1261, 1280), False, 'import torch\n'), ((1537, 1572), 'torch.load', 'torch.load', (['args.restore_from_light'], {}), '(args.restore_from_light)\n', (1547, 1572), False, 'import torch\n'), ((2001, 2068), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(1080, 1920)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(size=(1080, 1920), mode='bilinear', align_corners=True)\n", (2012, 2068), True, 'import torch.nn as nn\n'), ((892, 917), 'os.path.exists', 'os.path.exists', (['args.save'], {}), '(args.save)\n', (906, 917), False, 'import os\n'), ((927, 949), 'os.makedirs', 'os.makedirs', (['args.save'], {}), '(args.save)\n', (938, 949), False, 'import os\n'), ((1921, 1986), 'dataset.zurich_night_dataset.zurich_night_DataSet', 'zurich_night_DataSet', (['args.data_dir', 'args.data_list'], {'set': 'args.set'}), '(args.data_dir, args.data_list, set=args.set)\n', (1941, 1986), False, 'from dataset.zurich_night_dataset import zurich_night_DataSet\n'), ((3311, 3334), 'PIL.Image.fromarray', 'Image.fromarray', (['output'], {}), '(output)\n', (3326, 3334), False, 'from PIL import Image\n'), ((2651, 2666), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2664, 2666), False, 'import torch\n'), ((3207, 3232), 'numpy.argmax', 'np.argmax', (['output'], {'axis': '(2)'}), '(output, axis=2)\n', (3216, 3232), True, 'import numpy as np\n'), ((2094, 2354), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.36869696, 0.06084986, 0.22824049, 0.00655399, 0.00877272, 0.01227341, \n 0.00207795, 0.0055127, 0.15928651, 0.01157818, 0.04018982, 0.01218957, \n 0.00135122, 0.06994545, 0.00267456, 0.00235192, 0.00232904, 0.00098658,\n 0.00413907]'], {}), '([0.36869696, 0.06084986, 0.22824049, 0.00655399, \n 0.00877272, 0.01227341, 0.00207795, 0.0055127, 0.15928651, 0.01157818, \n 0.04018982, 0.01218957, 0.00135122, 0.06994545, 0.00267456, 0.00235192,\n 0.00232904, 0.00098658, 0.00413907])\n', (2111, 2354), False, 'import torch\n'), ((2424, 2442), 'torch.std', 'torch.std', (['weights'], {}), '(weights)\n', (2433, 2442), False, 'import torch\n'), ((2391, 2410), 'torch.mean', 'torch.mean', (['weights'], {}), '(weights)\n', (2401, 2410), False, 'import torch\n')] |
zengboming/python | decorator.py | 13018f476554adc3bff831af27c08f7c216d4b09 | #decorator
def now():
print "2015-11-18"
f=now
f()
print now.__name__
print f.__name__
def log(func):
def wrapper(*args,**kw):
print 'begin call %s():' %func.__name__
func(*args,**kw)
print 'end call %s():' %func.__name__
return wrapper
@log
def now1():
print now1.__name__
now1()
now1=log(now1)
now1()
def log1(text):
def decorator(func):
def wrapper(*args,**kw):
print '%s %s():' %(text,func.__name__)
return func(*args,**kw)
return wrapper
return decorator
@log1('execute')
def now2():
print now2.__name__
now2()
import functools
def log2(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print 'call %s():' %func.__name__
return func(*args,**kw)
return wrapper
@log2
def now3():
print now3.__name__
now3()
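# functools.wraps copies func.__name__ (and other metadata) onto wrapper, so the
# decorated now3 still reports 'now3' instead of 'wrapper'.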
def log3(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print '%s %s():' %(text,func.__name__)
return func(*args,**kw)
return wrapper
return decorator
@log3('execute')
def now4():
print now4.__name__
now4()
def log4(text):
if callable(text):
@functools.wraps(text)
def wrapper(*args,**kw):
print 'begin call %s:' %text.__name__
text(*args,**kw)
print 'end call '+text.__name__
return wrapper
else :
def decorator(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print 'begin call %s %s():' %(text,func.__name__)
func(*args,**kw)
print 'end call %s %s():' %(text,func.__name__)
return wrapper
return decorator
@log4
def now5():
print 'doing'+now5.__name__
now5()
@log4('execute')
def now6():
print 'doing'+now6.__name__
now6() | [] |
compgeomTU/frechetForCurves | test/pyfrechet_visualize.py | 625bfe32a45d23b194226b4ac7713ded09bd2825 | # Author: Will Rodman
# wrodman@tulane.edu
#
# Command line to run program:
# python3 pyfrechet_visualize.py
import sys, os, unittest
sys.path.insert(0, "../")
from pyfrechet.distance import StrongDistance
from pyfrechet.visualize import FreeSpaceDiagram, Trajectories
TEST_DATA = "sp500"
if TEST_DATA == "sp500":
REACHABLE_EPSILON = 5
UNREACHABLE_EPSILON = 1
REVERSE_CURVE = False
elif TEST_DATA == "trajectory":
REACHABLE_EPSILON = 70
UNREACHABLE_EPSILON = 60
REVERSE_CURVE = True
CURVE_1 = f"{TEST_DATA}_data/sample_1.txt"
CURVE_2 = f"{TEST_DATA}_data/sample_2.txt"
class pyfrechet_optimise(unittest.TestCase):
global REACHABLE_EPSILON
global UNREACHABLE_EPSILON
global REVERSE_CURVE
global CURVE_1
global CURVE_2
def test_fail_BinarySearch_instance_argument(self):
class BadClass(): pass
with self.assertRaises(TypeError):
bc = BadClass()
FreeSpaceDiagram(bc)
def test_FreeSpaceDiagram_plot(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd = FreeSpaceDiagram(sd)
fsd.plot()
def test_FreeSpaceDiagram__addEpsilonSlider(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
fsd.addEpsilonSlider(UNREACHABLE_EPSILON, REACHABLE_EPSILON, 1)
fsd.plot()
def test_FreeSpaceDiagram__weighted_cells(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, False)
def test_FreeSpaceDiagram__gridlines(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, True)
def test_Trajectories(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
t = Trajectories(sd)
t.plot()
if __name__ == '__main__':
unittest.main()
| [((135, 160), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (150, 160), False, 'import sys, os, unittest\n'), ((2074, 2089), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2087, 2089), False, 'import sys, os, unittest\n'), ((1017, 1074), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1041, 1074), False, 'from pyfrechet.distance import StrongDistance\n'), ((1132, 1152), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['sd'], {}), '(sd)\n', (1148, 1152), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((1241, 1298), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1265, 1298), False, 'from pyfrechet.distance import StrongDistance\n'), ((1313, 1333), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['sd'], {}), '(sd)\n', (1329, 1333), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((1492, 1549), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1516, 1549), False, 'from pyfrechet.distance import StrongDistance\n'), ((1564, 1584), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['sd'], {}), '(sd)\n', (1580, 1584), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((1720, 1777), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1744, 1777), False, 'from pyfrechet.distance import StrongDistance\n'), ((1792, 1812), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['sd'], {}), '(sd)\n', (1808, 1812), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((1932, 1989), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1956, 1989), False, 'from pyfrechet.distance import StrongDistance\n'), ((2002, 2018), 'pyfrechet.visualize.Trajectories', 'Trajectories', (['sd'], {}), '(sd)\n', (2014, 2018), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((940, 960), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['bc'], {}), '(bc)\n', (956, 960), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n')] |
nww2007/py_ser_freeastro | py_ser_freeastro/core.py | 5806cf83316f48a6db0abe4a88e4485fc04a1b4d | #!/usr/bin/env python3
# vim:fileencoding=UTF-8
# -*- coding: UTF-8 -*-
"""
Created on 15 June 2019.
@author: Vlsdimir Nekrasov nww2007@mail.ru
"""
import sys
import struct
import numpy as np
from progress.bar import Bar
import logging
logging.basicConfig(format = u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s', level = logging.DEBUG, stream=sys.stdout)
# class ser(np.array):
class ser(object):
"""
A set of methods for working with a set of images in the SER format.
"""
def __init__(self, fname):
"""
        Read the data from the file.
"""
# super.__init__()
# luids
self.MONO = 0
self.BAYER_RGGB = 8
self.BAYER_GRBG = 9
self.BAYER_GBRG = 10
self.BAYER_BGGR = 11
self.BAYER_CYYM = 16
self.BAYER_YCMY = 17
self.BAYER_YMCY = 18
self.BAYER_MYYC = 19
self.RGB = 100
self.BGR = 101
self.fname = fname
with open(self.fname, 'rb') as fd:
            # Read the header.
self.header = fd.read(178)
self.parse_header()
            # Read the image frames.
self.frames = np.zeros((self.framecount, self.imageheight, self.imagewidth))
bar = Bar('Downloading', max=self.framecount)
for frame in range(self.framecount):
# for frame in range(1):
bar.next()
t_frame = fd.read(self.imageheight * self.imagewidth * self.pixeldepthperplane//8)
for line in range(self.imageheight):
for pixel in range(self.imagewidth):
index = (line * self.imagewidth + pixel) * 2
self.frames[frame][line][pixel] = struct.unpack('<H', t_frame[index:index+2])[0]
bar.finish()
            # Read the trailer.
self.trailer = fd.read(self.framecount * 8)
self.parse_trailer()
def parse_header(self):
"""
        Parse the header.
"""
self.fileid = self.header[0:14]
self.luid = struct.unpack('<i', self.header[14:18])[0]
self.colorid = struct.unpack('<i', self.header[18:22])[0]
self.littleendian_FALSE = 0
self.littleendian_TRUE = 1
self.littleendian = struct.unpack('<i', self.header[22:26])[0]
self.imagewidth = struct.unpack('<i', self.header[26:30])[0]
self.imageheight = struct.unpack('<i', self.header[30:34])[0]
self.pixeldepthperplane = struct.unpack('<i', self.header[34:38])[0]
self.framecount = struct.unpack('<i', self.header[38:42])[0]
self.observer = self.header[42:82]
self.telescope = self.header[82:122]
self.datetime = struct.unpack('<q', self.header[122:130])[0]
self.datetime_utc = struct.unpack('<q', self.header[130:138])[0]
# logging.info('{0}x{1}'.format(self.imagewidth, self.imageheight))
def parse_trailer(self):
"""
Parse the trailer
"""
        self.timestamps = []
        for i in range(0, self.framecount*8, 8):
            # Each trailer entry is a 64-bit little-endian timestamp, one per frame.
            self.timestamps.append(struct.unpack('<Q', self.trailer[i:i+8])[0])
def main(argv):
logging.info('%s started.\n' % argv[0])
fn = './images/ASICAP_2019-05-10_01_43_36_523.SER'
frames = ser(fn)
# logging.debug(type(frames))
# logging.debug(type(object))
# # darks_fn = './images/ASICAP_2019-05-10_02_12_00_621.SER'
# # offsets_fn = './images/ASICAP_2019-05-10_02_30_47_294.SER'
#
# # frames = ser.ser()
# # frames.read(darks_fn)
# # frames.read(lights_fn)
# # ser_fr = serialise_frames(frames)
# # logging.debug('std1={}'.format(ser_fr.std()))
# # hist_fr = get_hist(ser_fr)
# # plt.plot(hist_fr)
# # plt.grid()
# # plt.show()
#
# fnames = [
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_34_52_584.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_36_05_343.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_34_373.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_47_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_58_784.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_06_703.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_17_476.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_27_330.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_36_623.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_48_239.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_20_816.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_32_118.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_47_796.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_59_999.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_10_321.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_41_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_07_956.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_19_287.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_31_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_43_981.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_07_152.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_36_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_01_167.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_33_214.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_58_952.SER',
# ]
#
# print('{};{};{};{};{}'.format('File', 'Temperature', 'Exposure', 'Gain', 'std'))
# for fn in fnames:
# print('{}'.format(fn), flush=True, file=sys.stderr)
# frames = ser.ser()
# frames.read(fn)
# ser_fr = serialise_frames(frames)
#
# config = configparser.ConfigParser()
# config.read(fn + '.txt')
#
# print('{};{};{};{};{}'.format(fn, config['ZWO ASI120MC']['temperature'], config['ZWO ASI120MC']['exposure'], config['ZWO ASI120MC']['gain'], ser_fr.std()))
logging.info('%s finished.\n' % argv[0])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [((242, 388), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'u"""%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s"""', 'level': 'logging.DEBUG', 'stream': 'sys.stdout'}), "(format=\n u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s',\n level=logging.DEBUG, stream=sys.stdout)\n", (261, 388), False, 'import logging\n'), ((3295, 3334), 'logging.info', 'logging.info', (["('%s started.\\n' % argv[0])"], {}), "('%s started.\\n' % argv[0])\n", (3307, 3334), False, 'import logging\n'), ((6290, 6330), 'logging.info', 'logging.info', (["('%s finished.\\n' % argv[0])"], {}), "('%s finished.\\n' % argv[0])\n", (6302, 6330), False, 'import logging\n'), ((1228, 1290), 'numpy.zeros', 'np.zeros', (['(self.framecount, self.imageheight, self.imagewidth)'], {}), '((self.framecount, self.imageheight, self.imagewidth))\n', (1236, 1290), True, 'import numpy as np\n'), ((1309, 1348), 'progress.bar.Bar', 'Bar', (['"""Downloading"""'], {'max': 'self.framecount'}), "('Downloading', max=self.framecount)\n", (1312, 1348), False, 'from progress.bar import Bar\n'), ((2160, 2199), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[14:18]'], {}), "('<i', self.header[14:18])\n", (2173, 2199), False, 'import struct\n'), ((2237, 2276), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[18:22]'], {}), "('<i', self.header[18:22])\n", (2250, 2276), False, 'import struct\n'), ((2386, 2425), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[22:26]'], {}), "('<i', self.header[22:26])\n", (2399, 2425), False, 'import struct\n'), ((2463, 2502), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[26:30]'], {}), "('<i', self.header[26:30])\n", (2476, 2502), False, 'import struct\n'), ((2540, 2579), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[30:34]'], {}), "('<i', self.header[30:34])\n", (2553, 2579), False, 'import struct\n'), ((2617, 2656), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[34:38]'], {}), "('<i', self.header[34:38])\n", (2630, 2656), False, 'import struct\n'), ((2694, 2733), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[38:42]'], {}), "('<i', self.header[38:42])\n", (2707, 2733), False, 'import struct\n'), ((2878, 2919), 'struct.unpack', 'struct.unpack', (['"""<q"""', 'self.header[122:130]'], {}), "('<q', self.header[122:130])\n", (2891, 2919), False, 'import struct\n'), ((2957, 2998), 'struct.unpack', 'struct.unpack', (['"""<q"""', 'self.header[130:138]'], {}), "('<q', self.header[130:138])\n", (2970, 2998), False, 'import struct\n'), ((3228, 3270), 'struct.unpack', 'struct.unpack', (['"""<Q"""', 'self.trailer[i:i + 8]'], {}), "('<Q', self.trailer[i:i + 8])\n", (3241, 3270), False, 'import struct\n'), ((1798, 1843), 'struct.unpack', 'struct.unpack', (['"""<H"""', 't_frame[index:index + 2]'], {}), "('<H', t_frame[index:index + 2])\n", (1811, 1843), False, 'import struct\n')] |
humeniuka/sGDML_dataset_generation | sgdml_dataset_generation/readers/fchk.py | a99f792b6aac7ff869ebcd1bd7a7226ca81f43ee | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ["FormattedCheckpointFile"]
# # Imports
import numpy as np
import scipy.linalg as sla
from collections import OrderedDict
import re
import logging
# # Local Imports
from sgdml_dataset_generation import units
from sgdml_dataset_generation.units import hbar
# # Logging
logger = logging.getLogger(__name__)
logging.basicConfig(format="[%(module)-12s] %(message)s", level=logging.INFO)
class FormattedCheckpointFile(object):
"""
reads all fields from formatted checkpoint files produced by the quantum chemistry
programs Gaussian 16 and QChem.
Parameters
----------
f : File
        file handle opened for reading a formatted checkpoint file.
        The caller is responsible for opening the file and closing it afterwards.
The fields of the checkpoint file can be accessed by their names (see example below).
Array fields are stored as 1D numpy arrays of float (R) or integer (I) type.
Example
-------
>>> with open("freq.fchk") as f:
>>> fchk = FormattedCheckpointFile(f)
>>> print(fchk["Number of atoms"])
"""
def __init__(self, f):
self.filename = f.name
self.data = OrderedDict()
        # accumulate all lines belonging to the same field (without newlines)
acc = ""
dtype = None
for line_number, line in enumerate(f.readlines()):
# count lines starting from 1
line_number += 1
# The name of a field starts in the first column and with a capital letter
if re.match(r"^[A-Z].*", line):
if len(acc) > 0 and not dtype is None:
# All lines belonging to the previous field must have been read,
# so we convert it to a numpy array.
try:
if dtype == str:
self.data[field] = acc
else:
# numerical types
array = np.fromstring(acc, dtype=dtype, sep=" ")
assert len(array) == count
self.data[field] = array
except (ValueError,AssertionError) as err:
logger.warning(f"A problem occurred reading field `{field}` in line {line_number:10} in {f.name} .")
logger.warning(err)
self.data[field] = np.zeros(count, dtype=dtype)
# reset accumulator
acc = ""
try:
if len(line) < 43:
# skip title and method
logger.debug(f"skipping line {line_number:10} in {f.name}: `{line.strip()}`")
continue
# First 43 columns are reserved for the field name
field = line[0:43].strip()
logger.debug(f"field `{field}` encountered")
# Colum 43 contains a character indicating the data type:
# I -> integer
# R -> real
type_char = line[43]
if type_char == "I":
dtype = int
elif type_char == "R":
dtype = float
elif type_char == "C":
dtype = str
else:
dtype = None
# skip lines without I or R data type markers
logger.debug(f"skipping line {line_number:10} in {f.name}: `{line.strip()}` .")
continue
# If column 47-48 contain the string "N=", we are dealing with an array
# and the last integer indicates the number of elements
if line[47:49] == "N=":
count = int(line[49:])
else:
# scalar value
self.data[field] = dtype(line[49:])
except Exception as err:
logger.error(f"An error occurred while reading line {line_number:10} in {f.name} .")
raise err
else:
acc += " " + line
# read last field
if len(acc) > 0:
self.data[field] = np.fromstring(acc, dtype=dtype, sep=" ")
assert len(self.data[field]) == count
def __getitem__(self, key):
"""
access data fields by their names
Parameters
----------
key : str
name of field that should be retrieved (e.g. 'Number of atoms')
Returns
-------
field : float, int or ndarray
a KeyError is raised if the field is not present in the formatted checkpoint file
"""
return self.data[key]
def keys(self):
"""
list names of all fields present in the formatted checkpoint file
Returns
-------
keys : list of str
field names
"""
return self.data.keys()
def harmonic_approximation(self):
"""
extract the position, gradient and Hessian of the potential energy in cartesian coordinates
The potential is expanded to second order around the current position x0:
E(x) = E(x0) + grad(E)^T.(x-x0) + 1/2 (x-x0)^T . hess(E) . (x-x0)
A frequency calculation has to be present in the formatted checkpoint file.
The frequency calculation should be performed in a separate Gaussian 16 job using the
following route line for the ground state calculation:
#P functional/basis Freq NoSymm IOp(7/32=5)
and the following route line for an excited state frequency calculation:
#P functional/basis TD=(Nstates=2, Root=1, NAC) Freq NoSymm IOp(7/32=5)
Returns
-------
pos : ndarray (3*nat,)
cartesian coordinates x0
energy : ndarray (1,)
total energy E(x0) of state of interest (in Hartree)
grad : ndarray (3*nat,)
cartesian gradient dE/dx(x0) (in Hartree/bohr)
hess : ndarray (3*nat,3*nat)
cartesian force constants d^2E/(dxdx)(x0) (in Hartree/bohr^2)
"""
try:
nat = self.data["Number of atoms"]
# total energy of state of interest
energy = np.array(self.data["Total Energy"])
# geometry
pos = self.data["Current cartesian coordinates"]
# cartesian gradient
grad = self.data["Cartesian Gradient"]
# Only the lower triangular part of the Hessian is stored.
hess = np.zeros((3*nat,3*nat))
row, col = np.tril_indices(3*nat)
hess[row,col] = self.data["Cartesian Force Constants"]
# Hessian is symmetric, H^T = H
hess[col,row] = hess[row,col]
except KeyError as err:
logger.error(f"A required field could not be found in formatted checkpoint file {self.filename} .")
raise err
return pos, energy, grad, hess
def nonadiabatic_coupling(self):
"""
extract non-adiabatic coupling vector between ground and excited state (Root=I), if present.
Only Gaussian 16 saves the NAC vector in the checkpoint file, while QChem writes it to the output file.
Returns
-------
nac : ndarray (3*nat,)
1st order derivative coupling <0|d/dx|I>
"""
try:
nac = self.data["Nonadiabatic coupling"]
except KeyError as err:
logger.error(f"The field `Nonadiabatic coupling` could not be found in the formatted checkpoint file {self.filename} .")
raise err
if (nac == 0.0).all():
logger.warning(f"All components of non-adiabatic coupling vector in {self.filename} are zero.")
return nac
def vibrational_groundstate(self, zero_threshold=100.0):
"""
The vibrational ground state belonging to the harmonic potential is given by
1/4 T
psi (x) = (det(Gamma ) / pi^N) exp{ -1/2 (x-x ) Gamma (x-x ) }
0 0 0 0 0
provided that x0 is the minimum. This function computes the width parameter matrix
Gamma_0 from the Hessian at the minimum.
Optional
--------
zero_threshold : float > 0
threshold for considering normal mode frequencies as zero (in cm-1)
Returns
-------
x0 : ndarray (3*nat,)
center of Gaussian, in cartesian coordinates (bohr)
Gamma0 : ndarray (3*nat,3*nat)
symmetric, positive semi-definite matrix of width parameters (bohr^{-2})
en_zpt : float
zero-point energy (Hartree)
"""
x0, energy, grad, hess = self.harmonic_approximation()
mass = self.masses()
# diagonals of M^{1/2} and M^{-1/2}
msq = np.sqrt(mass)
imsq = 1.0/msq
# mass-weighted Hessian H
hess_mwc = np.einsum('i,ij,j->ij', imsq, hess, imsq)
# diagonalize symmetric H = V.diag(w).V^T
w2,V = sla.eigh(hess_mwc)
# vibrational energies
w = np.sqrt(w2)
# zero-point energy
en_zpt = 0.5 * hbar * np.sum(w)
logger.info("Normal mode frequencies (cm-1)")
logger.info(w*units.hartree_to_wavenumbers)
if not (w * units.hartree_to_wavenumbers > zero_threshold).all():
logger.warning("At a minimum all frequencies should be positive, found imaginary ones.")
# select non-zero vibrational modes
non_zero = (w * units.hartree_to_wavenumbers) > zero_threshold
# number of non singular dimensions
num_non_zero = np.count_nonzero( non_zero )
dim = x0.shape[0]
logger.info(f"number of zero modes : {dim - num_non_zero}")
# L = hbar^{-1/2} M^{1/2} V w^{1/2}
L = hbar**(-1/2) * np.einsum('i,ij,j->ij', msq, V[:,non_zero], np.sqrt(w[non_zero]))
# Gamma_0 = L . L^T
Gamma_0 = np.einsum('ij,kj->ik', L, L)
return x0, Gamma_0, en_zpt
def masses(self):
"""
atomic masses in a.u.
Returns
-------
masses : ndarray (3*nat,)
masses for each cartesian coordinate in multiples of electron mass
"""
mass = self.data["Real atomic weights"] * units.amu_to_aumass
mass = np.repeat(mass, 3)
return mass
def atomic_numbers(self):
"""
atomic numbers
Returns
-------
numbers : ndarray(nat,)
atomic number for each atom
"""
return self.data["Atomic numbers"]
| [((336, 363), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (353, 363), False, 'import logging\n'), ((364, 441), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(module)-12s] %(message)s"""', 'level': 'logging.INFO'}), "(format='[%(module)-12s] %(message)s', level=logging.INFO)\n", (383, 441), False, 'import logging\n'), ((1218, 1231), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1229, 1231), False, 'from collections import OrderedDict\n'), ((9138, 9151), 'numpy.sqrt', 'np.sqrt', (['mass'], {}), '(mass)\n', (9145, 9151), True, 'import numpy as np\n'), ((9228, 9269), 'numpy.einsum', 'np.einsum', (['"""i,ij,j->ij"""', 'imsq', 'hess', 'imsq'], {}), "('i,ij,j->ij', imsq, hess, imsq)\n", (9237, 9269), True, 'import numpy as np\n'), ((9336, 9354), 'scipy.linalg.eigh', 'sla.eigh', (['hess_mwc'], {}), '(hess_mwc)\n', (9344, 9354), True, 'import scipy.linalg as sla\n'), ((9399, 9410), 'numpy.sqrt', 'np.sqrt', (['w2'], {}), '(w2)\n', (9406, 9410), True, 'import numpy as np\n'), ((9961, 9987), 'numpy.count_nonzero', 'np.count_nonzero', (['non_zero'], {}), '(non_zero)\n', (9977, 9987), True, 'import numpy as np\n'), ((10270, 10298), 'numpy.einsum', 'np.einsum', (['"""ij,kj->ik"""', 'L', 'L'], {}), "('ij,kj->ik', L, L)\n", (10279, 10298), True, 'import numpy as np\n'), ((10651, 10669), 'numpy.repeat', 'np.repeat', (['mass', '(3)'], {}), '(mass, 3)\n', (10660, 10669), True, 'import numpy as np\n'), ((1581, 1607), 're.match', 're.match', (['"""^[A-Z].*"""', 'line'], {}), "('^[A-Z].*', line)\n", (1589, 1607), False, 'import re\n'), ((4370, 4410), 'numpy.fromstring', 'np.fromstring', (['acc'], {'dtype': 'dtype', 'sep': '""" """'}), "(acc, dtype=dtype, sep=' ')\n", (4383, 4410), True, 'import numpy as np\n'), ((6466, 6501), 'numpy.array', 'np.array', (["self.data['Total Energy']"], {}), "(self.data['Total Energy'])\n", (6474, 6501), True, 'import numpy as np\n'), ((6760, 6788), 'numpy.zeros', 'np.zeros', (['(3 * nat, 3 * nat)'], {}), '((3 * nat, 3 * nat))\n', (6768, 6788), True, 'import numpy as np\n'), ((6807, 6831), 'numpy.tril_indices', 'np.tril_indices', (['(3 * nat)'], {}), '(3 * nat)\n', (6822, 6831), True, 'import numpy as np\n'), ((9469, 9478), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (9475, 9478), True, 'import numpy as np\n'), ((10201, 10221), 'numpy.sqrt', 'np.sqrt', (['w[non_zero]'], {}), '(w[non_zero])\n', (10208, 10221), True, 'import numpy as np\n'), ((2036, 2076), 'numpy.fromstring', 'np.fromstring', (['acc'], {'dtype': 'dtype', 'sep': '""" """'}), "(acc, dtype=dtype, sep=' ')\n", (2049, 2076), True, 'import numpy as np\n'), ((2460, 2488), 'numpy.zeros', 'np.zeros', (['count'], {'dtype': 'dtype'}), '(count, dtype=dtype)\n', (2468, 2488), True, 'import numpy as np\n')] |
94JuHo/Algorithm_study | 2020_01_01/max_values/max_values.py | e2c10ec680d966e5bcc4e7cb88d9514f9ccbbf15 | values = []
for i in range(9):
values.append(int(input('')))
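# Find the largest of the nine values and its 1-based position.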
max_value = 0
location = 0
for i in range(9):
if values[i] > max_value:
max_value = values[i]
location = i+1
print(max_value)
print(location) | [] |
rhasspy/fuzzywuzzy | fuzzywuzzy/process.py | e5b486c756b392481ec8e1382eedce280e56fd69 | #!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from fuzz import *
import sys, os
import utils
########################################
# Find Best Matches In List Of Choices #
########################################
def extract(query, choices, processor=None, scorer=None, limit=5):
# choices = a list of objects we are attempting to extract values from
# query = an object representing the thing we want to find
# scorer f(OBJ, QUERY) --> INT. We will return the objects with the highest score
# by default, we use score.WRatio() and both OBJ and QUERY should be strings
# processor f(OBJ_A) --> OBJ_B, where the output is an input to scorer
# for example, "processor = lambda x: x[0]" would return the first element in a collection x (of, say, strings)
# this would then be used in the scoring collection
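    # Illustrative example (scores depend on the scorer used):
    #   extract("new york jets", teams, limit=2)
    #   -> [("New York Jets", <score>), ("New York Giants", <score>)]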
if choices is None or len(choices) == 0:
return []
# default, turn whatever the choice is into a string
if processor is None:
processor = lambda x: utils.asciidammit(x)
# default: wratio
if scorer is None:
scorer = WRatio
sl = list()
for choice in choices:
processed = processor(choice)
score = scorer(query, processed)
tuple = (choice, score)
sl.append(tuple)
sl.sort(key=lambda i: -1*i[1])
return sl[:limit]
##########################
# Find Single Best Match #
##########################
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
# convenience method which returns the single best choice
# optional parameter: score_cutoff.
# If the best choice has a score of less than score_cutoff
# we will return none (intuition: not a good enough match)
best_list = extract(query, choices, processor, scorer, limit=1)
if len(best_list) > 0:
best = best_list[0]
if best[1] > score_cutoff:
return best
else:
return None
else:
return None
| [((2119, 2139), 'utils.asciidammit', 'utils.asciidammit', (['x'], {}), '(x)\n', (2136, 2139), False, 'import utils\n')] |
robfalck/AoC2017 | day03/day03.py | fa19f3fb42d979b60888a1954bea571c9d4ee735 | from __future__ import print_function, division, absolute_import
import numpy as np
INPUT = 265149
def part1(number):
skip = 2
d = 1
row = None
col = None
for shell_idx in range(1, 10000):
size = shell_idx * 2 + 1
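        # a, b, c and d are the spiral values at the four corners of this shell,
        # counter-clockwise from the top-right corner (d carries over between shells).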
a = d + skip
b = a + skip
c = b + skip
d = c + skip
skip = skip + 2
if a <= number <= b:
# top
col = -(size // 2) + (b - number)
row = size // 2
elif b <= number <= c:
# left
row = size // 2 - (c - number)
col = -(size // 2)
elif c <= number <= d:
# bottom
row = -(size // 2)
col = row + (number - c)
elif number < a:
# right
col = size // 2
row = col - (a - number)
if row is not None and col is not None:
manh_dist = abs(row) + abs(col)
return manh_dist
def part2(number):
"""
A brute-force approach to part 2.
"""
map = np.zeros((11, 11), dtype=int)
row = 5
col = 5
map[row, col] = 1
heading = 'RIGHT'
dcol = 1
drow = 0
nsteps = 70
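    # Walk the spiral outward, writing the sum of the already-filled neighbours
    # into each cell, and turn left whenever the cell on that side is still empty.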
for i in range(nsteps):
row += drow
col += dcol
sum_at_next = map[row-1:row+2, col-1:col+2].sum()
map[row, col] = sum_at_next
if sum_at_next > number:
return sum_at_next
# Determine if we need to change heading
if heading == 'RIGHT' and map[row-1, col] == 0:
heading = 'UP'
drow = -1
dcol = 0
elif heading == 'UP' and map[row, col-1] == 0:
heading = 'LEFT'
drow = 0
dcol = -1
elif heading == 'LEFT' and map[row+1, col] == 0:
heading = 'DOWN'
drow = 1
dcol = 0
elif heading == 'DOWN' and map[row, col+1] == 0:
heading = 'RIGHT'
drow = 0
dcol = 1
if __name__ == '__main__':
print(part1(number=INPUT))
print(part2(number=INPUT))
| [((1037, 1066), 'numpy.zeros', 'np.zeros', (['(11, 11)'], {'dtype': 'int'}), '((11, 11), dtype=int)\n', (1045, 1066), True, 'import numpy as np\n')] |
codefair114/Inventory-App-Django | core/migrations/0004_auto_20210929_2354.py | f09f43ca282f82be981cac26a92d614fdf2ff5ef | # Generated by Django 3.2.7 on 2021-09-29 23:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20210929_2353'),
]
operations = [
migrations.AlterField(
model_name='order',
name='client',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.orderclient'),
),
migrations.AlterField(
model_name='order',
name='payment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.paymentmethod'),
),
migrations.AlterField(
model_name='order',
name='shipment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.shipment'),
),
]
| [((365, 465), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.orderclient"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='core.orderclient')\n", (382, 465), False, 'from django.db import migrations, models\n'), ((583, 685), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.paymentmethod"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='core.paymentmethod')\n", (600, 685), False, 'from django.db import migrations, models\n'), ((804, 901), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.shipment"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='core.shipment')\n", (821, 901), False, 'from django.db import migrations, models\n')] |
bopopescu/nova-token | nova/api/openstack/compute/legacy_v2/contrib/console_auth_tokens.py | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | begin_unit
comment|'# Copyright 2013 Cloudbase Solutions Srl'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'webob'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'extensions'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'wsgi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'consoleauth'
name|'import'
name|'rpcapi'
name|'as'
name|'consoleauth_rpcapi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|authorize
name|'authorize'
op|'='
name|'extensions'
op|'.'
name|'extension_authorizer'
op|'('
string|"'compute'"
op|','
string|"'console_auth_tokens'"
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ConsoleAuthTokensController
name|'class'
name|'ConsoleAuthTokensController'
op|'('
name|'wsgi'
op|'.'
name|'Controller'
op|')'
op|':'
newline|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_consoleauth_rpcapi'
op|'='
name|'consoleauth_rpcapi'
op|'.'
name|'ConsoleAuthAPI'
op|'('
op|')'
newline|'\n'
name|'super'
op|'('
name|'ConsoleAuthTokensController'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
newline|'\n'
nl|'\n'
DECL|member|show
dedent|''
name|'def'
name|'show'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'id'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Checks a console auth token and returns the related connect info."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'token'
op|'='
name|'id'
newline|'\n'
name|'connect_info'
op|'='
name|'self'
op|'.'
name|'_consoleauth_rpcapi'
op|'.'
name|'check_token'
op|'('
name|'context'
op|','
name|'token'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'connect_info'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'_'
op|'('
string|'"Token not found"'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'console_type'
op|'='
name|'connect_info'
op|'.'
name|'get'
op|'('
string|"'console_type'"
op|')'
newline|'\n'
comment|'# This is currently required only for RDP consoles'
nl|'\n'
name|'if'
name|'console_type'
op|'!='
string|'"rdp-html5"'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPUnauthorized'
op|'('
nl|'\n'
name|'explanation'
op|'='
name|'_'
op|'('
string|'"The requested console type details are not "'
nl|'\n'
string|'"accessible"'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
op|'{'
string|"'console'"
op|':'
nl|'\n'
op|'{'
name|'i'
op|':'
name|'connect_info'
op|'['
name|'i'
op|']'
nl|'\n'
name|'for'
name|'i'
name|'in'
op|'['
string|"'instance_uuid'"
op|','
string|"'host'"
op|','
string|"'port'"
op|','
nl|'\n'
string|"'internal_access_path'"
op|']'
nl|'\n'
name|'if'
name|'i'
name|'in'
name|'connect_info'
op|'}'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|Console_auth_tokens
dedent|''
dedent|''
name|'class'
name|'Console_auth_tokens'
op|'('
name|'extensions'
op|'.'
name|'ExtensionDescriptor'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Console token authentication support."""'
newline|'\n'
DECL|variable|name
name|'name'
op|'='
string|'"ConsoleAuthTokens"'
newline|'\n'
DECL|variable|alias
name|'alias'
op|'='
string|'"os-console-auth-tokens"'
newline|'\n'
DECL|variable|namespace
name|'namespace'
op|'='
op|'('
string|'"http://docs.openstack.org/compute/ext/"'
nl|'\n'
string|'"consoles-auth-tokens/api/v2"'
op|')'
newline|'\n'
DECL|variable|updated
name|'updated'
op|'='
string|'"2013-08-13T00:00:00Z"'
newline|'\n'
nl|'\n'
DECL|member|get_resources
name|'def'
name|'get_resources'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'controller'
op|'='
name|'ConsoleAuthTokensController'
op|'('
op|')'
newline|'\n'
name|'ext'
op|'='
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
string|"'os-console-auth-tokens'"
op|','
nl|'\n'
name|'controller'
op|')'
newline|'\n'
name|'return'
op|'['
name|'ext'
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| [] |
jaluebbe/ahrs | ahrs/common/geometry.py | 4b4a33b1006e0d455a71ac8379a2697202361758 | # -*- coding: utf-8 -*-
"""
Geometrical functions
---------------------
References
----------
.. [W1] Wikipedia: https://de.wikipedia.org/wiki/Ellipse#Ellipsengleichung_(Parameterform)
.. [WAE] Wolfram Alpha: Ellipse. (http://mathworld.wolfram.com/Ellipse.html)
"""
import numpy as np
from typing import Union
def circle(center: Union[list, np.ndarray], radius: float = 1.0, num_points: int = 20) -> np.ndarray:
"""
Build a circle with the given characteristics.
Parameters
----------
    center : array-like
        2D coordinates of the center.
    radius : float
        Radius of the circle.
num_points : int
Number of points to build.
Returns
-------
points : numpy.ndarray
N-by-2 array with the coordinates of the circle.
"""
R = np.linspace(0.0, 2.0*np.pi, num_points+1)
x = center[0] + radius*np.cos(R)
y = center[1] + radius*np.sin(R)
return np.array([x, y]).transpose()
def ellipse(center: Union[list, np.ndarray], phi: float, axes: Union[list, np.ndarray], num_points: int = 20) -> np.ndarray:
"""
Build an ellipse with the given characteristics.
Parameters
----------
center : array-like
2D Coordinates of center.
phi : float
Angle, in radians, of the major axis w.r.t. the X-axis
axes : array-like
Lengths of major and minor axes, respectively.
num_points : int
Number of points. Defaults to 20.
Returns
-------
points : numpy.ndarray
N-by-2 array with the coordinates of the ellipse.
"""
R = np.linspace(0.0, 2.0*np.pi, num_points+1)
a, b = axes
x = center[0] + a*np.cos(R)*np.cos(phi) - b*np.sin(R)*np.sin(phi)
y = center[1] + a*np.cos(R)*np.sin(phi) + b*np.sin(R)*np.cos(phi)
return np.array([x, y]).transpose()
| [((785, 830), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2.0 * np.pi)', '(num_points + 1)'], {}), '(0.0, 2.0 * np.pi, num_points + 1)\n', (796, 830), True, 'import numpy as np\n'), ((1563, 1608), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2.0 * np.pi)', '(num_points + 1)'], {}), '(0.0, 2.0 * np.pi, num_points + 1)\n', (1574, 1608), True, 'import numpy as np\n'), ((854, 863), 'numpy.cos', 'np.cos', (['R'], {}), '(R)\n', (860, 863), True, 'import numpy as np\n'), ((891, 900), 'numpy.sin', 'np.sin', (['R'], {}), '(R)\n', (897, 900), True, 'import numpy as np\n'), ((912, 928), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (920, 928), True, 'import numpy as np\n'), ((1679, 1690), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1685, 1690), True, 'import numpy as np\n'), ((1749, 1760), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1755, 1760), True, 'import numpy as np\n'), ((1772, 1788), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1780, 1788), True, 'import numpy as np\n'), ((1653, 1664), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1659, 1664), True, 'import numpy as np\n'), ((1669, 1678), 'numpy.sin', 'np.sin', (['R'], {}), '(R)\n', (1675, 1678), True, 'import numpy as np\n'), ((1723, 1734), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1729, 1734), True, 'import numpy as np\n'), ((1739, 1748), 'numpy.sin', 'np.sin', (['R'], {}), '(R)\n', (1745, 1748), True, 'import numpy as np\n'), ((1643, 1652), 'numpy.cos', 'np.cos', (['R'], {}), '(R)\n', (1649, 1652), True, 'import numpy as np\n'), ((1713, 1722), 'numpy.cos', 'np.cos', (['R'], {}), '(R)\n', (1719, 1722), True, 'import numpy as np\n')] |
jamayfieldjr/iem | htdocs/plotting/auto/scripts100/p116.py | 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | """Monthly HDD/CDD Totals."""
import datetime
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_dbconn, get_autoplot_context
from pyiem.exceptions import NoDataFound
PDICT = {'cdd': 'Cooling Degree Days',
'hdd': 'Heating Degree Days'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['report'] = True
desc['description'] = """This chart presents monthly cooling degree days
or heating degree days for a 20 year period of your choice. The 20 year
limit is for plot usability only, the data download has all available
years contained."""
y20 = datetime.date.today().year - 19
desc['arguments'] = [
dict(type='station', name='station', default='IATDSM',
label='Select Station', network='IACLIMATE'),
dict(type='select', options=PDICT, default='cdd', name='var',
label='Select Variable'),
dict(type='year', name='syear', default=y20,
label='For plotting, year to start 20 years of plot'),
]
return desc
def plotter(fdict):
""" Go """
import seaborn as sns
ctx = get_autoplot_context(fdict, get_description())
pgconn = get_dbconn('coop')
station = ctx['station']
varname = ctx['var']
table = "alldata_%s" % (station[:2], )
df = read_sql("""
SELECT year, month, sum(precip) as sum_precip,
avg(high) as avg_high,
avg(low) as avg_low,
sum(cdd(high,low,60)) as cdd60,
sum(cdd(high,low,65)) as cdd65,
sum(hdd(high,low,60)) as hdd60,
sum(hdd(high,low,65)) as hdd65,
sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,
sum(case when snow >= 0.1 then 1 else 0 end) as snow_days
from """+table+""" WHERE station = %s GROUP by year, month
""", pgconn, params=(station,), index_col=None)
if df.empty:
raise NoDataFound("No Data Found.")
df['monthdate'] = df[['year', 'month']].apply(
lambda x: datetime.date(x[0], x[1], 1), axis=1)
df.set_index('monthdate', inplace=True)
res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: Daryl Herzmann akrherz@iastate.edu 515.294.5978
""" % (datetime.date.today().strftime("%d %b %Y"),
ctx['_nt'].sts[station]['archive_begin'].date(),
datetime.date.today(), station, ctx['_nt'].sts[station]['name'])
res += """# THESE ARE THE MONTHLY %s (base=65) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (PDICT[varname].upper(), station)
second = """# THESE ARE THE MONTHLY %s (base=60) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (
PDICT[varname].upper(), station)
minyear = df['year'].min()
maxyear = df['year'].max()
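    # Build the fixed-width yearly tables: `res` holds base-65 totals, `second` base-60.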
for yr in range(minyear, maxyear + 1):
res += ("%4i" % (yr,))
second += "%4i" % (yr,)
for mo in range(1, 13):
ts = datetime.date(yr, mo, 1)
if ts not in df.index:
res += ("%7s" % ("M",))
second += "%7s" % ("M",)
continue
row = df.loc[ts]
res += ("%7.0f" % (row[varname+"65"],))
second += "%7.0f" % (row[varname+"60"],)
res += ("\n")
second += "\n"
res += ("MEAN")
second += "MEAN"
for mo in range(1, 13):
df2 = df[df['month'] == mo]
res += ("%7.0f" % (df2[varname+"65"].mean(), ))
second += "%7.0f" % (df2[varname+"60"].mean(), )
res += ("\n")
second += "\n"
res += second
y1 = int(fdict.get('syear', 1990))
fig, ax = plt.subplots(1, 1, figsize=(8., 6.))
fig.text(0.5, 0.95, "[%s] %s (%s-%s)" % (
station, ctx['_nt'].sts[station]['name'], y1, y1 + 20), ha='center',
fontsize=16)
ax.set_title(r"%s base=60$^\circ$F" % (PDICT[varname], ))
filtered = df[(df['year'] >= y1) & (df['year'] <= (y1 + 20))]
df2 = filtered[
['month', 'year', varname + '60']
].pivot('year', 'month', varname + '60')
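    # Pivot to a year-by-month grid so each heatmap cell is one monthly base-60 total.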
sns.heatmap(df2, annot=True, fmt=".0f", linewidths=.5, ax=ax)
return fig, df, res
if __name__ == '__main__':
plotter(dict(syear=1990))
| [((1275, 1293), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""coop"""'], {}), "('coop')\n", (1285, 1293), False, 'from pyiem.util import get_dbconn, get_autoplot_context\n'), ((1403, 1960), 'pandas.io.sql.read_sql', 'read_sql', (['(\n """\n SELECT year, month, sum(precip) as sum_precip,\n avg(high) as avg_high,\n avg(low) as avg_low,\n sum(cdd(high,low,60)) as cdd60,\n sum(cdd(high,low,65)) as cdd65,\n sum(hdd(high,low,60)) as hdd60,\n sum(hdd(high,low,65)) as hdd65,\n sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,\n sum(case when snow >= 0.1 then 1 else 0 end) as snow_days\n from """\n + table + \' WHERE station = %s GROUP by year, month\\n \')', 'pgconn'], {'params': '(station,)', 'index_col': 'None'}), '(\n """\n SELECT year, month, sum(precip) as sum_precip,\n avg(high) as avg_high,\n avg(low) as avg_low,\n sum(cdd(high,low,60)) as cdd60,\n sum(cdd(high,low,65)) as cdd65,\n sum(hdd(high,low,60)) as hdd60,\n sum(hdd(high,low,65)) as hdd65,\n sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,\n sum(case when snow >= 0.1 then 1 else 0 end) as snow_days\n from """\n + table + \' WHERE station = %s GROUP by year, month\\n \', pgconn,\n params=(station,), index_col=None)\n', (1411, 1960), False, 'from pandas.io.sql import read_sql\n'), ((3860, 3898), 'pyiem.plot.use_agg.plt.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8.0, 6.0)'}), '(1, 1, figsize=(8.0, 6.0))\n', (3872, 3898), False, 'from pyiem.plot.use_agg import plt\n'), ((4289, 4351), 'seaborn.heatmap', 'sns.heatmap', (['df2'], {'annot': '(True)', 'fmt': '""".0f"""', 'linewidths': '(0.5)', 'ax': 'ax'}), "(df2, annot=True, fmt='.0f', linewidths=0.5, ax=ax)\n", (4300, 4351), True, 'import seaborn as sns\n'), ((1977, 2006), 'pyiem.exceptions.NoDataFound', 'NoDataFound', (['"""No Data Found."""'], {}), "('No Data Found.')\n", (1988, 2006), False, 'from pyiem.exceptions import NoDataFound\n'), ((710, 731), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (729, 731), False, 'import datetime\n'), ((2076, 2104), 'datetime.date', 'datetime.date', (['x[0]', 'x[1]', '(1)'], {}), '(x[0], x[1], 1)\n', (2089, 2104), False, 'import datetime\n'), ((2496, 2517), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2515, 2517), False, 'import datetime\n'), ((3186, 3210), 'datetime.date', 'datetime.date', (['yr', 'mo', '(1)'], {}), '(yr, mo, 1)\n', (3199, 3210), False, 'import datetime\n'), ((2389, 2410), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2408, 2410), False, 'import datetime\n')] |
krfricke/ray_shuffling_data_loader | examples/horovod/ray_torch_shuffle.py | b238871d45218c655cd0fcd78b8bf2a3940087f9 | import os
import pickle
import time
import timeit
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torch
import tempfile
import horovod.torch as hvd
from horovod.ray import RayExecutor
from ray_shuffling_data_loader.torch_dataset import (TorchShufflingDataset)
from ray_shuffling_data_loader.data_generation import (generate_data,
DATA_SPEC)
import argparse
DEFAULT_DATA_DIR = "s3://shuffling-data-loader-benchmarks/data/"
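# Map numpy dtypes (as used in DATA_SPEC) to the corresponding torch dtypes.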
numpy_to_torch_dtype = {
np.bool: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128
}
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument(
"--test-batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for testing (default: 1000)")
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)")
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)")
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)")
parser.add_argument(
"--no-cuda",
action="store_true",
default=False,
help="disables CUDA training")
parser.add_argument(
"--seed",
type=int,
default=42,
metavar="S",
help="random seed (default: 42)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help=("how many batches to wait before logging training "
"status"))
parser.add_argument(
"--fp16-allreduce",
action="store_true",
default=False,
help="use fp16 compression during allreduce")
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction")
parser.add_argument(
"--gradient-predivide-factor",
type=float,
default=1.0,
help=("apply gradient predivide factor in optimizer "
"(default: 1.0)"))
parser.add_argument("--num-workers", type=int, default=None)
parser.add_argument("--num-hosts", type=int, default=None)
parser.add_argument("--num-workers-per-host", type=int, default=None)
parser.add_argument("--cpus-per-worker", type=int, default=1)
parser.add_argument("--mock-train-step-time", type=float, default=1.0)
# Synthetic training data generation settings.
parser.add_argument("--cache-files", action="store_true", default=False)
parser.add_argument("--num-rows", type=int, default=2 * (10**7))
parser.add_argument("--num-files", type=int, default=25)
parser.add_argument("--max-row-group-skew", type=float, default=0.0)
parser.add_argument("--num-row-groups-per-file", type=int, default=5)
parser.add_argument("--data-dir", type=str, default=DEFAULT_DATA_DIR)
# Shuffling data loader settings.
parser.add_argument("--num-reducers", type=int, default=32)
parser.add_argument("--max-concurrent-epochs", type=int, default=2)
parser.add_argument("--address", default="auto")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
def train_main(args, filenames):
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if torch.cuda.is_available() and not args.no_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
rank = hvd.rank()
train_dataset = create_dataset(
filenames,
batch_size=args.batch_size,
rank=rank,
num_epochs=args.epochs,
world_size=hvd.size(),
num_reducers=args.num_reducers,
max_concurrent_epochs=args.max_concurrent_epochs)
model = Net()
    # By default, Adasum doesn't need the learning rate scaled up.
lr_scaler = hvd.size() if not args.use_adasum else 1
if torch.cuda.is_available() and not args.no_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(
model.parameters(), lr=args.lr * lr_scaler, momentum=args.momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = (hvd.Compression.fp16
if args.fp16_allreduce else hvd.Compression.none)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average,
gradient_predivide_factor=args.gradient_predivide_factor)
def _train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_dataset.set_epoch(epoch)
start_epoch = timeit.default_timer()
last_batch_time = start_epoch
batch_wait_times = []
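        # Record how long each step waits on the shuffling data loader for its next batch.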
for batch_idx, (data, target) in enumerate(train_dataset):
batch_wait_times.append(timeit.default_timer() - last_batch_time)
if torch.cuda.is_available() and not args.no_cuda:
if isinstance(data, list):
data = [t.cuda() for t in data]
target = target.cuda()
optimizer.zero_grad()
# output = model(data)
if batch_idx % args.log_interval == 0:
print(
f"Processing batch {batch_idx} in epoch {epoch} on worker "
f"{rank}.")
time.sleep(args.mock_train_step_time)
# TODO(Clark): Add worker synchronization barrier here.
# loss = F.nll_loss(output, target)
# loss.backward()
# optimizer.step()
last_batch_time = timeit.default_timer()
epoch_duration = timeit.default_timer() - start_epoch
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nEpoch {epoch}, worker {rank} stats over "
f"{len(batch_wait_times)} steps: {epoch_duration:.3f}")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
return batch_wait_times
print(f"Starting training on worker {rank}.")
batch_wait_times = []
for epoch in range(args.epochs):
batch_wait_times.extend(_train(epoch))
batch_wait_times.pop(0)
print(f"Done training on worker {rank}.")
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nWorker {rank} training stats over {args.epochs} epochs:")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
# TODO(Clark): Add logic to the dataset abstraction so we don't have to do
# this.
if rank == 0:
print("Waiting in rank 0 worker to let other workers consume queue...")
time.sleep(10)
print("Done waiting in rank 0 worker.")
def create_dataset(filenames, *, batch_size, rank, num_epochs, world_size,
num_reducers, max_concurrent_epochs):
print(f"Creating Torch shuffling dataset for worker {rank} with "
f"{batch_size} batch size, {num_epochs} epochs, {num_reducers} "
f"reducers, and {world_size} trainers.")
feature_columns = list(DATA_SPEC.keys())
feature_types = [
numpy_to_torch_dtype[dtype] for _, _, dtype in DATA_SPEC.values()
]
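    # The last column of the data spec is treated as the label; the rest are features.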
label_column = feature_columns.pop()
label_type = feature_types.pop()
return TorchShufflingDataset(
filenames,
num_epochs,
world_size,
batch_size,
rank,
num_reducers=num_reducers,
max_concurrent_epochs=max_concurrent_epochs,
feature_columns=feature_columns,
feature_types=feature_types,
label_column=label_column,
label_type=label_type)
if __name__ == "__main__":
args = parser.parse_args()
from ray_shuffling_data_loader.stats import human_readable_size
import ray
print("Connecting to Ray cluster...")
ray.init(address=args.address)
num_rows = args.num_rows
num_files = args.num_files
num_row_groups_per_file = args.num_row_groups_per_file
max_row_group_skew = args.max_row_group_skew
data_dir = args.data_dir
cache_path = os.path.join(tempfile.gettempdir(), "data_cache")
filenames = None
if args.cache_files and os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
filenames, num_bytes = pickle.load(f)
except Exception as exc:
print(f"Cache load failed - {exc}")
if not filenames:
print(f"Generating {num_rows} rows over {num_files} files, with "
f"{num_row_groups_per_file} row groups per file and at most "
f"{100 * max_row_group_skew:.1f}% row group skew.")
filenames, num_bytes = generate_data(num_rows, num_files,
num_row_groups_per_file,
max_row_group_skew, data_dir)
if args.cache_files:
with open(os.path.join(tempfile.gettempdir(), "data_cache"),
"wb") as f:
pickle.dump((filenames, num_bytes), f)
print(f"Generated {len(filenames)} files containing {num_rows} rows "
f"with {num_row_groups_per_file} row groups per file, totalling "
f"{human_readable_size(num_bytes)}.")
print("Create Ray executor")
worker_kwargs = {}
num_workers = args.num_workers
num_hosts = args.num_hosts
num_workers_per_host = args.num_workers_per_host
if num_workers is not None:
if num_hosts is not None:
raise ValueError(
"Only one of --num-workers and --num-hosts should be used.")
worker_kwargs["num_workers"] = num_workers
elif num_hosts is not None:
worker_kwargs["num_hosts"] = num_hosts
if num_workers_per_host is None:
raise ValueError("When giving --num-hosts, --num-workers-per-host "
"must also be given.")
worker_kwargs["num_workers_per_host"] = num_workers_per_host
cpus_per_worker = args.cpus_per_worker
settings = RayExecutor.create_settings(timeout_s=30)
executor = RayExecutor(
settings,
use_gpu=True,
gpus_per_worker=1,
cpus_per_worker=cpus_per_worker,
**worker_kwargs)
executor.start()
executor.run(train_main, args=[args, filenames])
executor.shutdown()
print("Done consuming batches.")
| [((922, 982), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (945, 982), False, 'import argparse\n'), ((4215, 4225), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (4223, 4225), True, 'import horovod.torch as hvd\n'), ((4230, 4258), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4247, 4258), False, 'import torch\n'), ((4513, 4537), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (4534, 4537), False, 'import torch\n'), ((4549, 4559), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (4557, 4559), True, 'import horovod.torch as hvd\n'), ((5517, 5570), 'horovod.torch.broadcast_optimizer_state', 'hvd.broadcast_optimizer_state', (['optimizer'], {'root_rank': '(0)'}), '(optimizer, root_rank=0)\n', (5546, 5570), True, 'import horovod.torch as hvd\n'), ((8136, 8161), 'numpy.mean', 'np.mean', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8143, 8161), True, 'import numpy as np\n'), ((8188, 8212), 'numpy.std', 'np.std', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8194, 8212), True, 'import numpy as np\n'), ((8239, 8263), 'numpy.max', 'np.max', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8245, 8263), True, 'import numpy as np\n'), ((8290, 8314), 'numpy.min', 'np.min', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8296, 8314), True, 'import numpy as np\n'), ((9439, 9709), 'ray_shuffling_data_loader.torch_dataset.TorchShufflingDataset', 'TorchShufflingDataset', (['filenames', 'num_epochs', 'world_size', 'batch_size', 'rank'], {'num_reducers': 'num_reducers', 'max_concurrent_epochs': 'max_concurrent_epochs', 'feature_columns': 'feature_columns', 'feature_types': 'feature_types', 'label_column': 'label_column', 'label_type': 'label_type'}), '(filenames, num_epochs, world_size, batch_size, rank,\n num_reducers=num_reducers, max_concurrent_epochs=max_concurrent_epochs,\n feature_columns=feature_columns, feature_types=feature_types,\n label_column=label_column, label_type=label_type)\n', (9460, 9709), False, 'from ray_shuffling_data_loader.torch_dataset import TorchShufflingDataset\n'), ((9976, 10006), 'ray.init', 'ray.init', ([], {'address': 'args.address'}), '(address=args.address)\n', (9984, 10006), False, 'import ray\n'), ((12163, 12204), 'horovod.ray.RayExecutor.create_settings', 'RayExecutor.create_settings', ([], {'timeout_s': '(30)'}), '(timeout_s=30)\n', (12190, 12204), False, 'from horovod.ray import RayExecutor\n'), ((12220, 12329), 'horovod.ray.RayExecutor', 'RayExecutor', (['settings'], {'use_gpu': '(True)', 'gpus_per_worker': '(1)', 'cpus_per_worker': 'cpus_per_worker'}), '(settings, use_gpu=True, gpus_per_worker=1, cpus_per_worker=\n cpus_per_worker, **worker_kwargs)\n', (12231, 12329), False, 'from horovod.ray import RayExecutor\n'), ((3628, 3659), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (3637, 3659), True, 'import torch.nn as nn\n'), ((3681, 3713), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (3690, 3713), True, 'import torch.nn as nn\n'), ((3740, 3754), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {}), '()\n', (3752, 3754), True, 'import torch.nn as nn\n'), ((3774, 3792), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(50)'], {}), '(320, 50)\n', (3783, 3792), True, 'import torch.nn as nn\n'), ((3812, 3829), 'torch.nn.Linear', 'nn.Linear', (['(50)', '(10)'], {}), '(50, 
10)\n', (3821, 3829), True, 'import torch.nn as nn\n'), ((4048, 4084), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (4057, 4084), True, 'import torch.nn.functional as F\n'), ((4124, 4140), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {}), '(x)\n', (4137, 4140), True, 'import torch.nn.functional as F\n'), ((4267, 4292), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4290, 4292), False, 'import torch\n'), ((4413, 4446), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4435, 4446), False, 'import torch\n'), ((4929, 4939), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (4937, 4939), True, 'import horovod.torch as hvd\n'), ((4978, 5003), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5001, 5003), False, 'import torch\n'), ((6220, 6242), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6240, 6242), False, 'import timeit\n'), ((7280, 7305), 'numpy.mean', 'np.mean', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7287, 7305), True, 'import numpy as np\n'), ((7336, 7360), 'numpy.std', 'np.std', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7342, 7360), True, 'import numpy as np\n'), ((7391, 7415), 'numpy.max', 'np.max', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7397, 7415), True, 'import numpy as np\n'), ((7446, 7470), 'numpy.min', 'np.min', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7452, 7470), True, 'import numpy as np\n'), ((8810, 8824), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (8820, 8824), False, 'import time\n'), ((9230, 9246), 'ray_shuffling_data_loader.data_generation.DATA_SPEC.keys', 'DATA_SPEC.keys', ([], {}), '()\n', (9244, 9246), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((10236, 10257), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (10255, 10257), False, 'import tempfile\n'), ((10322, 10348), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (10336, 10348), False, 'import os\n'), ((10815, 10908), 'ray_shuffling_data_loader.data_generation.generate_data', 'generate_data', (['num_rows', 'num_files', 'num_row_groups_per_file', 'max_row_group_skew', 'data_dir'], {}), '(num_rows, num_files, num_row_groups_per_file,\n max_row_group_skew, data_dir)\n', (10828, 10908), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((4387, 4403), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (4401, 4403), True, 'import horovod.torch as hvd\n'), ((4721, 4731), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (4729, 4731), True, 'import horovod.torch as hvd\n'), ((5183, 5199), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (5197, 5199), True, 'import horovod.torch as hvd\n'), ((5225, 5241), 'horovod.torch.local_size', 'hvd.local_size', ([], {}), '()\n', (5239, 5241), True, 'import horovod.torch as hvd\n'), ((6920, 6957), 'time.sleep', 'time.sleep', (['args.mock_train_step_time'], {}), '(args.mock_train_step_time)\n', (6930, 6957), False, 'import time\n'), ((7165, 7187), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7185, 7187), False, 'import timeit\n'), ((7213, 7235), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7233, 7235), False, 'import timeit\n'), ((9325, 9343), 'ray_shuffling_data_loader.data_generation.DATA_SPEC.values', 'DATA_SPEC.values', 
([], {}), '()\n', (9341, 9343), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((6471, 6496), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6494, 6496), False, 'import torch\n'), ((10448, 10462), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10459, 10462), False, 'import pickle\n'), ((11147, 11185), 'pickle.dump', 'pickle.dump', (['(filenames, num_bytes)', 'f'], {}), '((filenames, num_bytes), f)\n', (11158, 11185), False, 'import pickle\n'), ((11197, 11227), 'ray_shuffling_data_loader.stats.human_readable_size', 'human_readable_size', (['num_bytes'], {}), '(num_bytes)\n', (11216, 11227), False, 'from ray_shuffling_data_loader.stats import human_readable_size\n'), ((6414, 6436), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6434, 6436), False, 'import timeit\n'), ((11059, 11080), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (11078, 11080), False, 'import tempfile\n')] |
PitonX60/django-firebird | tests/test_main/test_base/tests.py | 407bd5916a8ae37184d06adb3b943d6bb4f7076f | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.conf import settings
from django.db import connection, DatabaseError
from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE
from django.db.models.fields.related import ForeignKey
from django.db.models.functions import (
Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth,
ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate,
TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime,
TruncYear,
)
from django.test import TestCase, TransactionTestCase, override_settings
from django.utils import timezone
from .models import BigS, FieldsTest, Foo, Bar, DTModel
def microsecond_support(value):
return value if connection.features.supports_microsecond_precision else value.replace(microsecond=0)
def truncate_to(value, kind, tzinfo=None):
# Convert to target timezone before truncation
if tzinfo is not None:
value = value.astimezone(tzinfo)
def truncate(value, kind):
if kind == 'second':
return value.replace(microsecond=0)
if kind == 'minute':
return value.replace(second=0, microsecond=0)
if kind == 'hour':
return value.replace(minute=0, second=0, microsecond=0)
if kind == 'day':
if isinstance(value, datetime):
return value.replace(hour=0, minute=0, second=0, microsecond=0)
return value
if kind == 'month':
if isinstance(value, datetime):
return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(day=1)
# otherwise, truncate to year
if isinstance(value, datetime):
return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(month=1, day=1)
value = truncate(value, kind)
if tzinfo is not None:
# If there was a daylight saving transition, then reset the timezone.
value = timezone.make_aware(value.replace(tzinfo=None), tzinfo)
return value
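# Illustrative sketch (not part of the original test module): how truncate_to()
# above collapses a value to the requested precision. The sample value is
# arbitrary and chosen only for this example.
def _truncate_to_examples():
    sample = datetime(2015, 6, 15, 14, 30, 50, 321)
    assert truncate_to(sample, 'minute') == datetime(2015, 6, 15, 14, 30)
    assert truncate_to(sample, 'month') == datetime(2015, 6, 1)
    # Plain dates only lose day/month information.
    assert truncate_to(sample.date(), 'year') == datetime(2015, 1, 1).date()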
class FirebirdTest(TestCase):
def setUp(self):
pass
def test_server_version(self):
version = connection.server_version
self.assertNotEqual(version, '')
def test_firebird_version(self):
version = connection.ops.firebird_version
self.assertNotEqual(version, [])
class DatabaseOperationsTest(TestCase):
def setUp(self):
self.ops = connection.ops
def test_get_sequence_name(self):
sq_name = self.ops.get_sequence_name('TEST')
self.assertEqual(sq_name, '"TEST_SQ"')
def test_drop_sequence_sql(self):
sql = self.ops.drop_sequence_sql('TEST')
self.assertEqual(sql, 'DROP SEQUENCE "TEST_SQ"')
def test_date_extract_sql(self):
sql = self.ops.date_extract_sql('week_day', 'DATE_FIELD')
value = "EXTRACT(WEEKDAY FROM DATE_FIELD) + 1"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('year', 'DATE_FIELD')
value = "EXTRACT(YEAR FROM DATE_FIELD)"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('month', 'DATE_FIELD')
value = "EXTRACT(MONTH FROM DATE_FIELD)"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('day', 'DATE_FIELD')
value = "EXTRACT(DAY FROM DATE_FIELD)"
self.assertEqual(sql, value)
def test_datetime_trunc_sql(self):
sql = self.ops.datetime_trunc_sql('year', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-01-01 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('month', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-01 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('day', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('hour', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('minute', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':'||EXTRACT(minute FROM DATE_FIELD)||':00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('second', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':'||EXTRACT(minute FROM DATE_FIELD)||':'||TRUNC(EXTRACT(second FROM DATE_FIELD)) AS TIMESTAMP)"
self.assertEqual(sql, value)
def test_time_trunc_sql(self):
sql = self.ops.time_trunc_sql('hour', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':00:00' AS TIME)"
self.assertEqual(sql, out)
sql = self.ops.time_trunc_sql('minute', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':' || EXTRACT(minute FROM TIME_FIELD) || ':00' AS TIME)"
self.assertEqual(sql, out)
sql = self.ops.time_trunc_sql('second', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':' || EXTRACT(minute FROM TIME_FIELD) || ':' || TRUNC(EXTRACT(second FROM TIME_FIELD)) AS TIME)"
self.assertEqual(sql, out)
class DatabaseSchemaTests(TransactionTestCase):
def test_no_index_for_foreignkey(self):
"""
FirebirdSQL already creates indexes automatically for foreign keys. (#70).
"""
index_sql = connection.schema_editor()._model_indexes_sql(Bar)
self.assertEqual(index_sql, [])
def test_fk_index_creation(self):
new_field = ForeignKey(Foo, on_delete=CASCADE)
new_field.set_attributes_from_name(None)
with connection.schema_editor() as editor:
editor.add_field(
Bar,
new_field
)
            # Just return indexes other than those automatically created by the FK
indexes = editor._get_field_indexes(Bar, new_field)
self.assertEqual(indexes, [])
def test_fk_remove_issue70(self):
with connection.schema_editor() as editor:
editor.remove_field(
Bar,
Bar._meta.get_field("a")
)
self.assertRaises(DatabaseError)
class SlugFieldTests(TestCase):
def test_slugfield_max_length(self):
"""
Make sure SlugField honors max_length (#9706)
"""
bs = BigS.objects.create(s='slug' * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, 'slug' * 50)
class DateFieldTests(TestCase):
    def test_date_interval(self):
obj = FieldsTest()
obj.pub_date = datetime.now()
obj.mod_date = obj.pub_date + timedelta(days=3)
obj.save()
objs = FieldsTest.objects.filter(mod_date__gte=F('pub_date') + timedelta(days=3)).all()
self.assertEqual(len(objs), 1)
@override_settings(USE_TZ=False)
class DateFunctionTests(TestCase):
def create_model(self, start_datetime, end_datetime):
return DTModel.objects.create(
name=start_datetime.isoformat(),
start_datetime=start_datetime, end_datetime=end_datetime,
start_date=start_datetime.date(), end_date=end_datetime.date(),
start_time=start_datetime.time(), end_time=end_datetime.time(),
duration=(end_datetime - start_datetime),
)
def test_trunc_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
msg = 'output_field must be either DateField, TimeField, or DateTimeField'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField())))
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField())))
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime, kind)),
(truncate_to(end_datetime, kind))
],
lambda m: (m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime.date(), kind)),
(truncate_to(end_datetime.date(), kind))
],
lambda m: (m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime.time(), kind)),
(truncate_to(end_datetime.time(), kind))
],
lambda m: (m.truncated)
)
test_date_kind('year')
test_date_kind('month')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('month')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
def test_trunc_time_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321000))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123000))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'),
[
(start_datetime.time()),
(end_datetime.time()),
],
lambda m: (m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField())))
| [((7508, 7539), 'django.test.override_settings', 'override_settings', ([], {'USE_TZ': '(False)'}), '(USE_TZ=False)\n', (7525, 7539), False, 'from django.test import TestCase, TransactionTestCase, override_settings\n'), ((6230, 6264), 'django.db.models.fields.related.ForeignKey', 'ForeignKey', (['Foo'], {'on_delete': 'CASCADE'}), '(Foo, on_delete=CASCADE)\n', (6240, 6264), False, 'from django.db.models.fields.related import ForeignKey\n'), ((7279, 7293), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7291, 7293), False, 'from datetime import datetime, timedelta\n'), ((6327, 6353), 'django.db.connection.schema_editor', 'connection.schema_editor', ([], {}), '()\n', (6351, 6353), False, 'from django.db import connection, DatabaseError\n'), ((6687, 6713), 'django.db.connection.schema_editor', 'connection.schema_editor', ([], {}), '()\n', (6711, 6713), False, 'from django.db import connection, DatabaseError\n'), ((7332, 7349), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (7341, 7349), False, 'from datetime import datetime, timedelta\n'), ((8081, 8119), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(15)', '(14)', '(30)', '(50)', '(321)'], {}), '(2015, 6, 15, 14, 30, 50, 321)\n', (8089, 8119), False, 'from datetime import datetime, timedelta\n'), ((8164, 8202), 'datetime.datetime', 'datetime', (['(2016)', '(6)', '(15)', '(14)', '(10)', '(50)', '(123)'], {}), '(2016, 6, 15, 14, 10, 50, 123)\n', (8172, 8202), False, 'from datetime import datetime, timedelta\n'), ((8261, 8310), 'django.utils.timezone.make_aware', 'timezone.make_aware', (['start_datetime'], {'is_dst': '(False)'}), '(start_datetime, is_dst=False)\n', (8280, 8310), False, 'from django.utils import timezone\n'), ((8338, 8385), 'django.utils.timezone.make_aware', 'timezone.make_aware', (['end_datetime'], {'is_dst': '(False)'}), '(end_datetime, is_dst=False)\n', (8357, 8385), False, 'from django.utils import timezone\n'), ((11829, 11870), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(15)', '(14)', '(30)', '(50)', '(321000)'], {}), '(2015, 6, 15, 14, 30, 50, 321000)\n', (11837, 11870), False, 'from datetime import datetime, timedelta\n'), ((11915, 11956), 'datetime.datetime', 'datetime', (['(2016)', '(6)', '(15)', '(14)', '(10)', '(50)', '(123000)'], {}), '(2016, 6, 15, 14, 10, 50, 123000)\n', (11923, 11956), False, 'from datetime import datetime, timedelta\n'), ((12015, 12064), 'django.utils.timezone.make_aware', 'timezone.make_aware', (['start_datetime'], {'is_dst': '(False)'}), '(start_datetime, is_dst=False)\n', (12034, 12064), False, 'from django.utils import timezone\n'), ((12092, 12139), 'django.utils.timezone.make_aware', 'timezone.make_aware', (['end_datetime'], {'is_dst': '(False)'}), '(end_datetime, is_dst=False)\n', (12111, 12139), False, 'from django.utils import timezone\n'), ((6080, 6106), 'django.db.connection.schema_editor', 'connection.schema_editor', ([], {}), '()\n', (6104, 6106), False, 'from django.db import connection, DatabaseError\n'), ((9136, 9165), 'django.db.models.functions.Trunc', 'Trunc', (['"""start_date"""', '"""second"""'], {}), "('start_date', 'second')\n", (9141, 9165), False, 'from django.db.models.functions import Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime, TruncYear\n'), ((9331, 9359), 'django.db.models.functions.Trunc', 'Trunc', (['"""start_time"""', '"""month"""'], {}), 
"('start_time', 'month')\n", (9336, 9359), False, 'from django.db.models.functions import Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime, TruncYear\n'), ((11693, 11704), 'django.db.models.DateField', 'DateField', ([], {}), '()\n', (11702, 11704), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((12814, 12837), 'django.db.models.functions.TruncTime', 'TruncTime', (['"""start_date"""'], {}), "('start_date')\n", (12823, 12837), False, 'from django.db.models.functions import Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime, TruncYear\n'), ((7425, 7438), 'django.db.models.F', 'F', (['"""pub_date"""'], {}), "('pub_date')\n", (7426, 7438), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((7441, 7458), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (7450, 7458), False, 'from datetime import datetime, timedelta\n'), ((12333, 12360), 'django.db.models.functions.TruncTime', 'TruncTime', (['"""start_datetime"""'], {}), "('start_datetime')\n", (12342, 12360), False, 'from django.db.models.functions import Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime, TruncYear\n'), ((12614, 12641), 'django.db.models.functions.TruncTime', 'TruncTime', (['"""start_datetime"""'], {}), "('start_datetime')\n", (12623, 12641), False, 'from django.db.models.functions import Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime, TruncYear\n'), ((8735, 8749), 'django.db.models.IntegerField', 'IntegerField', ([], {}), '()\n', (8747, 8749), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((8954, 8969), 'django.db.models.DateTimeField', 'DateTimeField', ([], {}), '()\n', (8967, 8969), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((9567, 9582), 'django.db.models.DateTimeField', 'DateTimeField', ([], {}), '()\n', (9580, 9582), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((9792, 9807), 'django.db.models.DateTimeField', 'DateTimeField', ([], {}), '()\n', (9805, 9807), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((13036, 13047), 'django.db.models.DateField', 'DateField', ([], {}), '()\n', (13045, 13047), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((10003, 10018), 'django.db.models.DateTimeField', 'DateTimeField', ([], {}), '()\n', (10016, 10018), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((10452, 10463), 'django.db.models.DateField', 'DateField', ([], {}), '()\n', (10461, 10463), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n'), ((10911, 10922), 'django.db.models.TimeField', 
'TimeField', ([], {}), '()\n', (10920, 10922), False, 'from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE\n')] |
justinbois/eqtk | tests/test_past_failures.py | 7363b8c09e35088d2cb2cb5a62d315b52cce0d9b | import pytest
import numpy as np
import eqtk
def test_promiscuous_binding_failure():
A = np.array(
[
[
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
],
[
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
],
[
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
],
[
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
],
[
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
],
[
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
],
]
)
G = np.array(
[
-0.51720535,
-0.69471304,
-1.78260496,
-1.32337777,
-0.63267947,
-0.57923893,
-0.78718634,
-0.27521037,
-0.13733511,
-0.69433251,
1.6858364,
-0.43683479,
0.39312096,
-0.0625205,
0.23139303,
0.07680628,
-0.52774543,
1.74592678,
]
)
x0 = np.array(
[
[
2.48257788e01,
1.72132293e-01,
1.14833731e-02,
5.00547317e-02,
1.38949549e-01,
1.93069773e01,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
]
]
)
def test_spontaneous_production_failure():
N = np.array(
[[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]], dtype=float
)
A = np.array(
[[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]], dtype=float
)
G = np.array([0, 1, 2, 3, 4, 5])
K = np.exp(-np.dot(N, G))
for x0_val in [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
]:
x0 = np.array(x0_val, dtype=float)
x_NK = eqtk.solve(c0=x0, N=N, K=K)
with pytest.raises(ValueError) as excinfo:
x_AG = eqtk.solve(c0=x0, A=A, G=G)
excinfo.match("`A` must have all nonnegative entries.")
assert eqtk.eqcheck(x_NK, x0, N=N, K=K)
def test_scale_factor_failure():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, 0.77428976, -5.64873697, -0.95863043])
x0 = np.array(
[
[
5.50293892e-05,
6.49273515e-08,
2.75796219e-05,
1.29854703e-07,
3.24636758e-08,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_trivial_elemental_failure():
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[3.48219906e-06, 1.32719868e-10]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[2.24222410e-08, 1.63359284e-04]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
G = np.array([0.0, 0.0, 0.0])
x0 = np.array([[2.63761955e-04, 4.93360042e-07, 4.88340687e-07]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
def test_past_failure_1():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, -16.76857677, -2.38430181, 1.22028775])
x0 = np.array(
[
[
1.65989040e-10,
1.07630096e-04,
1.65989040e-10,
1.65989040e-10,
5.38150479e-05,
]
]
)
x = eqtk.solve(x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_past_failure_2():
N = np.array([[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]])
minus_log_K = np.array([-43.66660344, -68.14676841, -92.28023823])
x0 = np.array([[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]])
K = np.exp(-minus_log_K)
x = eqtk.solve(x0, N, K)
assert eqtk.eqcheck(x, x0, N, K)
def test_small_conc_failure():
A = np.array(
[
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 2.0],
[1.0, 0.0, 0.0, 1.0, 2.0],
]
)
G = np.array(
[
-1.1323012373599138e02,
-2.7028447814426110e-01,
-2.3382656193096754e01,
-1.0088531260804201e02,
-5.7676558386243052e01,
]
)
x0 = np.array(
[
[
1.8134373707286439e-08,
3.5913242229740680e-14,
3.5913242229740680e-14,
3.5913242229740680e-14,
1.7956621114870340e-14,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
| [((96, 687), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0,\n 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, \n 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0,\n 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0,\n 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0,\n 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0,\n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, \n 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,\n 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,\n 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]])\n', (104, 687), True, 'import numpy as np\n'), ((2582, 2833), 'numpy.array', 'np.array', (['[-0.51720535, -0.69471304, -1.78260496, -1.32337777, -0.63267947, -\n 0.57923893, -0.78718634, -0.27521037, -0.13733511, -0.69433251, \n 1.6858364, -0.43683479, 0.39312096, -0.0625205, 0.23139303, 0.07680628,\n -0.52774543, 1.74592678]'], {}), '([-0.51720535, -0.69471304, -1.78260496, -1.32337777, -0.63267947, \n -0.57923893, -0.78718634, -0.27521037, -0.13733511, -0.69433251, \n 1.6858364, -0.43683479, 0.39312096, -0.0625205, 0.23139303, 0.07680628,\n -0.52774543, 1.74592678])\n', (2590, 2833), True, 'import numpy as np\n'), ((3070, 3224), 'numpy.array', 'np.array', (['[[24.8257788, 0.172132293, 0.0114833731, 0.0500547317, 0.138949549, \n 19.3069773, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[24.8257788, 0.172132293, 0.0114833731, 0.0500547317, 0.138949549,\n 19.3069773, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (3078, 3224), True, 'import numpy as np\n'), ((3749, 3838), 'numpy.array', 'np.array', (['[[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]]'], {'dtype': 'float'}), '([[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]],\n dtype=float)\n', (3757, 3838), True, 'import numpy as np\n'), ((3858, 3947), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]]'], {'dtype': 'float'}), '([[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]],\n dtype=float)\n', (3866, 3947), True, 'import numpy as np\n'), ((3967, 3995), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (3975, 3995), True, 'import numpy as np\n'), ((4507, 4571), 'numpy.array', 'np.array', (['[[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]]'], {}), '([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])\n', (4515, 4571), True, 'import numpy as np\n'), ((4580, 4638), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.77428976, -5.64873697, -0.95863043]'], {}), '([0.0, 0.0, 0.77428976, -5.64873697, -0.95863043])\n', (4588, 4638), True, 'import numpy as np\n'), ((4648, 4745), 'numpy.array', 'np.array', (['[[5.50293892e-05, 6.49273515e-08, 2.75796219e-05, 1.29854703e-07, \n 3.24636758e-08]]'], {}), '([[5.50293892e-05, 6.49273515e-08, 2.75796219e-05, 1.29854703e-07, \n 3.24636758e-08]])\n', (4656, 4745), True, 'import 
numpy as np\n'), ((4880, 4907), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (4890, 4907), False, 'import eqtk\n'), ((4919, 4948), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (4931, 4948), False, 'import eqtk\n'), ((4997, 5031), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (5005, 5031), True, 'import numpy as np\n'), ((5040, 5060), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5048, 5060), True, 'import numpy as np\n'), ((5070, 5114), 'numpy.array', 'np.array', (['[[3.48219906e-06, 1.32719868e-10]]'], {}), '([[3.48219906e-06, 1.32719868e-10]])\n', (5078, 5114), True, 'import numpy as np\n'), ((5180, 5214), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (5188, 5214), True, 'import numpy as np\n'), ((5223, 5243), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5231, 5243), True, 'import numpy as np\n'), ((5253, 5296), 'numpy.array', 'np.array', (['[[2.2422241e-08, 0.000163359284]]'], {}), '([[2.2422241e-08, 0.000163359284]])\n', (5261, 5296), True, 'import numpy as np\n'), ((5363, 5424), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (5371, 5424), True, 'import numpy as np\n'), ((5433, 5458), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5441, 5458), True, 'import numpy as np\n'), ((5468, 5528), 'numpy.array', 'np.array', (['[[0.000263761955, 4.93360042e-07, 4.88340687e-07]]'], {}), '([[0.000263761955, 4.93360042e-07, 4.88340687e-07]])\n', (5476, 5528), True, 'import numpy as np\n'), ((5622, 5686), 'numpy.array', 'np.array', (['[[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]]'], {}), '([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])\n', (5630, 5686), True, 'import numpy as np\n'), ((5695, 5754), 'numpy.array', 'np.array', (['[0.0, 0.0, -16.76857677, -2.38430181, 1.22028775]'], {}), '([0.0, 0.0, -16.76857677, -2.38430181, 1.22028775])\n', (5703, 5754), True, 'import numpy as np\n'), ((5764, 5858), 'numpy.array', 'np.array', (['[[1.6598904e-10, 0.000107630096, 1.6598904e-10, 1.6598904e-10, 5.38150479e-05]]'], {}), '([[1.6598904e-10, 0.000107630096, 1.6598904e-10, 1.6598904e-10, \n 5.38150479e-05]])\n', (5772, 5858), True, 'import numpy as np\n'), ((5996, 6020), 'eqtk.solve', 'eqtk.solve', (['x0'], {'A': 'A', 'G': 'G'}), '(x0, A=A, G=G)\n', (6006, 6020), False, 'import eqtk\n'), ((6032, 6061), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (6044, 6061), False, 'import eqtk\n'), ((6099, 6178), 'numpy.array', 'np.array', (['[[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]]'], {}), '([[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]])\n', (6107, 6178), True, 'import numpy as np\n'), ((6197, 6249), 'numpy.array', 'np.array', (['[-43.66660344, -68.14676841, -92.28023823]'], {}), '([-43.66660344, -68.14676841, -92.28023823])\n', (6205, 6249), True, 'import numpy as np\n'), ((6259, 6335), 'numpy.array', 'np.array', (['[[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]]'], {}), '([[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]])\n', (6267, 6335), True, 'import numpy as np\n'), ((6344, 6364), 'numpy.exp', 'np.exp', (['(-minus_log_K)'], {}), '(-minus_log_K)\n', (6350, 6364), True, 'import numpy as np\n'), ((6373, 
6393), 'eqtk.solve', 'eqtk.solve', (['x0', 'N', 'K'], {}), '(x0, N, K)\n', (6383, 6393), False, 'import eqtk\n'), ((6405, 6430), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0', 'N', 'K'], {}), '(x, x0, N, K)\n', (6417, 6430), False, 'import eqtk\n'), ((6472, 6568), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 2.0], [1.0, 0.0, 0.0, 1.0,\n 2.0]]'], {}), '([[1.0, 0.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 2.0], [1.0, 0.0, \n 0.0, 1.0, 2.0]])\n', (6480, 6568), True, 'import numpy as np\n'), ((6633, 6752), 'numpy.array', 'np.array', (['[-113.23012373599138, -0.2702844781442611, -23.382656193096754, -\n 100.88531260804201, -57.67655838624305]'], {}), '([-113.23012373599138, -0.2702844781442611, -23.382656193096754, -\n 100.88531260804201, -57.67655838624305])\n', (6641, 6752), True, 'import numpy as np\n'), ((6859, 6991), 'numpy.array', 'np.array', (['[[1.813437370728644e-08, 3.591324222974068e-14, 3.591324222974068e-14, \n 3.591324222974068e-14, 1.795662111487034e-14]]'], {}), '([[1.813437370728644e-08, 3.591324222974068e-14, \n 3.591324222974068e-14, 3.591324222974068e-14, 1.795662111487034e-14]])\n', (6867, 6991), True, 'import numpy as np\n'), ((7131, 7158), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (7141, 7158), False, 'import eqtk\n'), ((7170, 7199), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (7182, 7199), False, 'import eqtk\n'), ((4179, 4208), 'numpy.array', 'np.array', (['x0_val'], {'dtype': 'float'}), '(x0_val, dtype=float)\n', (4187, 4208), True, 'import numpy as np\n'), ((4224, 4251), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'N': 'N', 'K': 'K'}), '(c0=x0, N=N, K=K)\n', (4234, 4251), False, 'import eqtk\n'), ((4431, 4463), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x_NK', 'x0'], {'N': 'N', 'K': 'K'}), '(x_NK, x0, N=N, K=K)\n', (4443, 4463), False, 'import eqtk\n'), ((5138, 5165), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5148, 5165), False, 'import eqtk\n'), ((5321, 5348), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5331, 5348), False, 'import eqtk\n'), ((5552, 5579), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5562, 5579), False, 'import eqtk\n'), ((4012, 4024), 'numpy.dot', 'np.dot', (['N', 'G'], {}), '(N, G)\n', (4018, 4024), True, 'import numpy as np\n'), ((4266, 4291), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4279, 4291), False, 'import pytest\n'), ((4323, 4350), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (4333, 4350), False, 'import eqtk\n')] |
suresh198526/pulumi-azure | sdk/python/pulumi_azure/lb/outbound_rule.py | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['OutboundRule']
class OutboundRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
enable_tcp_reset: Optional[pulumi.Input[bool]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]]] = None,
idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a Load Balancer Outbound Rule.
> **NOTE** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration and a Backend Address Pool Attached.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US")
example_public_ip = azure.network.PublicIp("examplePublicIp",
location="West US",
resource_group_name=example_resource_group.name,
allocation_method="Static")
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location="West US",
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
example_backend_address_pool = azure.lb.BackendAddressPool("exampleBackendAddressPool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id)
example_outbound_rule = azure.lb.OutboundRule("exampleOutboundRule",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Tcp",
backend_address_pool_id=example_backend_address_pool.id,
frontend_ip_configurations=[azure.lb.OutboundRuleFrontendIpConfigurationArgs(
name="PublicIPAddress",
)])
```
## Import
Load Balancer Outbound Rules can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/outboundRule:OutboundRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/outboundRules/rule1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below.
:param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allocated_outbound_ports'] = allocated_outbound_ports
if backend_address_pool_id is None:
raise TypeError("Missing required property 'backend_address_pool_id'")
__props__['backend_address_pool_id'] = backend_address_pool_id
__props__['enable_tcp_reset'] = enable_tcp_reset
__props__['frontend_ip_configurations'] = frontend_ip_configurations
__props__['idle_timeout_in_minutes'] = idle_timeout_in_minutes
if loadbalancer_id is None:
raise TypeError("Missing required property 'loadbalancer_id'")
__props__['loadbalancer_id'] = loadbalancer_id
__props__['name'] = name
if protocol is None:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
super(OutboundRule, __self__).__init__(
'azure:lb/outboundRule:OutboundRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
enable_tcp_reset: Optional[pulumi.Input[bool]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]]] = None,
idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None) -> 'OutboundRule':
"""
Get an existing OutboundRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below.
:param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allocated_outbound_ports"] = allocated_outbound_ports
__props__["backend_address_pool_id"] = backend_address_pool_id
__props__["enable_tcp_reset"] = enable_tcp_reset
__props__["frontend_ip_configurations"] = frontend_ip_configurations
__props__["idle_timeout_in_minutes"] = idle_timeout_in_minutes
__props__["loadbalancer_id"] = loadbalancer_id
__props__["name"] = name
__props__["protocol"] = protocol
__props__["resource_group_name"] = resource_group_name
return OutboundRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allocatedOutboundPorts")
def allocated_outbound_ports(self) -> pulumi.Output[Optional[int]]:
"""
The number of outbound ports to be used for NAT.
"""
return pulumi.get(self, "allocated_outbound_ports")
@property
@pulumi.getter(name="backendAddressPoolId")
def backend_address_pool_id(self) -> pulumi.Output[str]:
"""
The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
"""
return pulumi.get(self, "backend_address_pool_id")
@property
@pulumi.getter(name="enableTcpReset")
def enable_tcp_reset(self) -> pulumi.Output[Optional[bool]]:
"""
Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
"""
return pulumi.get(self, "enable_tcp_reset")
@property
@pulumi.getter(name="frontendIpConfigurations")
def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.OutboundRuleFrontendIpConfiguration']]]:
"""
One or more `frontend_ip_configuration` blocks as defined below.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> pulumi.Output[Optional[int]]:
"""
The timeout for the TCP idle connection
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="loadbalancerId")
def loadbalancer_id(self) -> pulumi.Output[str]:
"""
The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "loadbalancer_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
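# Illustrative usage sketch (not part of the generated provider code): looking up
# an existing Outbound Rule by its Azure resource id with OutboundRule.get().
# The resource id below is a placeholder following the format shown in the import
# example above; it is not a real subscription.
def _example_lookup_outbound_rule() -> 'OutboundRule':
    return OutboundRule.get(
        "importedOutboundRule",
        id=("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1"
            "/providers/Microsoft.Network/loadBalancers/lb1/outboundRules/rule1"))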
| [((10406, 10450), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""allocatedOutboundPorts"""'}), "(name='allocatedOutboundPorts')\n", (10419, 10450), False, 'import pulumi\n'), ((10684, 10726), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""backendAddressPoolId"""'}), "(name='backendAddressPoolId')\n", (10697, 10726), False, 'import pulumi\n'), ((11009, 11045), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""enableTcpReset"""'}), "(name='enableTcpReset')\n", (11022, 11045), False, 'import pulumi\n'), ((11369, 11415), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""frontendIpConfigurations"""'}), "(name='frontendIpConfigurations')\n", (11382, 11415), False, 'import pulumi\n'), ((11721, 11763), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""idleTimeoutInMinutes"""'}), "(name='idleTimeoutInMinutes')\n", (11734, 11763), False, 'import pulumi\n'), ((11986, 12022), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""loadbalancerId"""'}), "(name='loadbalancerId')\n", (11999, 12022), False, 'import pulumi\n'), ((12785, 12824), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (12798, 12824), False, 'import pulumi\n'), ((10619, 10663), 'pulumi.get', 'pulumi.get', (['self', '"""allocated_outbound_ports"""'], {}), "(self, 'allocated_outbound_ports')\n", (10629, 10663), False, 'import pulumi\n'), ((10945, 10988), 'pulumi.get', 'pulumi.get', (['self', '"""backend_address_pool_id"""'], {}), "(self, 'backend_address_pool_id')\n", (10955, 10988), False, 'import pulumi\n'), ((11312, 11348), 'pulumi.get', 'pulumi.get', (['self', '"""enable_tcp_reset"""'], {}), "(self, 'enable_tcp_reset')\n", (11322, 11348), False, 'import pulumi\n'), ((11654, 11700), 'pulumi.get', 'pulumi.get', (['self', '"""frontend_ip_configurations"""'], {}), "(self, 'frontend_ip_configurations')\n", (11664, 11700), False, 'import pulumi\n'), ((11922, 11965), 'pulumi.get', 'pulumi.get', (['self', '"""idle_timeout_in_minutes"""'], {}), "(self, 'idle_timeout_in_minutes')\n", (11932, 11965), False, 'import pulumi\n'), ((12240, 12275), 'pulumi.get', 'pulumi.get', (['self', '"""loadbalancer_id"""'], {}), "(self, 'loadbalancer_id')\n", (12250, 12275), False, 'import pulumi\n'), ((12491, 12515), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (12501, 12515), False, 'import pulumi\n'), ((12736, 12764), 'pulumi.get', 'pulumi.get', (['self', '"""protocol"""'], {}), "(self, 'protocol')\n", (12746, 12764), False, 'import pulumi\n'), ((13044, 13083), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (13054, 13083), False, 'import pulumi\n'), ((4969, 5044), 'warnings.warn', 'warnings.warn', (['"""explicit use of __name__ is deprecated"""', 'DeprecationWarning'], {}), "('explicit use of __name__ is deprecated', DeprecationWarning)\n", (4982, 5044), False, 'import warnings\n'), ((5127, 5226), 'warnings.warn', 'warnings.warn', (['"""explicit use of __opts__ is deprecated, use \'opts\' instead"""', 'DeprecationWarning'], {}), '("explicit use of __opts__ is deprecated, use \'opts\' instead",\n DeprecationWarning)\n', (5140, 5226), False, 'import warnings\n'), ((5295, 5319), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (5317, 5319), False, 'import pulumi\n'), ((9710, 9739), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (9732, 9739), False, 'import pulumi\n')] |
Juanlu001/orbit-predictor | orbit_predictor/predictors/base.py | ca67e2e859932938627ed24e5cbf58c887cd99c0 | # MIT License
#
# Copyright (c) 2017 Satellogic SA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime as dt
import logging
import warnings
from collections import namedtuple
from math import pi, acos, degrees, radians
import numpy as np
try:
from scipy.optimize import brentq, minimize_scalar
except ImportError:
warnings.warn('scipy module was not found, some features may not work properly.',
ImportWarning)
from orbit_predictor.constants import MU_E
from orbit_predictor.exceptions import NotReachable, PropagationError
from orbit_predictor import coordinate_systems
from orbit_predictor.keplerian import rv2coe
from orbit_predictor.utils import (
angle_between,
cross_product,
dot_product,
reify,
vector_diff,
vector_norm,
gstime_from_datetime,
get_shadow,
get_sun,
eclipse_duration,
get_satellite_minus_penumbra_verticals,
)
logger = logging.getLogger(__name__)
ONE_SECOND = dt.timedelta(seconds=1)
def round_datetime(dt_):
return dt_
class Position(namedtuple(
"Position", ['when_utc', 'position_ecef', 'velocity_ecef', 'error_estimate'])):
@reify
def position_llh(self):
"""Latitude (deg), longitude (deg), altitude (km)."""
return coordinate_systems.ecef_to_llh(self.position_ecef)
@reify
def osculating_elements(self):
"""Osculating Keplerian orbital elements.
Semimajor axis (km), eccentricity, inclination (deg),
right ascension of the ascending node or RAAN (deg),
argument of perigee (deg), true anomaly (deg).
"""
gmst = gstime_from_datetime(self.when_utc)
position_eci = coordinate_systems.ecef_to_eci(self.position_ecef, gmst)
velocity_eci = coordinate_systems.ecef_to_eci(self.velocity_ecef, gmst)
# Convert position to Keplerian osculating elements
p, ecc, inc, raan, argp, ta = rv2coe(
MU_E, np.array(position_eci), np.array(velocity_eci)
)
# Transform to more familiar semimajor axis
sma = p / (1 - ecc ** 2)
return sma, ecc, degrees(inc), degrees(raan), degrees(argp), degrees(ta)
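# Illustrative sketch (not part of the original module): building a Position from
# an ECEF state vector and reading its derived properties. The state vector below
# is an arbitrary LEO-like example (km and km/s), used only to show the call pattern.
def _example_position_properties():
    pos = Position(
        when_utc=dt.datetime(2017, 1, 1),
        position_ecef=(2328.97, -5995.22, 1719.97),
        velocity_ecef=(2.912, -0.983, -7.091),
        error_estimate=None,
    )
    lat_deg, lon_deg, alt_km = pos.position_llh
    sma_km, ecc, inc_deg, raan_deg, argp_deg, ta_deg = pos.osculating_elements
    return (lat_deg, lon_deg, alt_km), (sma_km, ecc, inc_deg, raan_deg, argp_deg, ta_deg)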
class PredictedPass:
def __init__(self, location, sate_id,
max_elevation_deg,
aos, los, duration_s,
max_elevation_position=None,
max_elevation_date=None):
self.location = location
self.sate_id = sate_id
self.max_elevation_position = max_elevation_position
self.max_elevation_date = max_elevation_date
self.max_elevation_deg = max_elevation_deg
self.aos = aos
self.los = los
self.duration_s = duration_s
@property
def midpoint(self):
"""Returns a datetime of the midpoint of the pass"""
return self.aos + (self.los - self.aos) / 2
def __repr__(self):
return "<PredictedPass {} over {} on {}>".format(self.sate_id, self.location, self.aos)
def __eq__(self, other):
return all([issubclass(other.__class__, PredictedPass),
self.location == other.location,
self.sate_id == other.sate_id,
self.max_elevation_position == other.max_elevation_position,
self.max_elevation_date == other.max_elevation_date,
self.max_elevation_deg == other.max_elevation_deg,
self.aos == other.aos,
self.los == other.los,
self.duration_s == other.duration_s])
def get_off_nadir_angle(self):
warnings.warn("This method is deprecated!", DeprecationWarning)
return self.off_nadir_deg
@reify
def off_nadir_deg(self):
"""Computes off-nadir angle calculation
Given satellite position ``sate_pos``, velocity ``sate_vel``, and
location ``target`` in a common frame, off-nadir angle ``off_nadir_angle``
is given by:
t2b = sate_pos - target
cos(off_nadir_angle) = (sate_pos · t2b) # Vectorial dot product
_______________________
|| sate_pos || || t2b||
Sign for the rotation is calculated this way
cross = target ⨯ sate_pos
sign = cross · sate_vel
____________________
| cross · sate_vel |
"""
sate_pos = self.max_elevation_position.position_ecef
sate_vel = self.max_elevation_position.velocity_ecef
target = self.location.position_ecef
t2b = vector_diff(sate_pos, target)
angle = acos(
dot_product(sate_pos, t2b) / (vector_norm(sate_pos) * vector_norm(t2b))
)
cross = cross_product(target, sate_pos)
dot = dot_product(cross, sate_vel)
try:
sign = dot / abs(dot)
except ZeroDivisionError:
sign = 1
return degrees(angle) * sign
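# Illustrative sketch (not part of the original module): the off-nadir formula from
# PredictedPass.off_nadir_deg written with plain numpy for a toy geometry in which
# the satellite flies 500 km above the x axis and the target sits slightly off the
# ground track. All vectors share one ECEF-like frame and are expressed in km.
def _example_off_nadir_angle():
    sate_pos = np.array([6878.0, 0.0, 0.0])      # satellite position
    sate_vel = np.array([0.0, 7.6, 0.0])         # satellite velocity (km/s)
    target = np.array([6378.0, 100.0, 0.0])      # observed ground location
    t2b = sate_pos - target
    cos_angle = np.dot(sate_pos, t2b) / (np.linalg.norm(sate_pos) * np.linalg.norm(t2b))
    angle_deg = degrees(acos(cos_angle))
    # The cross and dot products only decide the sign (direction of the rotation).
    sign = np.sign(np.dot(np.cross(target, sate_pos), sate_vel)) or 1.0
    return angle_deg * sign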
class Predictor:
@property
def sate_id(self):
raise NotImplementedError
def propagate_eci(self, when_utc=None):
raise NotImplementedError
def get_position(self, when_utc=None):
raise NotImplementedError("You have to implement it!")
def get_shadow(self, when_utc=None):
"""Gives illumination at given time (2 for illuminated, 1 for penumbra, 0 for umbra)."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
return get_shadow(
self.get_position(when_utc).position_ecef,
when_utc
)
def get_normal_vector(self, when_utc=None):
"""Gets unitary normal vector (orthogonal to orbital plane) at given time."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position, velocity = self.propagate_eci(when_utc)
orbital_plane_normal = np.cross(position, velocity)
return orbital_plane_normal / vector_norm(orbital_plane_normal)
def get_beta(self, when_utc=None):
"""Gets angle between orbital plane and Sun direction (beta) at given time, in degrees."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
# Here we calculate the complementary angle of beta,
# because we use the normal vector of the orbital plane
beta_comp = angle_between(
get_sun(when_utc),
self.get_normal_vector(when_utc)
)
# We subtract from 90 degrees to return the real beta angle
return 90 - beta_comp
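# Illustrative sketch (not part of the original module): the beta angle computed
# directly from the Sun vector and an orbital-plane normal, mirroring
# Predictor.get_beta above. The normal used here is an arbitrary example: an
# equatorial orbit, whose plane normal points along +z, so beta equals the
# solar declination.
def _example_beta_angle(when_utc=None):
    if when_utc is None:
        when_utc = dt.datetime.utcnow()
    orbital_plane_normal = np.array([0.0, 0.0, 1.0])
    # angle_between returns degrees; beta is its complement.
    return 90 - angle_between(get_sun(when_utc), orbital_plane_normal)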
class CartesianPredictor(Predictor):
def _propagate_ecef(self, when_utc=None):
"""Return position and velocity in the given date using ECEF coordinate system."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position_eci, velocity_eci = self.propagate_eci(when_utc)
gmst = gstime_from_datetime(when_utc)
position_ecef = coordinate_systems.eci_to_ecef(position_eci, gmst)
velocity_ecef = coordinate_systems.eci_to_ecef(velocity_eci, gmst)
return position_ecef, velocity_ecef
@reify
def mean_motion(self):
"""Mean motion, in radians per minute"""
raise NotImplementedError
@reify
def period(self):
"""Orbital period, in minutes"""
return 2 * pi / self.mean_motion
def get_position(self, when_utc=None):
"""Return a Position namedtuple in ECEF coordinate system"""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position_ecef, velocity_ecef = self._propagate_ecef(when_utc)
return Position(when_utc=when_utc, position_ecef=position_ecef,
velocity_ecef=velocity_ecef, error_estimate=None)
def get_only_position(self, when_utc=None):
"""Return a tuple in ECEF coordinate system"""
return self.get_position(when_utc).position_ecef
def get_eclipse_duration(self, when_utc=None, tolerance=1e-1):
"""Gets eclipse duration at given time, in minutes"""
ecc = self.get_position(when_utc).osculating_elements[1]
if ecc > tolerance:
raise NotImplementedError("Non circular orbits are not supported")
beta = self.get_beta(when_utc)
return eclipse_duration(beta, self.period)
def passes_over(self, location, when_utc, limit_date=None, max_elevation_gt=0, aos_at_dg=0):
return LocationPredictor(location, self, when_utc, limit_date,
max_elevation_gt, aos_at_dg)
def get_next_pass(self, location, when_utc=None, max_elevation_gt=5,
aos_at_dg=0, limit_date=None):
"""Return a PredictedPass instance with the data of the next pass over the given location
        location: point on Earth we want to see from the satellite.
        when_utc: datetime UTC after which the pass is calculated, defaults to now.
        max_elevation_gt: discard passes whose maximum elevation is below this value.
        aos_at_dg: elevation (in degrees) at which the pass is considered to start.
The next pass with a LOS strictly after when_utc will be returned,
possibly the current pass.
"""
if when_utc is None:
when_utc = dt.datetime.utcnow()
for pass_ in self.passes_over(location, when_utc, limit_date,
max_elevation_gt=max_elevation_gt,
aos_at_dg=aos_at_dg):
return pass_
else:
raise NotReachable('Propagation limit date exceeded')
def eclipses_since(self, when_utc=None, limit_date=None):
"""
An iterator that yields all eclipses start and end times between
when_utc and limit_date.
The next eclipse with a end strictly after when_utc will be returned,
possibly the current eclipse.
The last eclipse returned starts before limit_date, but it can end
strictly after limit_date.
No circular orbits are not supported, and will raise NotImplementedError.
"""
def _get_illumination(t):
my_start = start + dt.timedelta(seconds=t)
result = get_satellite_minus_penumbra_verticals(
self.get_only_position(my_start),
my_start
)
return result
if when_utc is None:
when_utc = dt.datetime.utcnow()
orbital_period_s = self.period * 60
        # A third of the orbit period is used as the base window of the search.
        # This window ensures the function get_satellite_minus_penumbra_verticals
        # has at most one local minimum inside it (otherwise there could be one
        # minimum in the illuminated phase and another in penumbra).
base_search_window_s = orbital_period_s / 3
start = when_utc
while limit_date is None or start < limit_date:
            # A negative minimum is approximately the middle point of the eclipse
minimum_illumination = minimize_scalar(
_get_illumination,
bounds=(0, base_search_window_s),
method="bounded",
options={"xatol": 1e-2},
)
eclipse_center_candidate_delta_s = minimum_illumination.x
            # If we found a minimum that is not illuminated, there is an eclipse here
if _get_illumination(eclipse_center_candidate_delta_s) < 0:
                # The small time interval in which to search for zeros around the center
                # is estimated from the expected eclipse duration (the estimate is
                # generally smaller than the real duration, hence the 1.5 coefficient).
                # A minimum of 180 seconds is also used because in some cases the
                # estimate is 0 even though there is an eclipse.
eclipse_duration_estimation_s = self.get_eclipse_duration(start) * 60
zero_search_window_s = max(180, 1.5 * eclipse_duration_estimation_s)
# Search now both zeros to get the start and end of the eclipse
eclipse_start_delta_s = brentq(
_get_illumination,
eclipse_center_candidate_delta_s - zero_search_window_s,
eclipse_center_candidate_delta_s,
xtol=1e-2,
full_output=False,
)
eclipse_end_delta_s = brentq(
_get_illumination,
eclipse_center_candidate_delta_s,
eclipse_center_candidate_delta_s + zero_search_window_s,
xtol=1e-2,
full_output=False,
)
eclipse_start = start + dt.timedelta(seconds=eclipse_start_delta_s)
eclipse_end = start + dt.timedelta(seconds=eclipse_end_delta_s)
yield eclipse_start, eclipse_end
start = eclipse_end + dt.timedelta(seconds=base_search_window_s)
else:
start += dt.timedelta(seconds=base_search_window_s)
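    # Illustrative usage sketch for the generator above (dates and the
    # `predictor` instance are hypothetical):
    #
    #   for eclipse_start, eclipse_end in predictor.eclipses_since(
    #           when_utc=dt.datetime(2020, 1, 1),
    #           limit_date=dt.datetime(2020, 1, 2)):
    #       print(eclipse_start, eclipse_end)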
class GPSPredictor(Predictor):
pass
class LocationPredictor:
"""Predicts passes over a given location
Exposes an iterable interface
"""
def __init__(self, location, predictor, start_date, limit_date=None,
max_elevation_gt=0, aos_at_dg=0, *, propagator=None):
if propagator is not None:
warnings.warn(
"propagator parameter was renamed to predictor "
"and will be removed in a future release",
DeprecationWarning
)
predictor = propagator
self.location = location
self.predictor = predictor
self.start_date = start_date
self.limit_date = limit_date
self.max_elevation_gt = radians(max([max_elevation_gt, aos_at_dg]))
self.aos_at = radians(aos_at_dg)
@property
def propagator(self):
warnings.warn(
"propagator parameter was renamed to predictor "
"and will be removed in a future release",
DeprecationWarning
)
return self.predictor
def __iter__(self):
"""Returns one pass each time"""
current_date = self.start_date
while True:
if self.is_ascending(current_date):
# we need a descending point
ascending_date = current_date
descending_date = self._find_nearest_descending(ascending_date)
pass_ = self._refine_pass(ascending_date, descending_date)
if pass_.valid:
if self.limit_date is not None and pass_.aos > self.limit_date:
break
yield self._build_predicted_pass(pass_)
if self.limit_date is not None and current_date > self.limit_date:
break
current_date = pass_.tca + self._orbit_step(0.6)
else:
current_date = self._find_nearest_ascending(current_date)
def _build_predicted_pass(self, accuratepass):
"""Returns a classic predicted pass"""
tca_position = self.predictor.get_position(accuratepass.tca)
return PredictedPass(self.location, self.predictor.sate_id,
max_elevation_deg=accuratepass.max_elevation_deg,
aos=accuratepass.aos,
los=accuratepass.los,
duration_s=accuratepass.duration.total_seconds(),
max_elevation_position=tca_position,
max_elevation_date=accuratepass.tca,
)
def _find_nearest_descending(self, ascending_date):
for candidate in self._sample_points(ascending_date):
if not self.is_ascending(candidate):
return candidate
else:
logger.error('Could not find a descending pass over %s start date: %s - TLE: %s',
self.location, ascending_date, self.predictor.tle)
raise PropagationError("Can not find an descending phase")
def _find_nearest_ascending(self, descending_date):
for candidate in self._sample_points(descending_date):
if self.is_ascending(candidate):
return candidate
else:
logger.error('Could not find an ascending pass over %s start date: %s - TLE: %s',
self.location, descending_date, self.predictor.tle)
            raise PropagationError('Cannot find an ascending phase')
def _sample_points(self, date):
"""Helper method to found ascending or descending phases of elevation"""
start = date
end = date + self._orbit_step(0.99)
mid = self.midpoint(start, end)
mid_right = self.midpoint(mid, end)
mid_left = self.midpoint(start, mid)
return [end, mid, mid_right, mid_left]
def _refine_pass(self, ascending_date, descending_date):
tca = self._find_tca(ascending_date, descending_date)
elevation = self._elevation_at(tca)
if elevation > self.max_elevation_gt:
aos = self._find_aos(tca)
los = self._find_los(tca)
else:
aos = los = None
return AccuratePredictedPass(aos, tca, los, elevation)
def _find_tca(self, ascending_date, descending_date):
while not self._precision_reached(ascending_date, descending_date):
midpoint = self.midpoint(ascending_date, descending_date)
if self.is_ascending(midpoint):
ascending_date = midpoint
else:
descending_date = midpoint
return ascending_date
def _precision_reached(self, start, end):
# TODO: Allow the precision to change from the outside
return end - start <= ONE_SECOND
@staticmethod
def midpoint(start, end):
"""Returns the midpoint between two dates"""
return start + (end - start) / 2
def _elevation_at(self, when_utc):
position = self.predictor.get_only_position(when_utc)
return self.location.elevation_for(position)
def is_passing(self, when_utc):
"""Returns a boolean indicating if satellite is actually visible"""
return bool(self._elevation_at(when_utc))
def is_ascending(self, when_utc):
"""Check is elevation is ascending or descending on a given point"""
elevation = self._elevation_at(when_utc)
next_elevation = self._elevation_at(when_utc + ONE_SECOND)
return elevation <= next_elevation
def _orbit_step(self, size):
"""Returns a time step, that will make the satellite advance a given number of orbits"""
step_in_radians = size * 2 * pi
seconds = (step_in_radians / self.predictor.mean_motion) * 60
return dt.timedelta(seconds=seconds)
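    # Worked example (approximate figures): with mean_motion ~0.0655 rad/min
    # (a ~96-minute low-Earth orbit), _orbit_step(0.5) yields
    # (0.5 * 2 * pi / 0.0655) * 60 ~= 2880 seconds, i.e. about half an orbit.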
def _find_aos(self, tca):
end = tca
        start = tca - self._orbit_step(0.34) # One third of the orbit
elevation = self._elevation_at(start)
assert elevation < 0
while not self._precision_reached(start, end):
midpoint = self.midpoint(start, end)
elevation = self._elevation_at(midpoint)
if elevation < self.aos_at:
start = midpoint
else:
end = midpoint
return end
def _find_los(self, tca):
start = tca
end = tca + self._orbit_step(0.34)
while not self._precision_reached(start, end):
midpoint = self.midpoint(start, end)
elevation = self._elevation_at(midpoint)
if elevation < self.aos_at:
end = midpoint
else:
start = midpoint
return start
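    # Both _find_aos and _find_los are plain bisection searches: the interval
    # [start, end] always brackets the crossing of the `aos_at` elevation
    # threshold and is halved until it is shorter than ONE_SECOND.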
class AccuratePredictedPass:
def __init__(self, aos, tca, los, max_elevation):
self.aos = round_datetime(aos) if aos is not None else None
self.tca = round_datetime(tca)
self.los = round_datetime(los) if los is not None else None
self.max_elevation = max_elevation
@property
def valid(self):
return self.max_elevation > 0 and self.aos is not None and self.los is not None
@reify
def max_elevation_deg(self):
return degrees(self.max_elevation)
@reify
def duration(self):
return self.los - self.aos
| [((1935, 1962), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1952, 1962), False, 'import logging\n'), ((1978, 2001), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (1990, 2001), True, 'import datetime as dt\n'), ((2061, 2153), 'collections.namedtuple', 'namedtuple', (['"""Position"""', "['when_utc', 'position_ecef', 'velocity_ecef', 'error_estimate']"], {}), "('Position', ['when_utc', 'position_ecef', 'velocity_ecef',\n 'error_estimate'])\n", (2071, 2153), False, 'from collections import namedtuple\n'), ((1345, 1450), 'warnings.warn', 'warnings.warn', (['"""scipy module was not found, some features may not work properly."""', 'ImportWarning'], {}), "(\n 'scipy module was not found, some features may not work properly.',\n ImportWarning)\n", (1358, 1450), False, 'import warnings\n'), ((2278, 2328), 'orbit_predictor.coordinate_systems.ecef_to_llh', 'coordinate_systems.ecef_to_llh', (['self.position_ecef'], {}), '(self.position_ecef)\n', (2308, 2328), False, 'from orbit_predictor import coordinate_systems\n'), ((2633, 2668), 'orbit_predictor.utils.gstime_from_datetime', 'gstime_from_datetime', (['self.when_utc'], {}), '(self.when_utc)\n', (2653, 2668), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((2692, 2748), 'orbit_predictor.coordinate_systems.ecef_to_eci', 'coordinate_systems.ecef_to_eci', (['self.position_ecef', 'gmst'], {}), '(self.position_ecef, gmst)\n', (2722, 2748), False, 'from orbit_predictor import coordinate_systems\n'), ((2772, 2828), 'orbit_predictor.coordinate_systems.ecef_to_eci', 'coordinate_systems.ecef_to_eci', (['self.velocity_ecef', 'gmst'], {}), '(self.velocity_ecef, gmst)\n', (2802, 2828), False, 'from orbit_predictor import coordinate_systems\n'), ((4603, 4666), 'warnings.warn', 'warnings.warn', (['"""This method is deprecated!"""', 'DeprecationWarning'], {}), "('This method is deprecated!', DeprecationWarning)\n", (4616, 4666), False, 'import warnings\n'), ((5598, 5627), 'orbit_predictor.utils.vector_diff', 'vector_diff', (['sate_pos', 'target'], {}), '(sate_pos, target)\n', (5609, 5627), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((5761, 5792), 'orbit_predictor.utils.cross_product', 'cross_product', (['target', 'sate_pos'], {}), '(target, sate_pos)\n', (5774, 5792), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((5807, 5835), 'orbit_predictor.utils.dot_product', 'dot_product', (['cross', 'sate_vel'], {}), '(cross, sate_vel)\n', (5818, 5835), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((6877, 6905), 'numpy.cross', 'np.cross', (['position', 'velocity'], {}), '(position, velocity)\n', (6885, 6905), True, 'import numpy as np\n'), ((7868, 7898), 'orbit_predictor.utils.gstime_from_datetime', 'gstime_from_datetime', (['when_utc'], {}), '(when_utc)\n', (7888, 7898), False, 'from orbit_predictor.utils 
import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((7923, 7973), 'orbit_predictor.coordinate_systems.eci_to_ecef', 'coordinate_systems.eci_to_ecef', (['position_eci', 'gmst'], {}), '(position_eci, gmst)\n', (7953, 7973), False, 'from orbit_predictor import coordinate_systems\n'), ((7998, 8048), 'orbit_predictor.coordinate_systems.eci_to_ecef', 'coordinate_systems.eci_to_ecef', (['velocity_eci', 'gmst'], {}), '(velocity_eci, gmst)\n', (8028, 8048), False, 'from orbit_predictor import coordinate_systems\n'), ((9253, 9288), 'orbit_predictor.utils.eclipse_duration', 'eclipse_duration', (['beta', 'self.period'], {}), '(beta, self.period)\n', (9269, 9288), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((14855, 14873), 'math.radians', 'radians', (['aos_at_dg'], {}), '(aos_at_dg)\n', (14862, 14873), False, 'from math import pi, acos, degrees, radians\n'), ((14923, 15055), 'warnings.warn', 'warnings.warn', (['"""propagator parameter was renamed to predictor and will be removed in a future release"""', 'DeprecationWarning'], {}), "(\n 'propagator parameter was renamed to predictor and will be removed in a future release'\n , DeprecationWarning)\n", (14936, 15055), False, 'import warnings\n'), ((19872, 19901), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (19884, 19901), True, 'import datetime as dt\n'), ((21278, 21305), 'math.degrees', 'degrees', (['self.max_elevation'], {}), '(self.max_elevation)\n', (21285, 21305), False, 'from math import pi, acos, degrees, radians\n'), ((2954, 2976), 'numpy.array', 'np.array', (['position_eci'], {}), '(position_eci)\n', (2962, 2976), True, 'import numpy as np\n'), ((2978, 3000), 'numpy.array', 'np.array', (['velocity_eci'], {}), '(velocity_eci)\n', (2986, 3000), True, 'import numpy as np\n'), ((3122, 3134), 'math.degrees', 'degrees', (['inc'], {}), '(inc)\n', (3129, 3134), False, 'from math import pi, acos, degrees, radians\n'), ((3136, 3149), 'math.degrees', 'degrees', (['raan'], {}), '(raan)\n', (3143, 3149), False, 'from math import pi, acos, degrees, radians\n'), ((3151, 3164), 'math.degrees', 'degrees', (['argp'], {}), '(argp)\n', (3158, 3164), False, 'from math import pi, acos, degrees, radians\n'), ((3166, 3177), 'math.degrees', 'degrees', (['ta'], {}), '(ta)\n', (3173, 3177), False, 'from math import pi, acos, degrees, radians\n'), ((5954, 5968), 'math.degrees', 'degrees', (['angle'], {}), '(angle)\n', (5961, 5968), False, 'from math import pi, acos, degrees, radians\n'), ((6444, 6464), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (6462, 6464), True, 'import datetime as dt\n'), ((6766, 6786), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (6784, 6786), True, 'import datetime as dt\n'), ((6944, 6977), 'orbit_predictor.utils.vector_norm', 'vector_norm', (['orbital_plane_normal'], {}), '(orbital_plane_normal)\n', (6955, 6977), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((7169, 7189), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (7187, 7189), True, 'import 
datetime as dt\n'), ((7363, 7380), 'orbit_predictor.utils.get_sun', 'get_sun', (['when_utc'], {}), '(when_utc)\n', (7370, 7380), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((7765, 7785), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (7783, 7785), True, 'import datetime as dt\n'), ((8496, 8516), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (8514, 8516), True, 'import datetime as dt\n'), ((10226, 10246), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (10244, 10246), True, 'import datetime as dt\n'), ((10508, 10555), 'orbit_predictor.exceptions.NotReachable', 'NotReachable', (['"""Propagation limit date exceeded"""'], {}), "('Propagation limit date exceeded')\n", (10520, 10555), False, 'from orbit_predictor.exceptions import NotReachable, PropagationError\n'), ((11376, 11396), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (11394, 11396), True, 'import datetime as dt\n'), ((11982, 12098), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['_get_illumination'], {'bounds': '(0, base_search_window_s)', 'method': '"""bounded"""', 'options': "{'xatol': 0.01}"}), "(_get_illumination, bounds=(0, base_search_window_s), method\n ='bounded', options={'xatol': 0.01})\n", (11997, 12098), False, 'from scipy.optimize import brentq, minimize_scalar\n'), ((14390, 14522), 'warnings.warn', 'warnings.warn', (['"""propagator parameter was renamed to predictor and will be removed in a future release"""', 'DeprecationWarning'], {}), "(\n 'propagator parameter was renamed to predictor and will be removed in a future release'\n , DeprecationWarning)\n", (14403, 14522), False, 'import warnings\n'), ((17082, 17134), 'orbit_predictor.exceptions.PropagationError', 'PropagationError', (['"""Can not find an descending phase"""'], {}), "('Can not find an descending phase')\n", (17098, 17134), False, 'from orbit_predictor.exceptions import NotReachable, PropagationError\n'), ((17536, 17587), 'orbit_predictor.exceptions.PropagationError', 'PropagationError', (['"""Can not find an ascending phase"""'], {}), "('Can not find an ascending phase')\n", (17552, 17587), False, 'from orbit_predictor.exceptions import NotReachable, PropagationError\n'), ((5662, 5688), 'orbit_predictor.utils.dot_product', 'dot_product', (['sate_pos', 't2b'], {}), '(sate_pos, t2b)\n', (5673, 5688), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((11123, 11146), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (11135, 11146), True, 'import datetime as dt\n'), ((13092, 13246), 'scipy.optimize.brentq', 'brentq', (['_get_illumination', '(eclipse_center_candidate_delta_s - zero_search_window_s)', 'eclipse_center_candidate_delta_s'], {'xtol': '(0.01)', 'full_output': '(False)'}), '(_get_illumination, eclipse_center_candidate_delta_s -\n zero_search_window_s, eclipse_center_candidate_delta_s, xtol=0.01,\n full_output=False)\n', (13098, 13246), False, 'from scipy.optimize import brentq, minimize_scalar\n'), ((13396, 13551), 'scipy.optimize.brentq', 'brentq', (['_get_illumination', 'eclipse_center_candidate_delta_s', '(eclipse_center_candidate_delta_s + zero_search_window_s)'], {'xtol': 
'(0.01)', 'full_output': '(False)'}), '(_get_illumination, eclipse_center_candidate_delta_s, \n eclipse_center_candidate_delta_s + zero_search_window_s, xtol=0.01,\n full_output=False)\n', (13402, 13551), False, 'from scipy.optimize import brentq, minimize_scalar\n'), ((13999, 14041), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': 'base_search_window_s'}), '(seconds=base_search_window_s)\n', (14011, 14041), True, 'import datetime as dt\n'), ((5692, 5713), 'orbit_predictor.utils.vector_norm', 'vector_norm', (['sate_pos'], {}), '(sate_pos)\n', (5703, 5713), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((5716, 5732), 'orbit_predictor.utils.vector_norm', 'vector_norm', (['t2b'], {}), '(t2b)\n', (5727, 5732), False, 'from orbit_predictor.utils import angle_between, cross_product, dot_product, reify, vector_diff, vector_norm, gstime_from_datetime, get_shadow, get_sun, eclipse_duration, get_satellite_minus_penumbra_verticals\n'), ((13702, 13745), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': 'eclipse_start_delta_s'}), '(seconds=eclipse_start_delta_s)\n', (13714, 13745), True, 'import datetime as dt\n'), ((13784, 13825), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': 'eclipse_end_delta_s'}), '(seconds=eclipse_end_delta_s)\n', (13796, 13825), True, 'import datetime as dt\n'), ((13913, 13955), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': 'base_search_window_s'}), '(seconds=base_search_window_s)\n', (13925, 13955), True, 'import datetime as dt\n')] |
jbdel/vilmedic | vilmedic/scorers/NLG/__init__.py | 17d462a540a2632811cc2a78edd2861800a33b07 | from .rouge import ROUGEScorer
from .bleu.bleu import BLEUScorer
from .meteor.meteor import METEORScorer
from .cider.cider import Cider
from .ciderd.ciderd import CiderD
| [] |
Yshuo-Li/mmediting-test | tests/test_liif.py | ff8349a183b3d266495a53be0c8ad8e342e8b461 | import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config
from mmedit.models import build_model
from mmedit.models.losses import L1Loss
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class BP(nn.Module):
"""A simple BP network for testing LIIF.
Args:
in_dim (int): Input dimension.
out_dim (int): Output dimension.
"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.layer = nn.Linear(in_dim, out_dim)
def forward(self, x):
shape = x.shape[:-1]
x = self.layer(x.view(-1, x.shape[-1]))
return x.view(*shape, -1)
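# Shape sketch for BP (sizes are illustrative): an input of shape
# (batch, n_points, in_dim) is flattened to (batch * n_points, in_dim),
# passed through the linear layer, and reshaped back to (batch, n_points, out_dim).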
def test_liif():
model_cfg = dict(
type='LIIF',
generator=dict(
type='EDSR',
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=1),
imnet=dict(type='BP', in_dim=8, out_dim=3),
local_ensemble=True,
feat_unfold=True,
cell_decode=True,
rgb_mean=(0.4488, 0.4371, 0.4040),
rgb_std=(1., 1., 1.),
eval_bsize=30000,
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
scale_max = 4
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'LIIF'
assert isinstance(restorer.imnet, BP)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 3, 22, 11)
targets = torch.rand(1, 128 * 64, 3)
coord = torch.rand(1, 128 * 64, 2)
cell = torch.rand(1, 128 * 64, 2)
data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
# test train_step and forward_test (cpu)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'coord': coord.cuda(),
'cell': cell.cuda()
}
# train_step
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# val_step
result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
assert isinstance(result, dict)
assert isinstance(result['eval_result'], dict)
assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
assert isinstance(result['eval_result']['PSNR'], np.float64)
assert isinstance(result['eval_result']['SSIM'], np.float64)
| [((257, 285), 'mmedit.models.registry.COMPONENTS.register_module', 'COMPONENTS.register_module', ([], {}), '()\n', (283, 285), False, 'from mmedit.models.registry import COMPONENTS\n'), ((1389, 1451), 'mmedit.models.build_model', 'build_model', (['model_cfg'], {'train_cfg': 'train_cfg', 'test_cfg': 'test_cfg'}), '(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)\n', (1400, 1451), False, 'from mmedit.models import build_model\n'), ((1650, 1674), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(22)', '(11)'], {}), '(1, 3, 22, 11)\n', (1660, 1674), False, 'import torch\n'), ((1689, 1715), 'torch.rand', 'torch.rand', (['(1)', '(128 * 64)', '(3)'], {}), '(1, 128 * 64, 3)\n', (1699, 1715), False, 'import torch\n'), ((1728, 1754), 'torch.rand', 'torch.rand', (['(1)', '(128 * 64)', '(2)'], {}), '(1, 128 * 64, 2)\n', (1738, 1754), False, 'import torch\n'), ((1766, 1792), 'torch.rand', 'torch.rand', (['(1)', '(128 * 64)', '(2)'], {}), '(1, 128 * 64, 2)\n', (1776, 1792), False, 'import torch\n'), ((2515, 2560), 'torch.is_tensor', 'torch.is_tensor', (["outputs['results']['output']"], {}), "(outputs['results']['output'])\n", (2530, 2560), False, 'import torch\n'), ((2681, 2706), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2704, 2706), False, 'import torch\n'), ((541, 567), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'out_dim'], {}), '(in_dim, out_dim)\n', (550, 567), True, 'import torch.nn as nn\n'), ((3483, 3528), 'torch.is_tensor', 'torch.is_tensor', (["outputs['results']['output']"], {}), "(outputs['results']['output'])\n", (3498, 3528), False, 'import torch\n')] |
ccraddock/beiwe-backend-cc | database/signals.py | b37c2604800aafcf81c93bc14673ada6aed17a39 |
from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from database.study_models import DeviceSettings, Study, Survey, SurveyArchive
@receiver(post_save, sender=Study)
def populate_study_device_settings(sender, **kwargs):
"""
Ensure that every newly created Study object has a DeviceSettings object. This essentially
makes the OneToOneField have null=False in both directions.
"""
my_study = kwargs['instance']
if kwargs['created'] and not hasattr(my_study, 'device_settings'):
# If my_study has just been created and doesn't have a DeviceSettings
# attached to it, create one with the default parameters.
DeviceSettings.objects.create(study=my_study)
@receiver(pre_save, sender=Survey)
def create_survey_archive(sender, **kwargs):
"""
Ensure that every time a Survey is edited, a SurveyArchive (SA) is stored which holds the
current contents of the Survey before saving, as well as a pair of timestamps marking the
time range over which the SA applies.
"""
# The Survey instance being passed has the updated contents of the Survey. To get
# the preexisting contents of the Survey, make a database call using the passed
# instance's primary key. If we get an ObjectDoesNotExist error short-circuit because
# that means it is the initial save operation.
my_survey_plus_updates = kwargs['instance']
try:
my_survey = Survey.objects.get(pk=my_survey_plus_updates.pk)
except ObjectDoesNotExist:
return
# All fields present in AbstractSurvey, plus the study foreign key which is
# separately present in Survey and SurveyArchive.
survey_fields = [f.name for f in super(Survey, my_survey)._meta.fields]
survey_fields.append('study_id')
# Prepare a new archive containing the archive-specific information
new_archive = SurveyArchive(survey=my_survey, archive_start=my_survey.last_updated)
try:
# Get the most recent archive for this Survey, to check whether the Survey has been edited
last_archive = my_survey.archives.latest('archive_end')
except SurveyArchive.DoesNotExist:
survey_dirty = True # If there is no previous archive, we automatically make a new one
else:
survey_dirty = False
for shared_field in survey_fields:
# Update all of the shared fields in the archive to have the original survey's values
if shared_field == 'name':
setattr(new_archive, shared_field, '{0} {1}'.format(getattr(my_survey, shared_field), timezone.now().isoformat()))
else:
setattr(new_archive, shared_field, getattr(my_survey, shared_field))
if not survey_dirty and getattr(my_survey, shared_field) != getattr(last_archive, shared_field):
# If the survey has been edited since the last archive was made, mark the survey as
# dirty. This tells us that we have to make a new archive object.
survey_dirty = True
if survey_dirty:
# If the survey has been edited, save the new archive. This automatically sets the
# archive_end field to be the current time.
new_archive.save()
else:
# If the survey has not been edited, we don't save the new archive. Update the
# previous archive to extend to the current time. Note that object.update saves the
# object, unlike QuerySet.update. See base_models.AbstractModel for details.
last_archive.update(archive_end=timezone.now())
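# Note: the @receiver decorators connect these handlers at import time, so this
# module has to be imported during startup (commonly from the app's
# AppConfig.ready() or the package __init__) for the signals to fire; the exact
# import location is project-specific.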
| [((275, 308), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Study'}), '(post_save, sender=Study)\n', (283, 308), False, 'from django.dispatch import receiver\n'), ((859, 892), 'django.dispatch.receiver', 'receiver', (['pre_save'], {'sender': 'Survey'}), '(pre_save, sender=Survey)\n', (867, 892), False, 'from django.dispatch import receiver\n'), ((2043, 2112), 'database.study_models.SurveyArchive', 'SurveyArchive', ([], {'survey': 'my_survey', 'archive_start': 'my_survey.last_updated'}), '(survey=my_survey, archive_start=my_survey.last_updated)\n', (2056, 2112), False, 'from database.study_models import DeviceSettings, Study, Survey, SurveyArchive\n'), ((807, 852), 'database.study_models.DeviceSettings.objects.create', 'DeviceSettings.objects.create', ([], {'study': 'my_study'}), '(study=my_study)\n', (836, 852), False, 'from database.study_models import DeviceSettings, Study, Survey, SurveyArchive\n'), ((1591, 1639), 'database.study_models.Survey.objects.get', 'Survey.objects.get', ([], {'pk': 'my_survey_plus_updates.pk'}), '(pk=my_survey_plus_updates.pk)\n', (1609, 1639), False, 'from database.study_models import DeviceSettings, Study, Survey, SurveyArchive\n'), ((3722, 3736), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3734, 3736), False, 'from django.utils import timezone\n'), ((2750, 2764), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2762, 2764), False, 'from django.utils import timezone\n')] |
Blakstar26/npyscreen | docs/examples/notify/notify_skeleton.py | d47f9c78dc9fea6f66aaef60403e748bb89e52f7 | import npyscreen
class NotifyBaseExample(npyscreen.Form):
def create(self):
key_of_choice = 'p'
what_to_display = 'Press {} for popup \n Press escape key to quit'.format(key_of_choice)
self.how_exited_handers[npyscreen.wgwidget.EXITED_ESCAPE] = self.exit_application
self.add(npyscreen.FixedText, value=what_to_display)
def exit_application(self):
self.parentApp.setNextForm(None)
self.editing = False
class MyApplication(npyscreen.NPSAppManaged):
def onStart(self):
self.addForm('MAIN', NotifyBaseExample, name='To be improved upon')
if __name__ == '__main__':
TestApp = MyApplication().run() | [] |
romanroson/pis_code | practicioner_bundle/ch15-neural_style/pyimagesearch/nn/conv/minigooglenet.py | 1221c39c23bec62ba419f9a324f88b0d8e5e4b5b | # -*- coding: utf-8 -*-
"""Implementation of MiniGoogLeNet architecture.
This implementation is based on the original implementation of GoogLeNet.
The authors of the net used BN before the Activation layer.
This ordering should be switched.
"""
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import AveragePooling2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras.layers import concatenate
from keras import backend as K
class MiniGoogLeNet:
"""Implementation of MiniGoogLeNet architecture
"""
@staticmethod
def conv_module(x, filter_num, filter_x_size, filter_y_size, stride, chanel_dim, padding="same"):
"""Define conv layer
Arguments:
x {Tensor} -- input layer to the function
filter_num {int} -- number of filters our CONV layer is going to learn
filter_x_size {int} -- x-size of each of the filter_num filters that will be learned
filter_y_size {int} -- y-size of each of the filter_num filters that will be learned
stride {int} -- stride of the CONV layer
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Keyword Arguments:
padding {str} -- type of padding to be applied to the CONV layer (default: {"same"})
Returns:
Tensor -- convolutional module
"""
# define a CONV => BN => RELU pattern
x = Conv2D(filter_num, (filter_x_size, filter_y_size), strides=stride, padding=padding)(x)
x = BatchNormalization(axis=chanel_dim)(x)
x = Activation("relu")(x)
# return the block
return x
@staticmethod
def inception_module(x, numK1x1, numK3x3, chanel_dim): # pylint: disable=invalid-name
"""Define inception module
Arguments:
x {Tensor} -- input layer
numK1x1 {int} -- number of 1x1 filters
numK3x3 {int} -- number of 3x3 filters
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- inception module
"""
# define two CONV modules, then concatenate across the channel dimension
conv_1x1 = MiniGoogLeNet.conv_module(x, numK1x1, 1, 1, (1, 1), chanel_dim)
conv_3x3 = MiniGoogLeNet.conv_module(x, numK3x3, 3, 3, (1, 1), chanel_dim)
x = concatenate([conv_1x1, conv_3x3], axis=chanel_dim)
# return the block
return x
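    # Channel arithmetic (illustrative): with numK1x1 = 32 and numK3x3 = 48 on a
    # 28x28 input, the branches give 28x28x32 and 28x28x48 maps, and the
    # concatenation yields 28x28x(32 + 48) = 28x28x80.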
@staticmethod
def downsample_module(x, filter_num, chanel_dim):
"""Define downsample module
Arguments:
x {Tensor} -- input layer
filter_num {int} -- number of filters our CONV layer is going to learn
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- downsample module
"""
# define the CONV module and POOL, then concatenate across the channel dimensions
conv_3x3 = MiniGoogLeNet.conv_module(x, filter_num, 3, 3, (2, 2), chanel_dim, padding="valid")
pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = concatenate([conv_3x3, pool], axis=chanel_dim)
# return the block
return x
@staticmethod
def build(width, height, depth, classes):
"""Build MiniGoogLeNet architecture
Arguments:
            width {int} -- width of the input images in pixels
            height {int} -- height of the input images in pixels
            depth {int} -- number of channels in the input images
            classes {int} -- number of output classes to predict
Returns:
obj -- MiniGoogLeNet model
"""
# initialize the input shape to be "channels last" and the channels dimension itself
input_shape = (height, width, depth)
chanel_dim = -1
# if we are using "channels first", update the input shape and channels dimension
if K.image_data_format() == "channels_first":
input_shape = (depth, height, width)
chanel_dim = 1
# define the model input and first CONV module
inputs = Input(shape=input_shape)
x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1), chanel_dim)
# two Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 32, 32, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 32, 48, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 80, chanel_dim)
# four Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 112, 48, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 96, 64, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 80, 80, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 48, 96, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 96, chanel_dim)
# two Inception modules followed by global POOL and dropout
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = AveragePooling2D((7, 7))(x)
x = Dropout(0.5)(x)
# softmax classifier
x = Flatten()(x)
x = Dense(classes)(x)
x = Activation("softmax")(x)
# create the model
model = Model(inputs, x, name="googlenet")
# return the constructed network architecture
return model
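    # Illustrative usage sketch (CIFAR-10-style shapes; the training setup
    # shown here is hypothetical):
    #
    #   model = MiniGoogLeNet.build(width=32, height=32, depth=3, classes=10)
    #   model.compile(optimizer="sgd", loss="categorical_crossentropy",
    #                 metrics=["accuracy"])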
| [((2664, 2714), 'keras.layers.concatenate', 'concatenate', (['[conv_1x1, conv_3x3]'], {'axis': 'chanel_dim'}), '([conv_1x1, conv_3x3], axis=chanel_dim)\n', (2675, 2714), False, 'from keras.layers import concatenate\n'), ((3439, 3485), 'keras.layers.concatenate', 'concatenate', (['[conv_3x3, pool]'], {'axis': 'chanel_dim'}), '([conv_3x3, pool], axis=chanel_dim)\n', (3450, 3485), False, 'from keras.layers import concatenate\n'), ((4351, 4375), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4356, 4375), False, 'from keras.layers import Input\n'), ((5545, 5579), 'keras.models.Model', 'Model', (['inputs', 'x'], {'name': '"""googlenet"""'}), "(inputs, x, name='googlenet')\n", (5550, 5579), False, 'from keras.models import Model\n'), ((1715, 1803), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['filter_num', '(filter_x_size, filter_y_size)'], {'strides': 'stride', 'padding': 'padding'}), '(filter_num, (filter_x_size, filter_y_size), strides=stride, padding=\n padding)\n', (1721, 1803), False, 'from keras.layers.convolutional import Conv2D\n'), ((1814, 1849), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanel_dim'}), '(axis=chanel_dim)\n', (1832, 1849), False, 'from keras.layers.normalization import BatchNormalization\n'), ((1865, 1883), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1875, 1883), False, 'from keras.layers.core import Activation\n'), ((3387, 3423), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(3, 3)'], {'strides': '(2, 2)'}), '((3, 3), strides=(2, 2))\n', (3399, 3423), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((4159, 4180), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (4178, 4180), True, 'from keras import backend as K\n'), ((5323, 5347), 'keras.layers.convolutional.AveragePooling2D', 'AveragePooling2D', (['(7, 7)'], {}), '((7, 7))\n', (5339, 5347), False, 'from keras.layers.convolutional import AveragePooling2D\n'), ((5363, 5375), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5370, 5375), False, 'from keras.layers.core import Dropout\n'), ((5421, 5430), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5428, 5430), False, 'from keras.layers import Flatten\n'), ((5446, 5460), 'keras.layers.core.Dense', 'Dense', (['classes'], {}), '(classes)\n', (5451, 5460), False, 'from keras.layers.core import Dense\n'), ((5476, 5497), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (5486, 5497), False, 'from keras.layers.core import Activation\n')] |
PaulDoessel/gaffer-play | python/GafferUI/ScriptEditor.py | 8b72dabb388e12424c230acfb0bd209049b01bd6 | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import ast
import sys
import traceback
import IECore
import Gaffer
import GafferUI
QtGui = GafferUI._qtImport( "QtGui" )
QtCore = GafferUI._qtImport( "QtCore" )
## \todo Custom right click menu with script load, save, execute file, undo, redo etc.
## \todo Standard way for users to customise all menus
## \todo Tab completion and popup help. rlcompleter module should be useful for tab completion. Completer( dict ) constructs a completer
# that works in a specific namespace.
class ScriptEditor( GafferUI.EditorWidget ) :
def __init__( self, scriptNode, **kw ) :
self.__splittable = GafferUI.SplitContainer()
GafferUI.EditorWidget.__init__( self, self.__splittable, scriptNode, **kw )
self.__outputWidget = GafferUI.MultiLineTextWidget(
editable = False,
wrapMode = GafferUI.MultiLineTextWidget.WrapMode.None,
role = GafferUI.MultiLineTextWidget.Role.Code,
)
self.__inputWidget = GafferUI.MultiLineTextWidget(
wrapMode = GafferUI.MultiLineTextWidget.WrapMode.None,
role = GafferUI.MultiLineTextWidget.Role.Code,
)
self.__splittable.append( self.__outputWidget )
self.__splittable.append( self.__inputWidget )
self.__inputWidgetActivatedConnection = self.__inputWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__activated ) )
self.__inputWidgetDropTextConnection = self.__inputWidget.dropTextSignal().connect( Gaffer.WeakMethod( self.__dropText ) )
self.__executionDict = {
"IECore" : IECore,
"Gaffer" : Gaffer,
"GafferUI" : GafferUI,
"script" : scriptNode,
"parent" : scriptNode
}
def inputWidget( self ) :
return self.__inputWidget
def execute( self ) :
# decide what to execute
haveSelection = True
toExecute = self.__inputWidget.selectedText()
if not toExecute :
haveSelection = False
toExecute = self.__inputWidget.getText()
# parse it first. this lets us give better error formatting
# for syntax errors, and also figure out whether we can eval()
# and display the result or must exec() only.
try :
parsed = ast.parse( toExecute )
except SyntaxError, e :
self.__outputWidget.appendHTML( self.__syntaxErrorToHTML( e ) )
return
# execute it
self.__outputWidget.appendHTML( self.__codeToHTML( toExecute ) )
with Gaffer.OutputRedirection( stdOut = Gaffer.WeakMethod( self.__redirectOutput ), stdErr = Gaffer.WeakMethod( self.__redirectOutput ) ) :
with _MessageHandler( self.__outputWidget ) :
with Gaffer.UndoContext( self.scriptNode() ) :
with self.getContext() :
try :
if len( parsed.body ) == 1 and isinstance( parsed.body[0], ast.Expr ) :
result = eval( toExecute, self.__executionDict, self.__executionDict )
if result is not None :
self.__outputWidget.appendText( str( result ) )
else :
exec( toExecute, self.__executionDict, self.__executionDict )
if not haveSelection :
self.__inputWidget.setText( "" )
except Exception, e :
self.__outputWidget.appendHTML( self.__exceptionToHTML() )
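	# A minimal sketch of the eval-vs-exec decision above (inputs are
	# illustrative only) :
	#
	#   ast.parse( "1 + 1" ).body   # [ast.Expr]   -> eval() and display the result
	#   ast.parse( "x = 1" ).body   # [ast.Assign] -> exec() only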
def __repr__( self ) :
return "GafferUI.ScriptEditor( scriptNode )"
def __activated( self, widget ) :
self.execute()
return True
def __dropText( self, widget, dragData ) :
if isinstance( dragData, IECore.StringVectorData ) :
return repr( list( dragData ) )
elif isinstance( dragData, Gaffer.GraphComponent ) :
if self.scriptNode().isAncestorOf( dragData ) :
return "script['" + dragData.relativeName( self.scriptNode() ).replace( ".", "']['" ) + "']"
elif isinstance( dragData, Gaffer.Set ) :
if len( dragData ) == 1 :
return self.__dropText( widget, dragData[0] )
else :
return "[ " + ", ".join( [ self.__dropText( widget, d ) for d in dragData ] ) + " ]"
elif isinstance( dragData, IECore.Data ) and hasattr( dragData, "value" ) :
return repr( dragData.value )
return None
def __codeToHTML( self, code ) :
code = code.replace( "<", "<" ).replace( ">", ">" )
return "<pre>" + code + "</pre>"
def __syntaxErrorToHTML( self, syntaxError ) :
formatted = traceback.format_exception_only( SyntaxError, syntaxError )
lineNumber = formatted[0].rpartition( "," )[2].strip()
headingText = formatted[-1].replace( ":", " : " + lineNumber + " : ", 1 )
result = "<h1 class='ERROR'>%s</h1>" % headingText
result += "<br>" + self.__codeToHTML( "".join( formatted[1:-1] ) )
return result
def __exceptionToHTML( self ) :
t = traceback.extract_tb( sys.exc_info()[2] )
lineNumber = str( t[1][1] )
headingText = traceback.format_exception_only( *(sys.exc_info()[:2]) )[0].replace( ":", " : line " + lineNumber + " : ", 1 )
result = "<h1 class='ERROR'>%s</h1>" % headingText
if len( t ) > 2 :
result += "<br>" + self.__codeToHTML( "".join( traceback.format_list( t[2:] ) ) )
return result
def __redirectOutput( self, output ) :
if output != "\n" :
self.__outputWidget.appendText( output )
# update the gui so messages are output as they occur, rather than all getting queued
# up till the end.
QtGui.QApplication.instance().processEvents( QtCore.QEventLoop.ExcludeUserInputEvents )
GafferUI.EditorWidget.registerType( "ScriptEditor", ScriptEditor )
class _MessageHandler( IECore.MessageHandler ) :
def __init__( self, textWidget ) :
IECore.MessageHandler.__init__( self )
self.__textWidget = textWidget
def handle( self, level, context, message ) :
html = formatted = "<h1 class='%s'>%s : %s </h1><span class='message'>%s</span><br>" % (
IECore.Msg.levelAsString( level ),
IECore.Msg.levelAsString( level ),
context,
message.replace( "\n", "<br>" )
)
self.__textWidget.appendHTML( html )
# update the gui so messages are output as they occur, rather than all getting queued
# up till the end.
QtGui.QApplication.instance().processEvents( QtCore.QEventLoop.ExcludeUserInputEvents )
| [] |
machinelearningdeveloper/aoc_2016 | 03/triangle.py | e2c2f7909b09c2ad27f87e05a80f2b2feee6a3a2 | """Test whether putative triangles, specified as triples of side lengths,
in fact are possible."""
def load_triangles(filename):
"""Load triangles from filename."""
triangles = []
with open(filename) as f:
for line in f:
if line.strip():
triangles.append(tuple([int(side) for side in line.split()]))
return triangles
def load_triangles_from_cols(filename):
"""Instead of loading one triangle per line,
load one-third each of three triangles per line."""
xs = []
ys = []
zs = []
with open(filename) as f:
for line in f:
if line.strip():
x, y, z = [int(side) for side in line.split()]
xs.append(x)
ys.append(y)
zs.append(z)
return ([(xs[i], xs[i+1], xs[i+2]) for i in range(0, len(xs), 3)]
+ [(ys[i], ys[i+1], ys[i+2]) for i in range(0, len(ys), 3)]
+ [(zs[i], zs[i+1], zs[i+2]) for i in range(0, len(zs), 3)])
def is_possible(*sides):
"""The sum of the lengths of every pair of sides in a, b, c
must be larger than the length of the remaining side,
or the putative triangle is impossible."""
for a in [0, 1]:
for b in range(a + 1, 3):
if a == 0:
c = 1 if b == 2 else 2
elif a == 1:
c = 0
if sum([sides[a], sides[b]]) <= sides[c]:
return False
return True
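# Equivalent check (illustrative): with the sides sorted ascending as x <= y <= z,
# the triangle is possible exactly when x + y > z.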
| [] |
aleksandromelo/Exercicios | ExerciciosdePython/ex049.py | 782ff539efa1286180eaf8df8c25c4eca7a5e669 | num = int(input('Digite um número para ver sua tabuada: '))
for i in range(1, 11):
print('{} x {:2} = {}'.format(num, i, num * i)) | [] |
bkidwell/ebmeta-old | ebmeta/actions/version.py | 2279ddd14235ea31b27f0eaa7e9bb26cb43d4133 | """Print ebmeta version number."""
import sys
import ebmeta
def run():
print "{} {}".format(ebmeta.PROGRAM_NAME, ebmeta.VERSION)
sys.exit(0)
| [] |
amichard/tfrs | backend/api/tests/mixins/credit_trade_relationship.py | ed3973016cc5c2ae48999d550a23b41a5ddad807 | # -*- coding: utf-8 -*-
# pylint: disable=no-member,invalid-name,duplicate-code
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import json
import logging
from typing import Callable
from collections import namedtuple, defaultdict
from enum import Enum
from api.models.CreditTrade import CreditTrade
from api.models.CreditTradeStatus import CreditTradeStatus
class CreditTradeRelationshipMixin(object):
"""
Mixin to provide user mapping for related parties to credit transactions
"""
class UserRelationship(Enum):
"""
Enumerates the ways in which a client (user) can be related to a
credit trade
"""
INITIATOR = 1
RESPONDENT = 2
THIRD_PARTY = 3
GOVERNMENT_ANALYST = 4
GOVERNMENT_DIRECTOR = 5
user_map = {
UserRelationship.INITIATOR: 'fs_user_1',
UserRelationship.RESPONDENT: 'fs_user_2',
UserRelationship.THIRD_PARTY: 'fs_user_3',
UserRelationship.GOVERNMENT_ANALYST: 'gov_analyst',
UserRelationship.GOVERNMENT_DIRECTOR: 'gov_director'
}
class CreditTradeFlowHooksMixin(object):
ChangeRecord = namedtuple('ChangeRecord', [
'trade_id',
'requesting_username',
'relationship',
'expected_to_be_successful',
'data_before_request',
'data_after_request',
'response_code'
])
PreChangeRecord = namedtuple('PreChangeRecord', [
'trade_id',
'current_status',
'rescinded',
'status_change'
])
StatusChange = namedtuple('StatusChange', [
'relationship',
'status',
'rescinded'
])
def _sensible_status_changes(self, current_status, rescinded):
"""
Return a list of valid potential status changes for a given starting
state
"""
status_changes = defaultdict(lambda: [])
status_changes[('Draft', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Submitted', False),
self.StatusChange(self.UserRelationship.INITIATOR,
'Cancelled', False)
]
status_changes[('Submitted', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Submitted', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Accepted', False),
self.StatusChange(self.UserRelationship.RESPONDENT,
'Refused', False)
]
status_changes[('Accepted', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Accepted', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Accepted', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_ANALYST,
'Recommended', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_ANALYST,
'Not Recommended', False)
]
status_changes[('Recommended', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Recommended', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Recommended', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Approved', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Declined', False)
]
status_changes[('Not Recommended', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Not Recommended', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Not Recommended', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Approved', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Declined', False)
]
return status_changes[(current_status, rescinded)]
def _path_builder(self, node, path=[], valid_paths=[]):
"""
Recursively build an array of valid paths through the status tree
"""
s = self._sensible_status_changes(node.status, node.rescinded)
is_leaf = not s
path = path + [node]
if is_leaf:
valid_paths.append(path) # end of the line
for branch in s:
self._path_builder(branch, path, valid_paths)
return valid_paths
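    # Small illustrative trace: from ('Draft', False) the branches are Submitted
    # and Cancelled; Cancelled has no further changes, so one complete path is
    # Draft -> Cancelled, while the Submitted branch keeps expanding until it
    # reaches a leaf such as Refused, Declined or Approved.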
def check_credit_trade_workflow(
self,
before_change_callback: Callable[[PreChangeRecord], None] = lambda x: None,
after_change_callback: Callable[[ChangeRecord], None] = lambda x: None,
path_end_callback: Callable[[], None] = lambda: None,
modify_request_payload: Callable[[dict], None] = lambda x: None
):
"""
Evaluate all normal status paths through the application via
REST API as appropriate users
with callbacks for tests:
before_change_callback called just before a status change.
Initial status and trade_id may be None
after_change_callback called after a change
data_before_request can be None if this was a creation
path_end_callback called when this pathway is done
(another will begin unless this was the last)
"""
initiating_org = self.users[
self.user_map[
self.UserRelationship.INITIATOR
]].organization
responding_org = self.users[
self.user_map[
self.UserRelationship.RESPONDENT
]].organization
payload = {
'fairMarketValuePerCredit': 1,
'initiator': initiating_org.id,
'numberOfCredits': 1,
'respondent': responding_org.id,
'tradeEffectiveDate': datetime.datetime.today().strftime('%Y-%m-%d'),
'type': self.credit_trade_types['sell'].id,
'zeroReason': None
}
valid_paths = (self._path_builder(
self.StatusChange(self.UserRelationship.INITIATOR, 'Draft', False)
))
for path in valid_paths:
logging.debug('evaluating path: {}'.format(
'\n'.join(
[
'{} sets status to {} and rescinded to {}'.format(
c.relationship, c.status, c.rescinded) for c in path
]
)))
trade_id = None
response_data = None
for node in path:
before_change_callback(self.PreChangeRecord(
trade_id,
CreditTrade.objects.filter(
id=trade_id
).first().status.status if trade_id else None,
CreditTrade.objects.filter(
id=trade_id
).first().is_rescinded if trade_id else None,
node
))
payload['status'] = CreditTradeStatus.objects.get_by_natural_key(node.status).id
payload['is_rescinded'] = node.rescinded
modify_request_payload(payload)
if not trade_id:
response = self.clients[self.user_map[node.relationship]].post(
'/api/credit_trades',
content_type='application/json',
data=json.dumps(payload)
)
else:
response = self.clients[self.user_map[node.relationship]].put(
'/api/credit_trades/{}'.format(trade_id),
content_type='application/json',
data=json.dumps(payload)
)
previous_response_data = response_data
response_data = json.loads(response.content.decode('utf-8'))
trade_id = response_data['id'] if 'id' in response_data else trade_id
after_change_callback(self.ChangeRecord(
trade_id,
self.user_map[node.relationship],
node.relationship,
True,
previous_response_data,
response_data,
response.status_code
))
path_end_callback()
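    # Illustrative usage sketch in a concrete test case (the base test class and
    # assertion below are hypothetical; the mixin expects self.users,
    # self.clients and self.credit_trade_types to be provided by the test case):
    #
    #   class TestWorkflow(CreditTradeRelationshipMixin,
    #                      CreditTradeFlowHooksMixin, BaseTestCase):
    #       def test_all_paths(self):
    #           self.check_credit_trade_workflow(
    #               after_change_callback=lambda change: self.assertLess(
    #                   change.response_code, 300))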
| [((1998, 2176), 'collections.namedtuple', 'namedtuple', (['"""ChangeRecord"""', "['trade_id', 'requesting_username', 'relationship',\n 'expected_to_be_successful', 'data_before_request',\n 'data_after_request', 'response_code']"], {}), "('ChangeRecord', ['trade_id', 'requesting_username',\n 'relationship', 'expected_to_be_successful', 'data_before_request',\n 'data_after_request', 'response_code'])\n", (2008, 2176), False, 'from collections import namedtuple, defaultdict\n'), ((2254, 2349), 'collections.namedtuple', 'namedtuple', (['"""PreChangeRecord"""', "['trade_id', 'current_status', 'rescinded', 'status_change']"], {}), "('PreChangeRecord', ['trade_id', 'current_status', 'rescinded',\n 'status_change'])\n", (2264, 2349), False, 'from collections import namedtuple, defaultdict\n'), ((2404, 2471), 'collections.namedtuple', 'namedtuple', (['"""StatusChange"""', "['relationship', 'status', 'rescinded']"], {}), "('StatusChange', ['relationship', 'status', 'rescinded'])\n", (2414, 2471), False, 'from collections import namedtuple, defaultdict\n'), ((2711, 2735), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (2722, 2735), False, 'from collections import namedtuple, defaultdict\n'), ((7055, 7080), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (7078, 7080), False, 'import datetime\n'), ((8238, 8295), 'api.models.CreditTradeStatus.CreditTradeStatus.objects.get_by_natural_key', 'CreditTradeStatus.objects.get_by_natural_key', (['node.status'], {}), '(node.status)\n', (8282, 8295), False, 'from api.models.CreditTradeStatus import CreditTradeStatus\n'), ((8655, 8674), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (8665, 8674), False, 'import json\n'), ((8954, 8973), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (8964, 8973), False, 'import json\n'), ((8027, 8066), 'api.models.CreditTrade.CreditTrade.objects.filter', 'CreditTrade.objects.filter', ([], {'id': 'trade_id'}), '(id=trade_id)\n', (8053, 8066), False, 'from api.models.CreditTrade import CreditTrade\n'), ((7876, 7915), 'api.models.CreditTrade.CreditTrade.objects.filter', 'CreditTrade.objects.filter', ([], {'id': 'trade_id'}), '(id=trade_id)\n', (7902, 7915), False, 'from api.models.CreditTrade import CreditTrade\n')] |
panchohumeres/dynamo-covid | superset/superset_config.py | cf473be3eeca436efccd8891a61b721192cf6d34 | import os
SERVER_NAME = os.getenv('DOMAIN_SUPERSET')
PUBLIC_ROLE_LIKE_GAMMA = True
SESSION_COOKIE_SAMESITE = None # One of [None, 'Lax', 'Strict']
SESSION_COOKIE_HTTPONLY = False
MAPBOX_API_KEY = os.getenv('MAPBOX_API_KEY', '')
POSTGRES_DB=os.getenv('POSTGRES_DB')
POSTGRES_PASSWORD=os.getenv('POSTGRES_PASSWORD')
POSTGRES_USER=os.getenv('POSTGRES_USER')
POSTGRES_PORT=str(os.getenv('POSTGRES_PORT'))
HTTP_HEADERS = {'X-Frame-Options': 'ALLOWALL'}
sql_alchemy_string='postgresql+psycopg2://'+POSTGRES_USER+':'+POSTGRES_PASSWORD+'@postgres:'+POSTGRES_PORT+'/'+POSTGRES_DB
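# Example (values are made up for illustration): with POSTGRES_USER=superset,
# POSTGRES_PASSWORD=secret, POSTGRES_PORT=5432 and POSTGRES_DB=superset, the
# concatenation above produces a connection URI of the form
#   postgresql+psycopg2://superset:secret@postgres:5432/superset
# i.e. Superset connects to the host named "postgres" (presumably the database container).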
CACHE_CONFIG = {
'CACHE_TYPE': 'redis',
'CACHE_DEFAULT_TIMEOUT': 300,
'CACHE_KEY_PREFIX': 'superset_',
'CACHE_REDIS_HOST': 'redis',
'CACHE_REDIS_PORT': 6379,
'CACHE_REDIS_DB': 1,
'CACHE_REDIS_URL': 'redis://redis:6379/1'}
SQLALCHEMY_DATABASE_URI = \
sql_alchemy_string
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'thisISaSECRET_1234' | [((25, 53), 'os.getenv', 'os.getenv', (['"""DOMAIN_SUPERSET"""'], {}), "('DOMAIN_SUPERSET')\n", (34, 53), False, 'import os\n'), ((197, 228), 'os.getenv', 'os.getenv', (['"""MAPBOX_API_KEY"""', '""""""'], {}), "('MAPBOX_API_KEY', '')\n", (206, 228), False, 'import os\n'), ((241, 265), 'os.getenv', 'os.getenv', (['"""POSTGRES_DB"""'], {}), "('POSTGRES_DB')\n", (250, 265), False, 'import os\n'), ((284, 314), 'os.getenv', 'os.getenv', (['"""POSTGRES_PASSWORD"""'], {}), "('POSTGRES_PASSWORD')\n", (293, 314), False, 'import os\n'), ((329, 355), 'os.getenv', 'os.getenv', (['"""POSTGRES_USER"""'], {}), "('POSTGRES_USER')\n", (338, 355), False, 'import os\n'), ((374, 400), 'os.getenv', 'os.getenv', (['"""POSTGRES_PORT"""'], {}), "('POSTGRES_PORT')\n", (383, 400), False, 'import os\n')] |
johnnyboiii3020/matchmaking-bot | mybot.py | c36df430fd8b3292f34fb2e156e65d9914e0e497 | import discord
import json
import random
import os
from discord.ext import commands
TOKEN = ""
client = commands.Bot(command_prefix = '--')
os.chdir(r'D:\Programming\Projects\Discord bot\jsonFiles')
SoloCounter = 30
SolominCounter = 10
Queueiter = 1
T_Queueiter = 1
TeamCounter = 50
TeamminCounter = 20
extensions = [
"cogs.Matchmaking",
"cogs.Moderator"
]
@client.event
async def on_ready():
botInfo = await client.application_info()
oauthlink = discord.utils.oauth_url(botInfo.id)
print('---------')
print('Username: {}'.format(client.user.name))
print('ID: {}'.format(client.user.id))
print('Server count: {}'.format(str(len(client.servers))))
print('Member count: {}'.format(str(len(set(client.get_all_members())))))
print('OAuth URL: {}'.format(oauthlink))
print('Cogs: {}'.format(client.cogs))
print('---------')
######################### Register Team #################################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def registerTeam( ctx , teamName , player1: discord.Member , player2: discord.Member , player3: discord.Member , player4: discord.Member , player5: discord.Member):
if ctx.message.channel.id == "549911021511245834":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
players = [player1 , player2 , player3 , player4 , player5]
await update_data_Team(ctx , Teams , teamName , players)
with open('Teams.json' , 'w') as f:
json.dump(Teams , f , indent = 2)
async def update_data_Team(ctx , Teams , teamName , players):
if not teamName in Teams:
Teams[teamName] = {}
Teams[teamName]["teamElo"] = 0
Teams[teamName]["Players"] = []
Role = teamName
await client.create_role(ctx.message.server , name = Role, hoist = True , mentionable = True )
TeamRole = discord.utils.get(ctx.message.server.roles , name = Role)
for player in players:
print(player)
Teams[teamName]["Players"].append(player.mention)
await client.add_roles(player , TeamRole)
await client.say("{} is Registered as Team Cheers!!!!".format(teamName))
else:
await client.say("you are already registered")
############################ Register Solo ###################################
@client.command(pass_context = True)
async def registersolo( ctx , name: discord.Member):
if ctx.message.channel.id == "549911021511245834":
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
await update_data_solo(Solo , name , ctx)
with open('Solo.json' , 'w') as f:
json.dump(Solo , f , indent = 2)
async def update_data_solo( Solo , name , player):
if not player.message.author.mention in Solo:
author = player.message.author.mention
member = player.message.author
Solo[author] = {}
Solo[author]["name"] = name
Solo[author]["Elo"] = 0
nickname = str(Solo[author]["Elo"]) + "~" + Solo[author]["name"]
Role = discord.utils.get(player.message.server.roles , name = 'Registered')
member.nick = nickname
await client.add_roles(member , Role)
await client.say("{} is Registered as Solo Cheers Guys!!!!".format(author))
else:
await client.say("you are already registered")
############################### Win Team ################################
@client.command(pass_context = True)
@commands.has_role('Mod')
async def winT(ctx , T_Queueno , Team , Team2):
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
Teams[Team]["teamElo"] = Teams[Team]["teamElo"] + TeamCounter
Teams[Team2]["teamElo"] = Teams[Team2]["teamElo"] - TeamminCounter
await display_win_team(Team , Team2)
    with open('Teams.json' , 'w') as f:  # 'w' so the updated Elo values are actually written back
json.dump(Teams , f , indent = 2)
###############CReate Team Queue Channel###########################
@client.command(pass_context = True)
@commands.has_role('Mod')
async def CreateTQueueChannel(ctx):
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
Teams_Queue["1"] = []
with open('Teams_Queue.json' , 'w') as f:
json.dump(Teams_Queue , f , indent = 2)
########################## Join Team Queue ###################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def joinQT(ctx , TeamName):
if ctx.message.channel.id == "549910313995206687":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
if "{}".format(TeamName) in Teams:
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
await update_data_Team_Queue(Teams_Queue , TeamName)
with open('Teams_Queue.json' , 'w') as f:
json.dump(Teams_Queue , f , indent = 2)
else:
await client.say("{} is not registerd".format(TeamName))
async def update_data_Team_Queue(Teams_Queue , TeamName):
global T_Queueiter
T_Queueno = T_Queueiter
if len(Teams_Queue["{}".format(T_Queueno)]) >= 1:
Teams_Queue[str(T_Queueno)].append(TeamName)
await display_Team_Queue(T_Queueno , Teams_Queue , TeamName)
await display_match(T_Queueno , Teams_Queue)
T_Queueiter += 1
T_Queueno = T_Queueiter
Teams_Queue[str(T_Queueno)] = []
else:
if not TeamName in Teams_Queue[str(T_Queueno)]:
Teams_Queue[str(T_Queueno)].append(TeamName)
await display_Team_Queue(T_Queueno , Teams_Queue , TeamName)
else:
await client.say("{} is already in queue" .format(TeamName))
async def display_Team_Queue(T_Queueno , Teams_Queue , TeamName):
embed = discord.Embed(
title = "Team Queue : {}".format(T_Queueno),
description = "5 v 5 Custom Games"
)
    embed.add_field(name = 'Team:' , value = "\n".join(Teams_Queue[str(T_Queueno)]) , inline = False)
await client.say(embed = embed)
async def display_match(T_Queueno , Teams_Queue):
embed = discord.Embed(
title= "Team Matchup Queue : {}".format(T_Queueno),
description = "5 v 5 Custom Games"
)
embed.add_field(name = 'Teams:' , value = "\n".join(Teams_Queue[str(T_Queueno)]) , inline = False)
with open('Maps.json' , 'r') as f:
Maps = json.load(f)
embed.add_field(name = 'Map:' , value = random.choice(Maps["Maps"]))
await client.say(embed = embed)
################Show Queue#################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def showQ(ctx , Queueno):
if ctx.message.channel.id == "549910313995206687":
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
if len(Queue[str(Queueno)]) < 0 :
await client.say("Queue is empty")
else:
await DisplayQueue(Queue , Queueno)
###############Show Team Points##########
@client.command(pass_context = True)
@commands.has_role('Registered')
async def pointsT(ctx , TeamName):
if ctx.message.channel.id == "551095980251021323":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
if TeamName in Teams:
await client.say("{}".format(Teams[TeamName][teamElo]))
####################Show Points ###############
@client.command(pass_context = True)
@commands.has_role('Registered')
async def points(ctx):
if ctx.message.channel.id == "551095980251021323":
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
if ctx.message.author.mention in Solo:
await client.say("{}".format(Solo[ctx.message.author.mention]["Elo"]) + " points{}".format(ctx.message.author.mention))
######################### Win Solo ##############################
@client.command(pass_context = True)
@commands.has_role('Mod' )
async def winS(ctx , Queueno , Teamno , Teamno2):
with open('Solo_Teams.json' , 'r') as f:
Solo_Teams = json.load(f)
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
await update_winS(Solo_Teams , Solo , Queueno , Teamno , Teamno2)
with open('Solo.json' , 'w') as f:
json.dump(Solo , f , indent = 2)
async def update_winS(Solo_Teams , Solo , Queueno , Teamno , Teamno2):
for player in Solo_Teams[str(Queueno)][str(Teamno)]:
Solo[player]["Elo"] = Solo[player]["Elo"] + SoloCounter
await update_nick(player)
for players in Solo_Teams[str(Queueno)][str(Teamno2)]:
Solo[players]["Elo"] = Solo[players]["Elo"] - SolominCounter
        await update_nick(players)
await display_updates(Solo_Teams , Teamno , Teamno2 , Queueno)
async def update_nick(name):
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
nickname = str(Solo[name]["Elo"]) + "~" + str(Solo[name]["name"])
server = client.get_server("549553345044545536")
member = server.get_member(name[2:len(name)-1])
member.nick = nickname
async def display_updates(Solo_Teams , Teamno , Teamno2 , Queueno):
embed = discord.Embed(
title = "Updates:"
)
embed.add_field(name = 'Winning Team + {}'.format(SoloCounter) , value = '\n'.join(Solo_Teams[str(Queueno)][str(Teamno)]))
embed.add_field(name = 'Losing Team - {}'.format(SolominCounter) , value = '\n'.join(Solo_Teams[str(Queueno)][str(Teamno2)]))
await client.say(embed = embed)
####Leave Queue #####
@client.command(pass_context = True)
@commands.has_role('Registered')
async def leaveQ(ctx):
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
await update_data_lQueue(Queue , ctx.message.author)
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
async def update_data_lQueue( Queue , author):
print(Queueiter)
if author.mention in Queue[str(Queueiter)]:
Queue[str(Queueiter)].remove(author.mention)
await client.say("{} has left the queue".format(author.mention))
else:
await client.say("{} is not in the queue".format(author.mention))
###Create Queue Channel ####
@client.command(pass_context = True)
@commands.has_role('Mod')
async def CreateQueueChannel(ctx):
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
Queue[Queueiter] = []
await client.say("Queue Channel is Created")
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
#############Join Queue#########
@client.command(pass_context = True)
@commands.has_role('Registered')
async def joinQ(ctx):
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
if ctx.message.author.mention in Solo:
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
await update_data_Queue( Queue , ctx.message.author)
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
else:
await client.say("{} is not registered".format(ctx.message.author))
async def update_data_Queue(Queue , author):
global Queueiter
Queueno = Queueiter
if len(Queue["{}".format(Queueno)]) >= 9:
Queue[str(Queueno)].append(author.mention)
await DisplayQueue(Queue , Queueno)
await Create_solo_teams(Queue , Queueno)
Queueiter = Queueiter + 1
Queueno = Queueiter
Queue[str(Queueno)] = []
else:
if not author.mention in Queue[str(Queueno)]:
Queue[str(Queueno)].append(author.mention)
await client.say("{} joined".format(author.mention))
await DisplayQueue( Queue , Queueno)
else:
await client.say("{} already in queue" .format(author.mention))
async def DisplayQueue( Queue , Queueno):
embed = discord.Embed(
title = 'Queue:{}'.format(Queueno),
description = "5 v 5 Custom Games:"
)
embed.add_field(name = "Lobby" , value = '\n'.join(Queue[str(Queueno)]), inline = True)
await client.say(embed = embed)
async def Create_solo_teams(Queue , Queueno):
with open('Solo_Teams.json' , 'r') as f:
Solo_Teams = json.load(f)
await update_Solo_teams(Solo_Teams , Queueno , Queue)
with open('Solo_Teams.json' , 'w') as f:
json.dump(Solo_Teams , f , indent = 2)
async def update_Solo_teams( Solo_Teams , Queueno , Queue):
if not Queueno in Solo_Teams:
Solo_Teams[str(Queueno)] = {}
Solo_Teams[str(Queueno)]["Team1"] = []
Solo_Teams[str(Queueno)]["Team2"] = []
for x in range(0 , 5):
Queuerand = random.choice(Queue[str(Queueno)])
Queue[str(Queueno)].remove(Queuerand)
Solo_Teams[str(Queueno)]["Team1"].append(Queuerand)
for x in range(0 , 5):
Queuerand = random.choice(Queue[str(Queueno)])
Queue[str(Queueno)].remove(Queuerand)
Solo_Teams[str(Queueno)]["Team2"].append(Queuerand)
await Display_solo_teams(Solo_Teams , Queueno)
async def Display_solo_teams( Solo_Teams , Queueno):
embed = discord.Embed(
title = 'Queueno.:{}'.format(Queueno),
description = '5 v 5 Custom Games'
)
embed.add_field(name = "Team1:", value = '\n'.join(Solo_Teams[str(Queueno)]["Team1"]) , inline = True)
embed.add_field(name = "Team2:", value = '\n'.join(Solo_Teams[str(Queueno)]["Team2"]) , inline = False)
with open('Maps.json' , 'r') as f:
Maps = json.load(f)
embed.add_field(name = "Map:", value = random.choice(Maps["Maps"]) , inline = False)
embed.add_field(name = "Host of The Match" , value = random.choice(Solo_Teams[str(Queueno)]["Team1"]) , inline = False)
await client.say(embed = embed)
if __name__ == '__main__':
for extension in extensions:
try:
client.load_extension(extension)
except Exception as e:
print('Failed to load extension {}\n{}: {}'.format(extension, type(e).__name__, e))
client.run(TOKEN)
| [((112, 145), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""--"""'}), "(command_prefix='--')\n", (124, 145), False, 'from discord.ext import commands\n'), ((151, 212), 'os.chdir', 'os.chdir', (['"""D:\\\\Programming\\\\Projects\\\\Discord bot\\\\jsonFiles"""'], {}), "('D:\\\\Programming\\\\Projects\\\\Discord bot\\\\jsonFiles')\n", (159, 212), False, 'import os\n'), ((1025, 1056), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Registered"""'], {}), "('Registered')\n", (1042, 1056), False, 'from discord.ext import commands\n'), ((3580, 3604), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Mod"""'], {}), "('Mod')\n", (3597, 3604), False, 'from discord.ext import commands\n'), ((4184, 4208), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Mod"""'], {}), "('Mod')\n", (4201, 4208), False, 'from discord.ext import commands\n'), ((4558, 4589), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Registered"""'], {}), "('Registered')\n", (4575, 4589), False, 'from discord.ext import commands\n'), ((6826, 6857), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Registered"""'], {}), "('Registered')\n", (6843, 6857), False, 'from discord.ext import commands\n'), ((7662, 7693), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Registered"""'], {}), "('Registered')\n", (7679, 7693), False, 'from discord.ext import commands\n'), ((8145, 8169), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Mod"""'], {}), "('Mod')\n", (8162, 8169), False, 'from discord.ext import commands\n'), ((9793, 9824), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Registered"""'], {}), "('Registered')\n", (9810, 9824), False, 'from discord.ext import commands\n'), ((10481, 10505), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Mod"""'], {}), "('Mod')\n", (10498, 10505), False, 'from discord.ext import commands\n'), ((10852, 10883), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Registered"""'], {}), "('Registered')\n", (10869, 10883), False, 'from discord.ext import commands\n'), ((492, 527), 'discord.utils.oauth_url', 'discord.utils.oauth_url', (['botInfo.id'], {}), '(botInfo.id)\n', (515, 527), False, 'import discord\n'), ((9382, 9413), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Updates:"""'}), "(title='Updates:')\n", (9395, 9413), False, 'import discord\n'), ((1947, 2001), 'discord.utils.get', 'discord.utils.get', (['ctx.message.server.roles'], {'name': 'Role'}), '(ctx.message.server.roles, name=Role)\n', (1964, 2001), False, 'import discord\n'), ((3161, 3226), 'discord.utils.get', 'discord.utils.get', (['player.message.server.roles'], {'name': '"""Registered"""'}), "(player.message.server.roles, name='Registered')\n", (3178, 3226), False, 'import discord\n'), ((3724, 3736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3733, 3736), False, 'import json\n'), ((3795, 3807), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3804, 3807), False, 'import json\n'), ((4039, 4068), 'json.dump', 'json.dump', (['Teams', 'f'], {'indent': '(2)'}), '(Teams, f, indent=2)\n', (4048, 4068), False, 'import json\n'), ((4316, 4328), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4325, 4328), False, 'import json\n'), ((4412, 4447), 'json.dump', 'json.dump', (['Teams_Queue', 'f'], {'indent': '(2)'}), '(Teams_Queue, f, indent=2)\n', (4421, 4447), False, 'import json\n'), ((6617, 6629), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6626, 6629), False, 'import json\n'), ((8290, 8302), 
'json.load', 'json.load', (['f'], {}), '(f)\n', (8299, 8302), False, 'import json\n'), ((8359, 8371), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8368, 8371), False, 'import json\n'), ((8492, 8520), 'json.dump', 'json.dump', (['Solo', 'f'], {'indent': '(2)'}), '(Solo, f, indent=2)\n', (8501, 8520), False, 'import json\n'), ((9079, 9091), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9088, 9091), False, 'import json\n'), ((9907, 9919), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9916, 9919), False, 'import json\n'), ((10600, 10612), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10609, 10612), False, 'import json\n'), ((10740, 10769), 'json.dump', 'json.dump', (['Queue', 'f'], {'indent': '(2)'}), '(Queue, f, indent=2)\n', (10749, 10769), False, 'import json\n'), ((10963, 10975), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10972, 10975), False, 'import json\n'), ((12482, 12494), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12491, 12494), False, 'import json\n'), ((12609, 12643), 'json.dump', 'json.dump', (['Solo_Teams', 'f'], {'indent': '(2)'}), '(Solo_Teams, f, indent=2)\n', (12618, 12643), False, 'import json\n'), ((13805, 13817), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13814, 13817), False, 'import json\n'), ((1351, 1363), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1360, 1363), False, 'import json\n'), ((1557, 1586), 'json.dump', 'json.dump', (['Teams', 'f'], {'indent': '(2)'}), '(Teams, f, indent=2)\n', (1566, 1586), False, 'import json\n'), ((2625, 2637), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2634, 2637), False, 'import json\n'), ((2748, 2776), 'json.dump', 'json.dump', (['Solo', 'f'], {'indent': '(2)'}), '(Solo, f, indent=2)\n', (2757, 2776), False, 'import json\n'), ((4747, 4759), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4756, 4759), False, 'import json\n'), ((6675, 6702), 'random.choice', 'random.choice', (["Maps['Maps']"], {}), "(Maps['Maps'])\n", (6688, 6702), False, 'import random\n'), ((7013, 7025), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7022, 7025), False, 'import json\n'), ((7456, 7468), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7465, 7468), False, 'import json\n'), ((7838, 7850), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7847, 7850), False, 'import json\n'), ((10040, 10069), 'json.dump', 'json.dump', (['Queue', 'f'], {'indent': '(2)'}), '(Queue, f, indent=2)\n', (10049, 10069), False, 'import json\n'), ((11086, 11098), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11095, 11098), False, 'import json\n'), ((11223, 11252), 'json.dump', 'json.dump', (['Queue', 'f'], {'indent': '(2)'}), '(Queue, f, indent=2)\n', (11232, 11252), False, 'import json\n'), ((13862, 13889), 'random.choice', 'random.choice', (["Maps['Maps']"], {}), "(Maps['Maps'])\n", (13875, 13889), False, 'import random\n'), ((4890, 4902), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4899, 4902), False, 'import json\n'), ((5053, 5088), 'json.dump', 'json.dump', (['Teams_Queue', 'f'], {'indent': '(2)'}), '(Teams_Queue, f, indent=2)\n', (5062, 5088), False, 'import json\n')] |
markemus/economy | conversation.py | d7b3be9b2095393d7ee5c8967b9fcee8998776bb | import database as d
import numpy as np
import random
from transitions import Machine
#Conversations are markov chains. Works as follows: a column vector for each CURRENT state j, a row vector for each TARGET state i.
#Each entry i,j = the probability of moving to state i from state j.
#target state D = end of conversation. We start in state D when initializing conversation.
#row vectors sum to 1, internal lists are columns.
#Conversation is a singleton. DO NOT CREATE NEW CONVERSATION OBJECTS.
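# Worked example (illustrative): starting from the initial one-hot state vector
# [0, 0, 0, 0, 1] (state D, end of conversation), np.dot(topicMatrix, stateVector)
# picks out the last column of topicMatrix, [0.25, 0.25, 0.25, 0.25, 0.00] --
# an equal chance of opening with stores, manufacturers, friends or "myself",
# and zero chance of remaining in the ended state.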
class Conversation(object):
#a. stores, b.manufacturers, c.friends, d. myself, e.end conversation
topicMatrix = [
[0.00,0.20,0.15,0.15,0.25],
[0.20,0.00,0.15,0.15,0.25],
[0.15,0.15,0.00,0.20,0.25],
[0.15,0.15,0.20,0.00,0.25],
[0.50,0.50,0.50,0.50,0.00]
]
#a. different store, b. new topic, c. end convo, d. prices
storeMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different manufacturer, b. new topic, c. end convo, d. prices
manuMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different friend, b. new topic, c. end convo, d. family, e. job, /f. skills
friendMatrix = [
[0.0,0.0,0.2,0.1,0.1],
[0.0,0.0,0.2,0.2,0.2],
[0.0,0.0,0.2,0.5,0.5],
[0.5,0.5,0.2,0.0,0.2],
[0.5,0.5,0.2,0.2,0.0]
]
# friendMatrix = [
# [0.00,0.00,0.15,0.1,0.1,0.1],
# [0.00,0.00,0.15,0.2,0.2,0.2],
# [0.00,0.00,0.15,0.5,0.5,0.5],
# [0.34,0.34,0.15,0.0,0.1,0.1],
# [0.33,0.33,0.15,0.1,0.0,0.1],
# [0.33,0.33,0.25,0.1,0.1,0.0]
# ]
#a. introduction, b. new topic, c. end convo, d. myfamily, e. myjob, /f. myskills
myselfMatrix = [
[0.00,1,0.2,0.0,0.0],
[0.25,0,0.2,0.2,0.2],
[0.25,0,0.2,0.5,0.5],
[0.25,0,0.2,0.0,0.3],
[0.25,0,0.2,0.3,0.0]
]
# myselfMatrix = [
# [0.0,1,0.15,0.00,0.00,0.00],
# [0.2,0,0.15,0.20,0.20,0.20],
# [0.2,0,0.15,0.50,0.50,0.50],
# [0.2,0,0.15,0.00,0.15,0.15],
# [0.2,0,0.15,0.15,0.00,0.15],
# [0.2,0,0.15,0.15,0.15,0.00]
# ]
states = ['topic','store','manu','friend', 'myself', 'exit']
transitions = [
{'trigger' : 'toTopic', 'source' : '*', 'dest' : 'topic'},
{'trigger' : 'toStore', 'source' : 'topic', 'dest' : 'store'},
{'trigger' : 'toManu' , 'source' : 'topic', 'dest' : 'manu' },
{'trigger' : 'toFriend', 'source' : 'topic', 'dest' : 'friend' },
{'trigger' : 'toMyself', 'source' : 'topic', 'dest' : 'myself'},
{'trigger' : 'toExit', 'source' : '*', 'dest' : 'exit'}
]
def __init__(self):
self.isPlayer = False
self.firstPerson = None
self.secondPerson = None
self.target = None
self.machine = Machine(model=self, states=Conversation.states, transitions=Conversation.transitions, initial='exit')
self.menuDict = {
'topic' : [self.toStore, self.toManu, self.toFriend, self.toMyself, self.toExit],
'store' : [self.different, self.toTopic, self.toExit, self.prices],
'manu' : [self.different, self.toTopic, self.toExit, self.prices],
'friend' : [self.different, self.toTopic, self.toExit, self.family, self.job],
'myself' : [self.introduction, self.toTopic, self.toExit, self.myfamily, self.myjob]
}
self.machine.on_enter_topic('topicHandler')
self.machine.on_enter_store('storeHandler')
self.machine.on_enter_manu('manuHandler')
self.machine.on_enter_friend('friendHandler')
self.machine.on_enter_myself('myselfHandler')
self.machine.on_enter_exit('exitHandler')
def beginConversation(self, firstPerson, secondPerson, isPlayer=False):
self.isPlayer = isPlayer
self.firstPerson = firstPerson
self.secondPerson = secondPerson
self.introduction()
self.toTopic()
def introduction(self):
p2 = self.firstPerson.peopleManager(self.secondPerson)
p1 = self.secondPerson.peopleManager(self.firstPerson)
p2.name = self.secondPerson.name
p1.name = self.firstPerson.name
p2.updateOpinion(1)
p1.updateOpinion(1)
def different(self):
if self.state == 'friend':
testTarget = self.firstPerson.randomPerson(self.target)
if testTarget is not None:
self.target = testTarget.person
else:
self.target = None
elif self.state == 'manu':
testTarget = self.firstPerson.randomManu(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
elif self.state == 'store':
testTarget = self.firstPerson.randomStore(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
def prices(self):
if self.target is not None:
firstProfile = self.firstPerson.unitManager(self.target, self.secondPerson)
secondProfile = self.secondPerson.unitManager(self.target, self.firstPerson)
firstPrices = firstProfile.getPricesWithDayNum()
secondPrices = secondProfile.getPricesWithDayNum()
firstDayNum = firstPrices[1]
secondDayNum = secondPrices[1]
if firstDayNum > secondDayNum:
prices = firstPrices[0]
secondProfile.updatePrices(prices, firstDayNum)
#thoughts
self.firstPerson.think("I told " + self.secondPerson.name + " about the prices at " + self.target.name + ".")
self.secondPerson.think(self.firstPerson.name + " told me about the prices at " + self.target.name + ".")
elif secondDayNum > firstDayNum:
prices = secondPrices[0]
firstProfile.updatePrices(prices, secondDayNum)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about the prices at " + self.target.name + ".")
self.secondPerson.think("I told " + self.firstPerson.name + " about the prices at " + self.target.name + ".")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s prices.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s prices.")
else:
if self.state == 'store':
self.firstPerson.think(self.secondPerson.name + " listened to me gripe about how I can't find anywhere to shop.")
self.secondPerson.think(self.firstPerson.name + " told me that they can't find anywhere to shop.")
elif self.state == 'manu':
self.firstPerson.think("I mentioned to " + self.secondPerson.name + " that I don't know anything about the local industry.")
self.secondPerson.think(self.firstPerson.name + " told me that they don't know much about the local industry.")
else:
self.firstPerson.think("There is a bug in conversation.prices. (not manu or store)")
self.secondPerson.think("There is a bug in conversation.prices. (not manu or store)")
def family(self):
if self.target is not None:
#info: family, people
#profiles
p1 = self.firstPerson.peopleManager(self.target)
p2 = self.secondPerson.peopleManager(self.target)
#variables
f1 = p1.getFamily()
f2 = p2.getFamily()
ff = []
#update profiles
for a, b in zip(f1, f2):
if a[-1] >= b[-1]:
ff.append(a)
else:
ff.append(b)
p1.updateFamily(*ff)
p2.updateFamily(*ff)
#thoughts
self.firstPerson.think(self.secondPerson.name + " and I gossipped about " + self.target.name + "'s family.")
self.secondPerson.think(self.firstPerson.name + " and I gossipped about " + self.target.name + "'s family.")
else:
self.firstPerson.think("I don't really know anything about my friends' families.")
self.secondPerson.think("I don't really know anything about my friends' families.")
def job(self):
if self.target is not None:
#profiles
firstProfile = self.firstPerson.peopleManager(self.target)
secondProfile = self.secondPerson.peopleManager(self.target)
#variables
firstJob = firstProfile.getJob()
secondJob = secondProfile.getJob()
#update profiles
if firstJob[1] > secondJob[1]:
secondProfile.updateJob(*firstJob)
self.firstPerson.think("I told " + self.secondPerson.name + " what " + self.target.name + " does for a living.")
self.secondPerson.think(self.firstPerson.name + " told me what " + self.target.name + " does for a living.")
elif secondJob[1] > firstJob[1]:
firstProfile.updateJob(*secondJob)
self.firstPerson.think(self.secondPerson.name + " told me what " + self.target.name + " does for a living.")
self.secondPerson.think("I told " + self.firstPerson.name + " about " + self.target.name + " does for a living.")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s job.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s job.")
else:
self.firstPerson.think("I don't know what any of my friends do for a living!")
self.secondPerson.think("I don't know what any of my friends do for a living!")
# def skills(self):
# #info: skills
# if self.target is not None:
# #profiles
# firstProfile = self.firstPerson.peopleManager(self.target)
# secondProfile = self.secondPerson.peopleManager(self.target)
# #variables
# firstSkills = firstProfile.getSkills()
# secondSkills = secondProfile.getSkills()
# #update profiles
# if firstSkills[1] > secondSkills[1]:
# secondProfile.updateSkills(*firstSkills)
# self.firstPerson.think("I told " + self.secondPerson.name + " about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# elif secondSkills[1] > firstSkills[1]:
# firstProfile.updateSkills(*secondSkills)
# self.firstPerson.think(self.secondPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think("I told " + self.firstPerson.name + " about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think(self.secondPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think("I should spend more time doing things with my friends.")
# self.secondPerson.think("I should spend more time doing things with my friends.")
def myfamily(self):
#info: family, people
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
firstOwn = self.firstPerson.peopleManager(self.firstPerson)
secondOwn = self.secondPerson.peopleManager(self.secondPerson)
#update profiles
firstProfile.updateFamily(firstOwn.getFather(), firstOwn.getMother(), firstOwn.getSpouse(), firstOwn.getSiblings(), firstOwn.getChildren())
secondProfile.updateFamily(secondOwn.getFather(), secondOwn.getMother(), secondOwn.getSpouse(), secondOwn.getSiblings(), secondOwn.getChildren())
#thoughts
self.firstPerson.think(self.secondPerson.name + " caught me up on their family life.")
self.secondPerson.think(self.firstPerson.name + " caught me up on their family life.")
def myjob(self):
#info: jobs, jobUnits, *salaries
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
#variables
firstJob = self.firstPerson.getJob()
secondJob = self.secondPerson.getJob()
dayNum = self.firstPerson.model.getDayNum()
try:
firstJobType = firstJob.getJobType()
firstJobUnit = firstJob.getUnit()
firstJobLoc = firstJobUnit.getName()
firstSalary = firstJob.getSalary()
except:
firstJobType = "Jobhunter"
firstJobUnit = None
firstJobLoc = "home"
firstSalary = 0
try:
secondJobType = secondJob.getJobType()
secondJobUnit = secondJob.getUnit()
secondJobLoc = secondJobUnit.getName()
secondSalary = secondJob.getSalary()
except:
secondJobType = "Jobhunter"
secondJobUnit = None
secondJobLoc = "home"
secondSalary = 0
#update profiles
if dayNum > firstProfile.getJob()[1]:
firstProfile.updateJob(firstJob, dayNum)
if dayNum > firstProfile.getSalary()[1]:
firstProfile.updateSalary(firstSalary, dayNum)
if dayNum > secondProfile.getJob()[1]:
secondProfile.updateJob(secondJob, dayNum)
if dayNum > secondProfile.getSalary()[1]:
            secondProfile.updateSalary(secondSalary, dayNum)
if firstJobUnit is not None:
self.secondPerson.unitManager(firstJobUnit, self.firstPerson)
if secondJobUnit is not None:
self.firstPerson.unitManager(secondJobUnit, self.secondPerson)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about their job as a " + secondJobType + " at " + secondJobLoc + ".")
self.secondPerson.think(self.firstPerson.name + " told me about their job as a " + firstJobType + " at " + firstJobLoc + ".")
# def myskills(self):
# #info skills
# #profiles
# firstProfile = self.secondPerson.peopleManager(self.firstPerson)
# secondProfile = self.firstPerson.peopleManager(self.secondPerson)
# #variables
# firstSkills = self.firstPerson.getSkills()
# secondSkills = self.secondPerson.getSkills()
# dayNum = self.firstPerson.model.getDayNum()
# #update profiles
# if dayNum > firstProfile.getSkills()[1]:
# firstProfile.updateSkills(firstSkills, dayNum)
# if dayNum > secondProfile.getSkills()[1]:
# secondProfile.updateSkills(secondSkills, dayNum)
# #thoughts
# self.firstPerson.think(self.secondPerson.name + " and I talked shop for a while.")
# self.secondPerson.think(self.firstPerson.name + " and I talked shop for a while.")
#dialogues are chosen here, but the actual method call is in the handler (eg prices)
def talk(self, matrix, stateVector):
if self.isPlayer:
# stateVector = playerChoice
pass
else:
#get dialogue probabilities given last dialogue
probArray = np.dot(matrix, stateVector)
prob = probArray.tolist()
#choose dialogue
choice = random.random()
stateVector = [0 for i in range(len(prob))]
for i in range(len(prob)):
outcome = prob[i]
if outcome >= choice:
stateVector[i] = 1
return stateVector
else:
choice = choice - outcome
def topicHandler(self):
matrix = Conversation.topicMatrix
stateVector = [0,0,0,0,1]
# self.firstPerson.think("topicHandler")
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def storeHandler(self):
matrix = Conversation.storeMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("storeHandler")
self.different()
while self.state == 'store':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def manuHandler(self):
matrix = Conversation.manuMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("manuHandler")
self.different()
while self.state == 'manu':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def friendHandler(self):
matrix = Conversation.friendMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("friendHandler")
self.different()
while self.state == 'friend':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def myselfHandler(self):
matrix = Conversation.myselfMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("myselfHandler")
while self.state == 'myself':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def exitHandler(self):
self.isPlayer = False
Convo = Conversation() | [((2827, 2933), 'transitions.Machine', 'Machine', ([], {'model': 'self', 'states': 'Conversation.states', 'transitions': 'Conversation.transitions', 'initial': '"""exit"""'}), "(model=self, states=Conversation.states, transitions=Conversation.\n transitions, initial='exit')\n", (2834, 2933), False, 'from transitions import Machine\n'), ((15917, 15944), 'numpy.dot', 'np.dot', (['matrix', 'stateVector'], {}), '(matrix, stateVector)\n', (15923, 15944), True, 'import numpy as np\n'), ((16034, 16049), 'random.random', 'random.random', ([], {}), '()\n', (16047, 16049), False, 'import random\n')] |
saijananiganesan/SimPathFinder | src/createData.py | 1634f2cb82c8056256d191be72589c4c531a3f67 | from __init__ import ExtractUnlabeledData, SampleUnlabeledData, ExtractLabeledData
E = ExtractLabeledData(data_dir='../labeldata/')
E.get_pathways()
E.get_pathway_names()
E.get_classes_dict()
E.create_df_all_labels()
| [((88, 132), '__init__.ExtractLabeledData', 'ExtractLabeledData', ([], {'data_dir': '"""../labeldata/"""'}), "(data_dir='../labeldata/')\n", (106, 132), False, 'from __init__ import ExtractUnlabeledData, SampleUnlabeledData, ExtractLabeledData\n')] |
farman99ahmed/diyblog | blog/views.py | 2e4548037c95b5563d2fdba3d05b488330a5e2b4 | from django.shortcuts import render, redirect
from .forms import AuthorForm, BlogForm, NewUserForm
from .models import Author, Blog
from django.contrib.auth import login, authenticate, logout
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def get_authors(request):
context = {'authors': Author.objects.all()}
return render(request, "blog/get_authors.html", context)
@login_required
def get_author(request, id):
author = Author.objects.get(pk = id)
blogs = Blog.objects.filter(author = id)
context = {'author': author, 'blogs': blogs}
return render(request, "blog/get_author.html", context)
@login_required
def post_put_author(request, id = 0):
if request.method == "GET":
if id == 0:
form = AuthorForm()
else:
author = Author.objects.get(pk = id)
form = AuthorForm(instance = author)
return render(request, "blog/post_put_authors.html", {"form": form})
else:
if id == 0:
form = AuthorForm(request.POST)
else:
author = Author.objects.get(pk = id)
form = AuthorForm(request.POST, instance = author)
if form.is_valid():
form.save()
return redirect('get_authors')
@login_required
def delete_author(request, id):
author = Author.objects.get(pk = id)
author.delete()
return redirect('get_authors')
def get_blogs(request):
context = {'blogs': Blog.objects.all()}
return render(request, "blog/get_blogs.html", context)
@login_required
def get_blog(request, id):
blog = {'blog': Blog.objects.get(pk = id)}
return render(request, "blog/get_blog.html", blog)
@login_required
def post_put_blog(request, id = 0):
if request.method == "GET":
if id == 0:
form = BlogForm()
else:
blog = Blog.objects.get(pk = id)
form = BlogForm(instance = blog)
return render(request, "blog/post_put_blogs.html", {"form": form})
else:
if id == 0:
form = BlogForm(request.POST)
else:
blog = Blog.objects.get(pk = id)
form = BlogForm(request.POST, instance = blog)
if form.is_valid():
form.save()
return redirect('get_blogs')
@login_required
def delete_blog(request, id):
blog = Blog.objects.get(pk = id)
blog.delete()
return redirect('get_blogs')
def register_request(request):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
messages.success(request, "Registration successful." )
return redirect("get_blogs")
messages.error(request, "Unsuccessful registration. Invalid information.")
form = NewUserForm()
return render (request=request, template_name="blog/register.html", context={"register_form":form})
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}.")
return redirect("get_blogs")
else:
messages.error(request,"Invalid username or password.")
else:
messages.error(request,"Invalid username or password.")
form = AuthenticationForm()
return render(request=request, template_name="blog/login.html", context={"login_form":form})
def logout_request(request):
logout(request)
messages.info(request, "You have successfully logged out.")
return redirect("get_blogs")
| [((457, 506), 'django.shortcuts.render', 'render', (['request', '"""blog/get_authors.html"""', 'context'], {}), "(request, 'blog/get_authors.html', context)\n", (463, 506), False, 'from django.shortcuts import render, redirect\n'), ((700, 748), 'django.shortcuts.render', 'render', (['request', '"""blog/get_author.html"""', 'context'], {}), "(request, 'blog/get_author.html', context)\n", (706, 748), False, 'from django.shortcuts import render, redirect\n'), ((1489, 1512), 'django.shortcuts.redirect', 'redirect', (['"""get_authors"""'], {}), "('get_authors')\n", (1497, 1512), False, 'from django.shortcuts import render, redirect\n'), ((1593, 1640), 'django.shortcuts.render', 'render', (['request', '"""blog/get_blogs.html"""', 'context'], {}), "(request, 'blog/get_blogs.html', context)\n", (1599, 1640), False, 'from django.shortcuts import render, redirect\n'), ((1743, 1786), 'django.shortcuts.render', 'render', (['request', '"""blog/get_blog.html"""', 'blog'], {}), "(request, 'blog/get_blog.html', blog)\n", (1749, 1786), False, 'from django.shortcuts import render, redirect\n'), ((2493, 2514), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (2501, 2514), False, 'from django.shortcuts import render, redirect\n'), ((2877, 2974), 'django.shortcuts.render', 'render', ([], {'request': 'request', 'template_name': '"""blog/register.html"""', 'context': "{'register_form': form}"}), "(request=request, template_name='blog/register.html', context={\n 'register_form': form})\n", (2883, 2974), False, 'from django.shortcuts import render, redirect\n'), ((3558, 3578), 'django.contrib.auth.forms.AuthenticationForm', 'AuthenticationForm', ([], {}), '()\n', (3576, 3578), False, 'from django.contrib.auth.forms import AuthenticationForm\n'), ((3587, 3678), 'django.shortcuts.render', 'render', ([], {'request': 'request', 'template_name': '"""blog/login.html"""', 'context': "{'login_form': form}"}), "(request=request, template_name='blog/login.html', context={\n 'login_form': form})\n", (3593, 3678), False, 'from django.shortcuts import render, redirect\n'), ((3704, 3719), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (3710, 3719), False, 'from django.contrib.auth import login, authenticate, logout\n'), ((3721, 3780), 'django.contrib.messages.info', 'messages.info', (['request', '"""You have successfully logged out."""'], {}), "(request, 'You have successfully logged out.')\n", (3734, 3780), False, 'from django.contrib import messages\n'), ((3790, 3811), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (3798, 3811), False, 'from django.shortcuts import render, redirect\n'), ((1015, 1076), 'django.shortcuts.render', 'render', (['request', '"""blog/post_put_authors.html"""', "{'form': form}"], {}), "(request, 'blog/post_put_authors.html', {'form': form})\n", (1021, 1076), False, 'from django.shortcuts import render, redirect\n'), ((1344, 1367), 'django.shortcuts.redirect', 'redirect', (['"""get_authors"""'], {}), "('get_authors')\n", (1352, 1367), False, 'from django.shortcuts import render, redirect\n'), ((2041, 2100), 'django.shortcuts.render', 'render', (['request', '"""blog/post_put_blogs.html"""', "{'form': form}"], {}), "(request, 'blog/post_put_blogs.html', {'form': form})\n", (2047, 2100), False, 'from django.shortcuts import render, redirect\n'), ((2358, 2379), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (2366, 2379), False, 'from django.shortcuts import 
render, redirect\n'), ((2772, 2846), 'django.contrib.messages.error', 'messages.error', (['request', '"""Unsuccessful registration. Invalid information."""'], {}), "(request, 'Unsuccessful registration. Invalid information.')\n", (2786, 2846), False, 'from django.contrib import messages\n'), ((3038, 3084), 'django.contrib.auth.forms.AuthenticationForm', 'AuthenticationForm', (['request'], {'data': 'request.POST'}), '(request, data=request.POST)\n', (3056, 3084), False, 'from django.contrib.auth.forms import AuthenticationForm\n'), ((2659, 2679), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (2664, 2679), False, 'from django.contrib.auth import login, authenticate, logout\n'), ((2683, 2736), 'django.contrib.messages.success', 'messages.success', (['request', '"""Registration successful."""'], {}), "(request, 'Registration successful.')\n", (2699, 2736), False, 'from django.contrib import messages\n'), ((2748, 2769), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (2756, 2769), False, 'from django.shortcuts import render, redirect\n'), ((3213, 3263), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (3225, 3263), False, 'from django.contrib.auth import login, authenticate, logout\n'), ((3494, 3550), 'django.contrib.messages.error', 'messages.error', (['request', '"""Invalid username or password."""'], {}), "(request, 'Invalid username or password.')\n", (3508, 3550), False, 'from django.contrib import messages\n'), ((3292, 3312), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (3297, 3312), False, 'from django.contrib.auth import login, authenticate, logout\n'), ((3317, 3380), 'django.contrib.messages.info', 'messages.info', (['request', 'f"""You are now logged in as {username}."""'], {}), "(request, f'You are now logged in as {username}.')\n", (3330, 3380), False, 'from django.contrib import messages\n'), ((3392, 3413), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (3400, 3413), False, 'from django.shortcuts import render, redirect\n'), ((3427, 3483), 'django.contrib.messages.error', 'messages.error', (['request', '"""Invalid username or password."""'], {}), "(request, 'Invalid username or password.')\n", (3441, 3483), False, 'from django.contrib import messages\n')] |
SolomidHero/speech-regeneration-enhancer | tests/conftest.py | eb43907ff085d68a707ff7bc3af14e93ff66fd65 | # here we make fixtures of toy data
# real parameters are stored and accessed from config
import pytest
import librosa
import os
import numpy as np
from hydra.experimental import compose, initialize
@pytest.fixture(scope="session")
def cfg():
with initialize(config_path="../", job_name="test_app"):
config = compose(config_name="config")
config.dataset = compose(config_name="tests/test_dataset_config")
config.train = compose(config_name="tests/test_train_config")
return config
@pytest.fixture(scope="session")
def sample_rate(cfg):
return cfg.data.sample_rate
@pytest.fixture(scope="session")
def example_wav(sample_rate):
wav, sr = librosa.load(
os.path.dirname(__file__) + "/data/example.mp3",
sr=sample_rate, dtype=np.float32,
)
return { 'wav': wav, 'sr': sr }
@pytest.fixture(scope="session")
def n_fft(cfg):
return cfg.data.n_fft
@pytest.fixture(scope="session")
def hop_length(cfg):
return cfg.data.hop_length
@pytest.fixture(scope="session")
def win_length(cfg):
return cfg.data.win_length
@pytest.fixture(scope="session")
def f_min(cfg):
return cfg.data.f_min
@pytest.fixture(scope="session")
def f_max(cfg):
return cfg.data.f_max
@pytest.fixture(scope="session")
def hop_ms(example_wav, hop_length):
return 1e3 * hop_length / example_wav['sr']
@pytest.fixture(scope="session")
def n_frames(example_wav, hop_length):
return (example_wav['wav'].shape[-1] - 1) // hop_length + 1
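# Example (hypothetical numbers -- the real values come from the hydra test configs):
# with sample_rate=16000 and hop_length=200, hop_ms is 1e3 * 200 / 16000 = 12.5 ms,
# and a one-second clip of 16000 samples yields (16000 - 1) // 200 + 1 = 80 frames.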
# It is not clear if we should cleanup the test directories
# or leave them for debugging
# https://github.com/pytest-dev/pytest/issues/3051
@pytest.fixture(autouse=True, scope='session')
def clear_files_teardown():
yield None
os.system("rm -r tests/test_dataset tests/test_experiment tests/test_logs") | [((205, 236), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (219, 236), False, 'import pytest\n'), ((507, 538), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (521, 538), False, 'import pytest\n'), ((593, 624), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (607, 624), False, 'import pytest\n'), ((812, 843), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (826, 843), False, 'import pytest\n'), ((886, 917), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (900, 917), False, 'import pytest\n'), ((970, 1001), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (984, 1001), False, 'import pytest\n'), ((1054, 1085), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1068, 1085), False, 'import pytest\n'), ((1128, 1159), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1142, 1159), False, 'import pytest\n'), ((1202, 1233), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1216, 1233), False, 'import pytest\n'), ((1319, 1350), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1333, 1350), False, 'import pytest\n'), ((1595, 1640), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""session"""'}), "(autouse=True, scope='session')\n", (1609, 1640), False, 'import pytest\n'), ((1684, 1759), 'os.system', 'os.system', (['"""rm -r tests/test_dataset tests/test_experiment tests/test_logs"""'], {}), "('rm -r tests/test_dataset tests/test_experiment tests/test_logs')\n", (1693, 1759), False, 'import os\n'), ((255, 305), 'hydra.experimental.initialize', 'initialize', ([], {'config_path': '"""../"""', 'job_name': '"""test_app"""'}), "(config_path='../', job_name='test_app')\n", (265, 305), False, 'from hydra.experimental import compose, initialize\n'), ((320, 349), 'hydra.experimental.compose', 'compose', ([], {'config_name': '"""config"""'}), "(config_name='config')\n", (327, 349), False, 'from hydra.experimental import compose, initialize\n'), ((371, 419), 'hydra.experimental.compose', 'compose', ([], {'config_name': '"""tests/test_dataset_config"""'}), "(config_name='tests/test_dataset_config')\n", (378, 419), False, 'from hydra.experimental import compose, initialize\n'), ((439, 485), 'hydra.experimental.compose', 'compose', ([], {'config_name': '"""tests/test_train_config"""'}), "(config_name='tests/test_train_config')\n", (446, 485), False, 'from hydra.experimental import compose, initialize\n'), ((685, 710), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (700, 710), False, 'import os\n')] |
CvanderStoep/VideosSampleCode | dataclassses_howto.py | 38a8d2538a041d5664d0040807ffac463d0fb79c | import dataclasses
import inspect
from dataclasses import dataclass, field
from pprint import pprint
import attr
class ManualComment:
def __init__(self, id: int, text: str):
self.id: int = id
self.text: str = text
def __repr__(self):
return "{}(id={}, text={})".format(self.__class__.__name__, self.id, self.text)
def __eq__(self, other):
if other.__class__ is self.__class__:
return (self.id, self.text) == (other.id, other.text)
else:
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def __hash__(self):
return hash((self.__class__, self.id, self.text))
def __lt__(self, other):
if other.__class__ is self.__class__:
return (self.id, self.text) < (other.id, other.text)
else:
return NotImplemented
def __le__(self, other):
if other.__class__ is self.__class__:
return (self.id, self.text) <= (other.id, other.text)
else:
return NotImplemented
def __gt__(self, other):
if other.__class__ is self.__class__:
return (self.id, self.text) > (other.id, other.text)
else:
return NotImplemented
def __ge__(self, other):
if other.__class__ is self.__class__:
return (self.id, self.text) >= (other.id, other.text)
else:
return NotImplemented
@dataclass(frozen=True, order=True)
class Comment:
id: int
text: str = ""
replies: list[int] = field(default_factory=list, repr=False, compare=False)
@attr.s(frozen=True, order=True, slots=True)
class AttrComment:
id: int = 0
text: str = ""
def main():
comment = Comment(1, "I just subscribed!")
    # comment.id = 3  # would raise dataclasses.FrozenInstanceError: the dataclass is frozen (immutable)
print(comment)
print(dataclasses.astuple(comment))
print(dataclasses.asdict(comment))
copy = dataclasses.replace(comment, id=3)
print(copy)
pprint(inspect.getmembers(Comment, inspect.isfunction))
if __name__ == '__main__':
main()
| [((1565, 1599), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'order': '(True)'}), '(frozen=True, order=True)\n', (1574, 1599), False, 'from dataclasses import dataclass, field\n'), ((1729, 1772), 'attr.s', 'attr.s', ([], {'frozen': '(True)', 'order': '(True)', 'slots': '(True)'}), '(frozen=True, order=True, slots=True)\n', (1735, 1772), False, 'import attr\n'), ((1671, 1725), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'repr': '(False)', 'compare': '(False)'}), '(default_factory=list, repr=False, compare=False)\n', (1676, 1725), False, 'from dataclasses import dataclass, field\n'), ((2037, 2071), 'dataclasses.replace', 'dataclasses.replace', (['comment'], {'id': '(3)'}), '(comment, id=3)\n', (2056, 2071), False, 'import dataclasses\n'), ((1957, 1985), 'dataclasses.astuple', 'dataclasses.astuple', (['comment'], {}), '(comment)\n', (1976, 1985), False, 'import dataclasses\n'), ((1997, 2024), 'dataclasses.asdict', 'dataclasses.asdict', (['comment'], {}), '(comment)\n', (2015, 2024), False, 'import dataclasses\n'), ((2100, 2147), 'inspect.getmembers', 'inspect.getmembers', (['Comment', 'inspect.isfunction'], {}), '(Comment, inspect.isfunction)\n', (2118, 2147), False, 'import inspect\n')] |
yaosir0317/my_first | downloadMusic/main.py | 387fe21aa529bca1d08ed45e13269aca23dce251 | from enum import Enum
import requests
class MusicAPP(Enum):
qq = "qq"
wy = "netease"
PRE_URL = "http://www.musictool.top/"
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"}
def get_music_list(name, app, page=1):
data = {"input": name, "filter": "name", "type": app, "page": page}
resp = requests.post(url=PRE_URL, headers=headers, data=data)
print(resp.text)
print(resp.json())
if __name__ == '__main__':
get_music_list("画", MusicAPP.qq)
| [((409, 463), 'requests.post', 'requests.post', ([], {'url': 'PRE_URL', 'headers': 'headers', 'data': 'data'}), '(url=PRE_URL, headers=headers, data=data)\n', (422, 463), False, 'import requests\n')] |
michelmarcondes/django-study-with-docker | app/api/serializers.py | 248e41db3f16a5d26662c5e93ebf32716a20195e | from rest_framework import serializers
from projects.models import Project, Tag, Review
from users.models import Profile
class ReviewSerializer(serializers.ModelSerializer):
class Meta:
model = Review
fields = '__all__'
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = '__all__'
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = '__all__'
class ProjectSerializer(serializers.ModelSerializer):
owner = ProfileSerializer(many=False)
tags = TagSerializer(many=True)
reviews = serializers.SerializerMethodField()
class Meta:
model = Project
fields = '__all__'
def get_reviews(self, obj):
reviews = obj.review_set.all()
serializer = ReviewSerializer(reviews, many=True)
return serializer.data | [((628, 663), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (661, 663), False, 'from rest_framework import serializers\n')] |
johan--/commcare-hq | corehq/apps/domain/views.py | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | import copy
import datetime
from decimal import Decimal
import logging
import uuid
import json
import cStringIO
from couchdbkit import ResourceNotFound
import dateutil
from django.core.paginator import Paginator
from django.views.generic import View
from django.db.models import Sum
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import redirect, render
from django.contrib import messages
from django.views.decorators.http import require_POST
from PIL import Image
from django.utils.translation import ugettext as _, ugettext_noop, ugettext_lazy
from corehq.const import USER_DATE_FORMAT
from custom.dhis2.forms import Dhis2SettingsForm
from custom.dhis2.models import Dhis2Settings
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
from corehq.apps.accounting.async_handlers import Select2BillingInfoHandler
from corehq.apps.accounting.invoicing import DomainWireInvoiceFactory
from corehq.apps.accounting.decorators import (
requires_privilege_with_fallback,
)
from corehq.apps.hqwebapp.tasks import send_mail_async
from corehq.apps.accounting.exceptions import (
NewSubscriptionError,
PaymentRequestError,
)
from corehq.apps.accounting.payment_handlers import (
BulkStripePaymentHandler,
CreditStripePaymentHandler,
InvoiceStripePaymentHandler,
)
from corehq.apps.accounting.subscription_changes import DomainDowngradeStatusHandler
from corehq.apps.accounting.forms import EnterprisePlanContactForm
from corehq.apps.accounting.utils import (
get_change_status, get_privileges, fmt_dollar_amount,
quantize_accounting_decimal, get_customer_cards,
)
from corehq.apps.hqwebapp.async_handler import AsyncHandlerMixin
from corehq.apps.smsbillables.async_handlers import SMSRatesAsyncHandler, SMSRatesSelect2AsyncHandler
from corehq.apps.smsbillables.forms import SMSRateCalculatorForm
from corehq.apps.users.models import DomainInvitation
from corehq.apps.fixtures.models import FixtureDataType
from corehq.toggles import NAMESPACE_DOMAIN, all_toggles, CAN_EDIT_EULA, TRANSFER_DOMAIN
from corehq.util.context_processors import get_domain_type
from dimagi.utils.couch.resource_conflict import retry_resource
from corehq import privileges, feature_previews
from django_prbac.utils import has_privilege
from corehq.apps.accounting.models import (
Subscription, CreditLine, SoftwareProductType, SubscriptionType,
DefaultProductPlan, SoftwarePlanEdition, BillingAccount,
BillingAccountType,
Invoice, BillingRecord, InvoicePdf, PaymentMethodType,
PaymentMethod, EntryPoint, WireInvoice, SoftwarePlanVisibility, FeatureType,
StripePaymentMethod,
)
from corehq.apps.accounting.usage import FeatureUsageCalculator
from corehq.apps.accounting.user_text import (
get_feature_name,
PricingTable,
DESC_BY_EDITION,
get_feature_recurring_interval,
)
from corehq.apps.hqwebapp.models import ProjectSettingsTab
from corehq.apps import receiverwrapper
from corehq.apps.domain.calculations import CALCS, CALC_FNS, CALC_ORDER, dom_calc
from corehq.apps.domain.decorators import (
domain_admin_required, login_required, require_superuser, login_and_domain_required
)
from corehq.apps.domain.forms import (
DomainGlobalSettingsForm, DomainMetadataForm, SnapshotSettingsForm,
SnapshotApplicationForm, DomainInternalForm, PrivacySecurityForm,
ConfirmNewSubscriptionForm, ProBonoForm, EditBillingAccountInfoForm,
ConfirmSubscriptionRenewalForm, SnapshotFixtureForm, TransferDomainForm,
SelectSubscriptionTypeForm, INTERNAL_SUBSCRIPTION_MANAGEMENT_FORMS)
from corehq.apps.domain.models import Domain, LICENSES, TransferDomainRequest
from corehq.apps.domain.utils import normalize_domain_name
from corehq.apps.hqwebapp.views import BaseSectionPageView, BasePageView, CRUDPaginatedViewMixin
from corehq.apps.orgs.models import Organization, OrgRequest, Team
from corehq.apps.domain.forms import ProjectSettingsForm
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import get_ip, json_response, get_site_domain
from corehq.apps.users.decorators import require_can_edit_web_users
from corehq.apps.receiverwrapper.forms import GenericRepeaterForm, FormRepeaterForm
from corehq.apps.receiverwrapper.models import FormRepeater, CaseRepeater, ShortFormRepeater, AppStructureRepeater, \
RepeatRecord
from dimagi.utils.post import simple_post
from toggle.models import Toggle
from corehq.apps.hqwebapp.tasks import send_html_email_async
accounting_logger = logging.getLogger('accounting')
PAYMENT_ERROR_MESSAGES = {
400: ugettext_lazy('Your request was not formatted properly.'),
403: ugettext_lazy('Forbidden.'),
404: ugettext_lazy('Page not found.'),
500: ugettext_lazy("There was an error processing your request."
" We're working quickly to fix the issue. Please try again shortly."),
}
# Domain not required here - we could be selecting it for the first time. See notes in domain.decorators
# about why we need this custom login_required decorator
@login_required
def select(request, domain_select_template='domain/select.html', do_not_redirect=False):
domains_for_user = Domain.active_for_user(request.user)
if not domains_for_user:
return redirect('registration_domain', domain_type=get_domain_type(None, request))
email = request.couch_user.get_email()
open_invitations = [e for e in DomainInvitation.by_email(email) if not e.is_expired]
additional_context = {
'domains_for_user': domains_for_user,
'open_invitations': open_invitations,
}
last_visited_domain = request.session.get('last_visited_domain')
if open_invitations \
or do_not_redirect \
or not last_visited_domain:
return render(request, domain_select_template, additional_context)
else:
domain = Domain.get_by_name(last_visited_domain)
if domain and domain.is_active:
# mirrors logic in login_and_domain_required
if (
request.couch_user.is_member_of(domain) or domain.is_public
or (request.user.is_superuser and not domain.restrict_superusers)
or domain.is_snapshot
):
try:
from corehq.apps.dashboard.views import dashboard_default
return dashboard_default(request, last_visited_domain)
except Http404:
pass
del request.session['last_visited_domain']
return render(request, domain_select_template, additional_context)
@require_superuser
def incomplete_email(request,
incomplete_email_template='domain/incomplete_email.html'):
from corehq.apps.domain.tasks import (
incomplete_self_started_domains,
incomplete_domains_to_email
)
context = {
'self_started': incomplete_self_started_domains,
'dimagi_owned': incomplete_domains_to_email,
}
return render(request, incomplete_email_template, context)
class DomainViewMixin(object):
"""
Paving the way for a world of entirely class-based views.
Let's do this, guys. :-)
Set strict_domain_fetching to True in subclasses to bypass the cache.
"""
strict_domain_fetching = False
@property
@memoized
def domain(self):
domain = self.args[0] if len(self.args) > 0 else self.kwargs.get('domain', "")
return normalize_domain_name(domain)
@property
@memoized
def domain_object(self):
domain = Domain.get_by_name(self.domain, strict=self.strict_domain_fetching)
if not domain:
raise Http404()
return domain
class LoginAndDomainMixin(object):
@method_decorator(login_and_domain_required)
def dispatch(self, *args, **kwargs):
return super(LoginAndDomainMixin, self).dispatch(*args, **kwargs)
class SubscriptionUpgradeRequiredView(LoginAndDomainMixin, BasePageView,
DomainViewMixin):
page_title = ugettext_lazy("Upgrade Required")
template_name = "domain/insufficient_privilege_notification.html"
@property
def page_url(self):
return self.request.get_full_path
@property
def page_name(self):
return _("Sorry, you do not have access to %(feature_name)s") % {
'feature_name': self.feature_name,
}
@property
def is_domain_admin(self):
if not hasattr(self.request, 'couch_user'):
return False
return self.request.couch_user.is_domain_admin(self.domain)
@property
def page_context(self):
return {
'domain': self.domain,
'feature_name': self.feature_name,
'plan_name': self.required_plan_name,
'change_subscription_url': reverse(SelectPlanView.urlname,
args=[self.domain]),
'is_domain_admin': self.is_domain_admin,
}
@property
def missing_privilege(self):
return self.args[1]
@property
def feature_name(self):
return privileges.Titles.get_name_from_privilege(self.missing_privilege)
@property
def required_plan_name(self):
return DefaultProductPlan.get_lowest_edition_by_domain(
self.domain_object, [self.missing_privilege]
)
def get(self, request, *args, **kwargs):
self.request = request
self.args = args
return super(SubscriptionUpgradeRequiredView, self).get(
request, *args, **kwargs
)
class BaseDomainView(LoginAndDomainMixin, BaseSectionPageView, DomainViewMixin):
@property
def main_context(self):
main_context = super(BaseDomainView, self).main_context
main_context.update({
'domain': self.domain,
})
return main_context
@property
@memoized
def page_url(self):
if self.urlname:
return reverse(self.urlname, args=[self.domain])
class BaseProjectSettingsView(BaseDomainView):
section_name = ugettext_lazy("Project Settings")
template_name = "settings/base_template.html"
@property
def main_context(self):
main_context = super(BaseProjectSettingsView, self).main_context
main_context.update({
'active_tab': ProjectSettingsTab(
self.request,
self.urlname,
domain=self.domain,
couch_user=self.request.couch_user,
project=self.request.project
),
'is_project_settings': True,
})
return main_context
@property
@memoized
def section_url(self):
return reverse(EditMyProjectSettingsView.urlname, args=[self.domain])
class DefaultProjectSettingsView(BaseDomainView):
urlname = 'domain_settings_default'
def get(self, request, *args, **kwargs):
if request.couch_user.is_domain_admin(self.domain):
return HttpResponseRedirect(reverse(EditBasicProjectInfoView.urlname, args=[self.domain]))
return HttpResponseRedirect(reverse(EditMyProjectSettingsView.urlname, args=[self.domain]))
class BaseAdminProjectSettingsView(BaseProjectSettingsView):
"""
The base class for all project settings views that require administrative
access.
"""
@method_decorator(domain_admin_required)
def dispatch(self, request, *args, **kwargs):
return super(BaseProjectSettingsView, self).dispatch(request, *args, **kwargs)
class BaseEditProjectInfoView(BaseAdminProjectSettingsView):
"""
The base class for all the edit project information views.
"""
strict_domain_fetching = True
@property
def autocomplete_fields(self):
return []
@property
def main_context(self):
context = super(BaseEditProjectInfoView, self).main_context
context.update({
'autocomplete_fields': self.autocomplete_fields,
'commtrack_enabled': self.domain_object.commtrack_enabled,
            # Ideally the template gets access to the domain doc through
            # some other means. Otherwise it has to be supplied to every view reachable in that sidebar (every
            # view whose template extends users_base.html); Mike says he's refactoring all of this imminently, so
            # I will not worry about it until he is done.
'call_center_enabled': self.domain_object.call_center_config.enabled,
'cloudcare_releases': self.domain_object.cloudcare_releases,
})
return context
class EditBasicProjectInfoView(BaseEditProjectInfoView):
template_name = 'domain/admin/info_basic.html'
urlname = 'domain_basic_info'
page_title = ugettext_lazy("Basic")
@property
def can_user_see_meta(self):
return self.request.couch_user.is_previewer()
@property
def can_use_custom_logo(self):
return has_privilege(self.request, privileges.CUSTOM_BRANDING)
@property
@memoized
def basic_info_form(self):
initial = {
'hr_name': self.domain_object.hr_name or self.domain_object.name,
'default_timezone': self.domain_object.default_timezone,
'case_sharing': json.dumps(self.domain_object.case_sharing),
'call_center_enabled': self.domain_object.call_center_config.enabled,
'call_center_type': self.initial_call_center_type,
'call_center_case_owner': self.initial_call_center_case_owner,
'call_center_case_type': self.domain_object.call_center_config.case_type,
'commtrack_enabled': self.domain_object.commtrack_enabled,
}
if self.request.method == 'POST':
if self.can_user_see_meta:
return DomainMetadataForm(
self.request.POST,
self.request.FILES,
user=self.request.couch_user,
domain=self.domain_object.name,
can_use_custom_logo=self.can_use_custom_logo,
)
return DomainGlobalSettingsForm(
self.request.POST,
self.request.FILES,
domain=self.domain_object.name,
can_use_custom_logo=self.can_use_custom_logo
)
if self.can_user_see_meta:
initial.update({
'is_test': self.domain_object.is_test,
'cloudcare_releases': self.domain_object.cloudcare_releases,
})
return DomainMetadataForm(
can_use_custom_logo=self.can_use_custom_logo,
user=self.request.couch_user,
domain=self.domain_object.name,
initial=initial
)
return DomainGlobalSettingsForm(
initial=initial,
domain=self.domain_object.name,
can_use_custom_logo=self.can_use_custom_logo
)
@property
@memoized
def initial_call_center_case_owner(self):
config = self.domain_object.call_center_config
if config.use_user_location_as_owner:
return DomainGlobalSettingsForm.USE_LOCATIONS_CHOICE
return self.domain_object.call_center_config.case_owner_id
@property
@memoized
def initial_call_center_type(self):
if self.domain_object.call_center_config.use_fixtures:
return DomainGlobalSettingsForm.CASES_AND_FIXTURES_CHOICE
return DomainGlobalSettingsForm.CASES_ONLY_CHOICE
@property
def page_context(self):
return {
'basic_info_form': self.basic_info_form,
}
def post(self, request, *args, **kwargs):
if self.basic_info_form.is_valid():
if self.basic_info_form.save(request, self.domain_object):
messages.success(request, _("Project settings saved!"))
else:
messages.error(request, _("There seems to have been an error saving your settings. Please try again!"))
return self.get(request, *args, **kwargs)
class EditMyProjectSettingsView(BaseProjectSettingsView):
template_name = 'domain/admin/my_project_settings.html'
urlname = 'my_project_settings'
page_title = ugettext_lazy("My Timezone")
@property
@memoized
def my_project_settings_form(self):
initial = { 'global_timezone': self.domain_object.default_timezone }
if self.domain_membership:
initial.update({
'override_global_tz': self.domain_membership.override_global_tz,
'user_timezone': (self.domain_membership.timezone if self.domain_membership.override_global_tz
else self.domain_object.default_timezone),
})
else:
initial.update({
'override_global_tz': False,
'user_timezone': initial["global_timezone"],
})
if self.request.method == 'POST':
return ProjectSettingsForm(self.request.POST, initial=initial)
return ProjectSettingsForm(initial=initial)
@property
@memoized
def domain_membership(self):
return self.request.couch_user.get_domain_membership(self.domain)
@property
def page_context(self):
return {
'my_project_settings_form': self.my_project_settings_form,
'override_global_tz': self.domain_membership.override_global_tz if self.domain_membership else False,
'no_domain_membership': not self.domain_membership,
}
def post(self, request, *args, **kwargs):
if self.my_project_settings_form.is_valid():
self.my_project_settings_form.save(self.request.couch_user, self.domain)
messages.success(request, _("Your project settings have been saved!"))
return self.get(request, *args, **kwargs)
class EditDhis2SettingsView(BaseProjectSettingsView):
template_name = 'domain/admin/dhis2_settings.html'
urlname = 'dhis2_settings'
page_title = ugettext_lazy("DHIS2 API settings")
@property
@memoized
def dhis2_settings_form(self):
settings_ = Dhis2Settings.for_domain(self.domain_object.name)
initial = settings_.dhis2 if settings_ else {'enabled': False}
if self.request.method == 'POST':
return Dhis2SettingsForm(self.request.POST, initial=initial)
return Dhis2SettingsForm(initial=initial)
@property
def page_context(self):
return {
'dhis2_settings_form': self.dhis2_settings_form,
}
def post(self, request, *args, **kwargs):
if self.dhis2_settings_form.is_valid():
if self.dhis2_settings_form.save(self.domain_object):
messages.success(request, _('DHIS2 API settings successfully updated'))
else:
messages.error(request, _('There seems to have been an error. Please try again.'))
return self.get(request, *args, **kwargs)
@require_POST
@require_can_edit_web_users
def drop_repeater(request, domain, repeater_id):
rep = FormRepeater.get(repeater_id)
rep.retire()
messages.success(request, "Form forwarding stopped!")
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[domain]))
@require_POST
@require_can_edit_web_users
def test_repeater(request, domain):
url = request.POST["url"]
repeater_type = request.POST['repeater_type']
format = request.POST['format']
form = GenericRepeaterForm(
{"url": url, "format": format},
domain=domain,
repeater_class=receiverwrapper.models.repeater_types[repeater_type]
)
if form.is_valid():
url = form.cleaned_data["url"]
# now we fake a post
def _stub(repeater_type):
if 'case' in repeater_type.lower():
return CaseBlock(
case_id='test-case-%s' % uuid.uuid4().hex,
create=True,
case_type='test',
case_name='test case',
).as_string()
else:
return "<?xml version='1.0' ?><data id='test'><TestString>Test post from CommCareHQ on %s</TestString></data>" % \
(datetime.datetime.utcnow())
fake_post = _stub(repeater_type)
try:
resp = simple_post(fake_post, url)
if 200 <= resp.status < 300:
return HttpResponse(json.dumps({"success": True,
"response": resp.read(),
"status": resp.status}))
else:
return HttpResponse(json.dumps({"success": False,
"response": resp.read(),
"status": resp.status}))
except Exception, e:
errors = str(e)
return HttpResponse(json.dumps({"success": False, "response": errors}))
else:
return HttpResponse(json.dumps({"success": False, "response": "Please enter a valid url."}))
def autocomplete_fields(request, field):
prefix = request.GET.get('prefix', '')
results = Domain.field_by_prefix(field, prefix)
return HttpResponse(json.dumps(results))
def logo(request, domain):
logo = Domain.get_by_name(domain).get_custom_logo()
if logo is None:
raise Http404()
return HttpResponse(logo[0], content_type=logo[1])
class DomainAccountingSettings(BaseAdminProjectSettingsView):
@method_decorator(login_and_domain_required)
def dispatch(self, request, *args, **kwargs):
return super(DomainAccountingSettings, self).dispatch(request, *args, **kwargs)
@property
@memoized
def product(self):
return SoftwareProductType.get_type_by_domain(self.domain_object)
@property
@memoized
def account(self):
return BillingAccount.get_account_by_domain(self.domain)
@property
def current_subscription(self):
return Subscription.get_subscribed_plan_by_domain(self.domain_object)[1]
class DomainSubscriptionView(DomainAccountingSettings):
urlname = 'domain_subscription_view'
template_name = 'domain/current_subscription.html'
page_title = ugettext_lazy("Current Subscription")
@property
def can_purchase_credits(self):
return self.request.couch_user.is_domain_admin(self.domain)
@property
def plan(self):
plan_version, subscription = Subscription.get_subscribed_plan_by_domain(self.domain_object)
date_end = None
next_subscription = {
'exists': False,
'can_renew': False,
'name': None,
'price': None,
}
cards = None
general_credits = None
if subscription:
cards = get_customer_cards(self.account, self.request.user.username, self.domain)
date_end = (subscription.date_end.strftime(USER_DATE_FORMAT)
if subscription.date_end is not None else "--")
if subscription.date_end is not None:
if subscription.is_renewed:
next_product = self.get_product_summary(subscription.next_subscription.plan_version,
self.account,
subscription)
next_subscription.update({
'exists': True,
'date_start': subscription.next_subscription.date_start.strftime(USER_DATE_FORMAT),
'name': subscription.next_subscription.plan_version.plan.name,
'price': next_product['monthly_fee'],
})
else:
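                    # Renewal is offered once the subscription is within 30 days of its end date.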
days_left = (subscription.date_end - datetime.date.today()).days
next_subscription.update({
'can_renew': days_left <= 30,
'renew_url': reverse(SubscriptionRenewalView.urlname, args=[self.domain]),
})
general_credits = CreditLine.get_credits_by_subscription_and_features(subscription)
elif self.account is not None:
general_credits = CreditLine.get_credits_for_account(self.account)
if general_credits:
general_credits = self._fmt_credit(self._credit_grand_total(general_credits))
info = {
'products': [self.get_product_summary(plan_version, self.account, subscription)],
'features': self.get_feature_summary(plan_version, self.account, subscription),
'general_credit': general_credits,
'css_class': "label-plan %s" % plan_version.plan.edition.lower(),
'do_not_invoice': subscription.do_not_invoice if subscription is not None else False,
'is_trial': subscription.is_trial if subscription is not None else False,
'date_start': (subscription.date_start.strftime(USER_DATE_FORMAT)
if subscription is not None else None),
'date_end': date_end,
'cards': cards,
'next_subscription': next_subscription,
}
info.update(plan_version.user_facing_description)
return info
def _fmt_credit(self, credit_amount=None):
if credit_amount is None:
return {
'amount': "--",
}
return {
'amount': fmt_dollar_amount(credit_amount),
'is_visible': credit_amount != Decimal('0.0'),
}
def _credit_grand_total(self, credit_lines):
return sum([c.balance for c in credit_lines]) if credit_lines else Decimal('0.00')
def get_product_summary(self, plan_version, account, subscription):
product_rates = plan_version.product_rates.all()
if len(product_rates) > 1:
# Models and UI are both written to support multiple products,
# but for now, each subscription can only have one product.
accounting_logger.error(
"[BILLING] "
"There seem to be multiple ACTIVE NEXT subscriptions for the subscriber %s. "
"Odd, right? The latest one by date_created was used, but consider this an issue."
% self.account
)
product_rate = product_rates[0]
product_info = {
'name': product_rate.product.product_type,
'monthly_fee': _("USD %s /month") % product_rate.monthly_fee,
'credit': None,
'type': product_rate.product.product_type,
}
credit_lines = None
if subscription is not None:
credit_lines = CreditLine.get_credits_by_subscription_and_features(
subscription, product_type=product_rate.product.product_type
)
elif account is not None:
credit_lines = CreditLine.get_credits_for_account(
account, product_type=product_rate.product.product_type
)
if credit_lines:
product_info['credit'] = self._fmt_credit(self._credit_grand_total(credit_lines))
return product_info
def get_feature_summary(self, plan_version, account, subscription):
feature_summary = []
for feature_rate in plan_version.feature_rates.all():
usage = FeatureUsageCalculator(feature_rate, self.domain).get_usage()
feature_info = {
'name': get_feature_name(feature_rate.feature.feature_type, self.product),
'usage': usage,
'remaining': (
feature_rate.monthly_limit - usage
if feature_rate.monthly_limit != -1
else _('Unlimited')
),
'credit': self._fmt_credit(),
'type': feature_rate.feature.feature_type,
'recurring_interval': get_feature_recurring_interval(feature_rate.feature.feature_type),
}
credit_lines = None
if subscription is not None:
credit_lines = CreditLine.get_credits_by_subscription_and_features(
subscription, feature_type=feature_rate.feature.feature_type
)
elif account is not None:
credit_lines = CreditLine.get_credits_for_account(
account, feature_type=feature_rate.feature.feature_type)
if credit_lines:
feature_info['credit'] = self._fmt_credit(self._credit_grand_total(credit_lines))
feature_summary.append(feature_info)
return feature_summary
@property
def page_context(self):
return {
'plan': self.plan,
'change_plan_url': reverse(SelectPlanView.urlname, args=[self.domain]),
'can_purchase_credits': self.can_purchase_credits,
'credit_card_url': reverse(CreditsStripePaymentView.urlname, args=[self.domain]),
'wire_url': reverse(CreditsWireInvoiceView.urlname, args=[self.domain]),
'stripe_public_key': settings.STRIPE_PUBLIC_KEY,
'payment_error_messages': PAYMENT_ERROR_MESSAGES,
'sms_rate_calc_url': reverse(SMSRatesView.urlname,
args=[self.domain]),
'user_email': self.request.couch_user.username,
}
class EditExistingBillingAccountView(DomainAccountingSettings, AsyncHandlerMixin):
template_name = 'domain/update_billing_contact_info.html'
urlname = 'domain_update_billing_info'
page_title = ugettext_lazy("Billing Information")
async_handlers = [
Select2BillingInfoHandler,
]
@property
@memoized
def billing_info_form(self):
if self.request.method == 'POST':
return EditBillingAccountInfoForm(
self.account, self.domain, self.request.couch_user.username, data=self.request.POST
)
return EditBillingAccountInfoForm(self.account, self.domain, self.request.couch_user.username)
def dispatch(self, request, *args, **kwargs):
if self.account is None:
raise Http404()
return super(EditExistingBillingAccountView, self).dispatch(request, *args, **kwargs)
@property
def page_context(self):
return {
'billing_account_info_form': self.billing_info_form,
'cards': self._get_cards(),
'stripe_public_key': settings.STRIPE_PUBLIC_KEY,
'card_base_url': reverse(CardsView.url_name, args=[self.domain]),
}
def _get_cards(self):
user = self.request.user.username
payment_method, new_payment_method = StripePaymentMethod.objects.get_or_create(
web_user=user,
method_type=PaymentMethodType.STRIPE,
)
return payment_method.all_cards_serialized(self.account)
def post(self, request, *args, **kwargs):
if self.async_response is not None:
return self.async_response
if self.billing_info_form.is_valid():
is_saved = self.billing_info_form.save()
if not is_saved:
messages.error(
request, _("It appears that there was an issue updating your contact information. "
"We've been notified of the issue. Please try submitting again, and if the problem "
"persists, please try in a few hours."))
else:
messages.success(
request, _("Billing contact information was successfully updated.")
)
return HttpResponseRedirect(reverse(EditExistingBillingAccountView.urlname, args=[self.domain]))
return self.get(request, *args, **kwargs)
class DomainBillingStatementsView(DomainAccountingSettings, CRUDPaginatedViewMixin):
template_name = 'domain/billing_statements.html'
urlname = 'domain_billing_statements'
page_title = ugettext_lazy("Billing Statements")
limit_text = ugettext_lazy("statements per page")
empty_notification = ugettext_lazy("No Billing Statements match the current criteria.")
loading_message = ugettext_lazy("Loading statements...")
@property
def parameters(self):
return self.request.POST if self.request.method == 'POST' else self.request.GET
@property
def stripe_cards(self):
return get_customer_cards(self.account, self.request.user.username, self.domain)
@property
def show_hidden(self):
if not self.request.user.is_superuser:
return False
return bool(self.request.POST.get('additionalData[show_hidden]'))
@property
def show_unpaid(self):
try:
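            # The value is a JSON-encoded boolean; json.loads(None) raises TypeError when the key is absent.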
return json.loads(self.request.POST.get('additionalData[show_unpaid]'))
except TypeError:
return False
@property
def invoices(self):
invoices = Invoice.objects.filter(subscription__subscriber__domain=self.domain)
if not self.show_hidden:
invoices = invoices.filter(is_hidden=False)
if self.show_unpaid:
invoices = invoices.filter(date_paid__exact=None)
return invoices.order_by('-date_start', '-date_end')
@property
def total(self):
return self.paginated_invoices.count
@property
@memoized
def paginated_invoices(self):
return Paginator(self.invoices, self.limit)
@property
def total_balance(self):
"""
Returns the total balance of unpaid, unhidden invoices.
Doesn't take into account the view settings on the page.
"""
invoices = (Invoice.objects
.filter(subscription__subscriber__domain=self.domain)
.filter(date_paid__exact=None)
.filter(is_hidden=False))
return invoices.aggregate(
total_balance=Sum('balance')
).get('total_balance') or 0.00
@property
def column_names(self):
return [
_("Statement No."),
_("Plan"),
_("Billing Period"),
_("Date Due"),
_("Payment Status"),
_("PDF"),
]
@property
def page_context(self):
pagination_context = self.pagination_context
pagination_context.update({
'stripe_public_key': settings.STRIPE_PUBLIC_KEY,
'payment_error_messages': PAYMENT_ERROR_MESSAGES,
'process_invoice_payment_url': reverse(
InvoiceStripePaymentView.urlname,
args=[self.domain],
),
'process_bulk_payment_url': reverse(
BulkStripePaymentView.urlname,
args=[self.domain],
),
'process_wire_invoice_url': reverse(
WireInvoiceView.urlname,
args=[self.domain],
),
'stripe_cards': self.stripe_cards,
'total_balance': self.total_balance,
})
return pagination_context
@property
def can_pay_invoices(self):
return self.request.couch_user.is_domain_admin(self.domain)
@property
def paginated_list(self):
for invoice in self.paginated_invoices.page(self.page).object_list:
try:
last_billing_record = BillingRecord.objects.filter(
invoice=invoice
).latest('date_created')
if invoice.is_paid:
payment_status = (_("Paid on %s.")
% invoice.date_paid.strftime(USER_DATE_FORMAT))
payment_class = "label label-inverse"
else:
payment_status = _("Not Paid")
payment_class = "label label-important"
date_due = (
(invoice.date_due.strftime(USER_DATE_FORMAT)
if not invoice.is_paid else _("Already Paid"))
if invoice.date_due else _("None")
)
yield {
'itemData': {
'id': invoice.id,
'invoice_number': invoice.invoice_number,
'start': invoice.date_start.strftime(USER_DATE_FORMAT),
'end': invoice.date_end.strftime(USER_DATE_FORMAT),
'plan': invoice.subscription.plan_version.user_facing_description,
'payment_status': payment_status,
'payment_class': payment_class,
'date_due': date_due,
'pdfUrl': reverse(
BillingStatementPdfView.urlname,
args=[self.domain, last_billing_record.pdf_data_id]
),
'canMakePayment': (not invoice.is_paid
and self.can_pay_invoices),
'balance': "%s" % quantize_accounting_decimal(invoice.balance),
},
'template': 'statement-row-template',
}
except BillingRecord.DoesNotExist:
logging.error(
"An invoice was generated for %(invoice_id)d "
"(domain: %(domain)s), but no billing record!" % {
'invoice_id': invoice.id,
'domain': self.domain,
})
def refresh_item(self, item_id):
pass
def post(self, *args, **kwargs):
return self.paginate_crud_response
def dispatch(self, request, *args, **kwargs):
if self.account is None:
raise Http404()
return super(DomainBillingStatementsView, self).dispatch(request, *args, **kwargs)
class BaseStripePaymentView(DomainAccountingSettings):
http_method_names = ['post']
@property
def account(self):
raise NotImplementedError("you must impmement the property account")
@property
@memoized
def domain_admin(self):
if self.request.couch_user.is_domain_admin(self.domain):
return self.request.couch_user.username
else:
raise PaymentRequestError(
"The logged in user was not a domain admin."
)
def get_or_create_payment_method(self):
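        # get_or_create() returns an (object, created) tuple; only the payment method object is needed here.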
return StripePaymentMethod.objects.get_or_create(
web_user=self.domain_admin,
method_type=PaymentMethodType.STRIPE,
)[0]
def get_payment_handler(self):
"""Returns a StripePaymentHandler object
"""
raise NotImplementedError("You must impmenent get_payment_handler()")
def post(self, request, *args, **kwargs):
try:
payment_handler = self.get_payment_handler()
response = payment_handler.process_request(request)
except PaymentRequestError as e:
accounting_logger.error(
"[BILLING] Failed to process Stripe Payment due to bad "
"request for domain %(domain)s user %(web_user)s: "
"%(error)s" % {
'domain': self.domain,
'web_user': self.request.user.username,
'error': e,
}
)
response = {
'error': {
'message': _(
"There was an issue processing your payment. No "
"charges were made. We're looking into the issue "
"as quickly as possible. Sorry for the inconvenience."
)
}
}
return json_response(response)
class CreditsStripePaymentView(BaseStripePaymentView):
urlname = 'domain_credits_payment'
@property
@memoized
def account(self):
return BillingAccount.get_or_create_account_by_domain(
self.domain,
created_by=self.request.user.username,
account_type=BillingAccountType.USER_CREATED,
entry_point=EntryPoint.SELF_STARTED,
)[0]
def get_payment_handler(self):
return CreditStripePaymentHandler(
self.get_or_create_payment_method(),
self.domain,
self.account,
subscription=Subscription.get_subscribed_plan_by_domain(self.domain_object)[1],
post_data=self.request.POST.copy(),
)
class CreditsWireInvoiceView(DomainAccountingSettings):
http_method_names = ['post']
urlname = 'domain_wire_payment'
@method_decorator(login_and_domain_required)
def dispatch(self, request, *args, **kwargs):
return super(CreditsWireInvoiceView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
        emails = request.POST.get('emails', '').split()
amount = Decimal(request.POST.get('amount', 0))
wire_invoice_factory = DomainWireInvoiceFactory(request.domain, contact_emails=emails)
try:
wire_invoice_factory.create_wire_credits_invoice(self._get_items(request), amount)
except Exception as e:
return json_response({'error': {'message': str(e)}})
return json_response({'success': True})
def _get_items(self, request):
product_type = SoftwareProductType.get_type_by_domain(Domain.get_by_name(self.domain))
features = [{'type': get_feature_name(feature_type[0], product_type),
'amount': Decimal(request.POST.get(feature_type[0], 0))}
for feature_type in FeatureType.CHOICES
if Decimal(request.POST.get(feature_type[0], 0)) > 0]
products = [{'type': pt[0],
'amount': Decimal(request.POST.get(pt[0], 0))}
for pt in SoftwareProductType.CHOICES
if Decimal(request.POST.get(pt[0], 0)) > 0]
return products + features
class InvoiceStripePaymentView(BaseStripePaymentView):
urlname = 'domain_invoice_payment'
@property
@memoized
def invoice(self):
try:
invoice_id = self.request.POST['invoice_id']
        except KeyError:
raise PaymentRequestError("invoice_id is required")
try:
return Invoice.objects.get(pk=invoice_id)
except Invoice.DoesNotExist:
raise PaymentRequestError(
"Could not find a matching invoice for invoice_id '%s'"
% invoice_id
)
@property
def account(self):
return self.invoice.subscription.account
def get_payment_handler(self):
return InvoiceStripePaymentHandler(
self.get_or_create_payment_method(), self.domain, self.invoice
)
class BulkStripePaymentView(BaseStripePaymentView):
urlname = 'domain_bulk_payment'
@property
def account(self):
return BillingAccount.get_account_by_domain(self.domain)
def get_payment_handler(self):
return BulkStripePaymentHandler(
self.get_or_create_payment_method(), self.domain
)
class WireInvoiceView(View):
http_method_names = ['post']
urlname = 'domain_wire_invoice'
@method_decorator(login_and_domain_required)
@method_decorator(domain_admin_required)
def dispatch(self, request, *args, **kwargs):
return super(WireInvoiceView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
        emails = request.POST.get('emails', '').split()
balance = Decimal(request.POST.get('customPaymentAmount', 0))
wire_invoice_factory = DomainWireInvoiceFactory(request.domain, contact_emails=emails)
try:
wire_invoice_factory.create_wire_invoice(balance)
except Exception, e:
            return json_response({'error': {'message': str(e)}})
return json_response({'success': True})
class BillingStatementPdfView(View):
urlname = 'domain_billing_statement_download'
@method_decorator(login_and_domain_required)
@method_decorator(domain_admin_required)
def dispatch(self, request, *args, **kwargs):
return super(BillingStatementPdfView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
domain = args[0]
statement_id = kwargs.get('statement_id')
if statement_id is None or domain is None:
raise Http404()
try:
invoice_pdf = InvoicePdf.get(statement_id)
except ResourceNotFound:
raise Http404()
try:
if invoice_pdf.is_wire:
invoice = WireInvoice.objects.get(
pk=invoice_pdf.invoice_id,
domain=domain
)
else:
invoice = Invoice.objects.get(
pk=invoice_pdf.invoice_id,
subscription__subscriber__domain=domain
)
except (Invoice.DoesNotExist, WireInvoice.DoesNotExist):
raise Http404()
if invoice.is_wire:
edition = 'Bulk'
else:
edition = DESC_BY_EDITION[invoice.subscription.plan_version.plan.edition]['name']
filename = "%(pdf_id)s_%(domain)s_%(edition)s_%(filename)s" % {
'pdf_id': invoice_pdf._id,
'domain': domain,
'edition': edition,
'filename': invoice_pdf.get_filename(invoice),
}
try:
data = invoice_pdf.get_data(invoice)
response = HttpResponse(data, content_type='application/pdf')
            response['Content-Disposition'] = 'inline;filename="%s"' % filename
except Exception as e:
logging.error('[Billing] Fetching invoice PDF failed: %s' % e)
return HttpResponse(_("Could not obtain billing statement. "
"An issue has been submitted."))
return response
class InternalSubscriptionManagementView(BaseAdminProjectSettingsView):
template_name = 'domain/internal_subscription_management.html'
urlname = 'internal_subscription_mgmt'
page_title = ugettext_lazy("Dimagi Internal Subscription Management")
form_classes = INTERNAL_SUBSCRIPTION_MANAGEMENT_FORMS
@method_decorator(require_superuser)
def get(self, request, *args, **kwargs):
return super(InternalSubscriptionManagementView, self).get(request, *args, **kwargs)
@method_decorator(require_superuser)
def post(self, request, *args, **kwargs):
form = self.get_post_form
if form.is_valid():
try:
form.process_subscription_management()
return HttpResponseRedirect(reverse(DomainSubscriptionView.urlname, args=[self.domain]))
except NewSubscriptionError as e:
messages.error(self.request, e.message)
return self.get(request, *args, **kwargs)
@property
def page_context(self):
return {
'plan_name': Subscription.get_subscribed_plan_by_domain(self.domain)[0],
'select_subscription_type_form': self.select_subscription_type_form,
'subscription_management_forms': self.slug_to_form.values(),
'today': datetime.date.today(),
}
@property
def get_post_form(self):
return self.slug_to_form[self.request.POST.get('slug')]
@property
@memoized
def slug_to_form(self):
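        # Bind POST data only to the form whose slug matches the submitted 'slug' value.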
def create_form(form_class):
if self.request.method == 'POST' and form_class.slug == self.request.POST.get('slug'):
return form_class(self.domain, self.request.couch_user.username, self.request.POST)
return form_class(self.domain, self.request.couch_user.username)
return {form_class.slug: create_form(form_class) for form_class in self.form_classes}
@property
@memoized
def select_subscription_type_form(self):
if self.request.method == 'POST':
for form_slug in self.slug_to_form:
if form_slug in self.request.POST:
return SelectSubscriptionTypeForm({
'subscription_type': form_slug,
})
subscription_type = None
subscription = Subscription.get_subscribed_plan_by_domain(self.domain_object)[1]
if subscription is None:
subscription_type = None
else:
plan = subscription.plan_version.plan
if subscription.service_type == SubscriptionType.CONTRACTED:
subscription_type = "contracted_partner"
elif plan.edition == SoftwarePlanEdition.ENTERPRISE:
subscription_type = "dimagi_only_enterprise"
elif (plan.edition == SoftwarePlanEdition.ADVANCED
and plan.visibility == SoftwarePlanVisibility.TRIAL_INTERNAL):
subscription_type = "advanced_extended_trial"
return SelectSubscriptionTypeForm({'subscription_type': subscription_type})
class SelectPlanView(DomainAccountingSettings):
template_name = 'domain/select_plan.html'
urlname = 'domain_select_plan'
page_title = ugettext_lazy("Change Plan")
step_title = ugettext_lazy("Select Plan")
edition = None
lead_text = ugettext_lazy("Please select a plan below that fits your organization's needs.")
@property
def edition_name(self):
if self.edition:
return DESC_BY_EDITION[self.edition]['name']
@property
def is_non_ops_superuser(self):
if not self.request.couch_user.is_superuser:
return False
return not has_privilege(self.request, privileges.ACCOUNTING_ADMIN)
@property
def parent_pages(self):
return [
{
'title': DomainSubscriptionView.page_title,
'url': reverse(DomainSubscriptionView.urlname, args=[self.domain]),
}
]
@property
def steps(self):
edition_name = u" (%s)" % self.edition_name if self.edition_name else ""
return [
{
'title': _(u"1. Select a Plan%(edition_name)s") % {
"edition_name": edition_name
},
'url': reverse(SelectPlanView.urlname, args=[self.domain]),
}
]
@property
def main_context(self):
context = super(SelectPlanView, self).main_context
context.update({
'steps': self.steps,
'step_title': self.step_title,
'lead_text': self.lead_text,
})
return context
@property
def page_context(self):
return {
'pricing_table': PricingTable.get_table_by_product(self.product, domain=self.domain),
'current_edition': (self.current_subscription.plan_version.plan.edition.lower()
if self.current_subscription is not None
and not self.current_subscription.is_trial
else ""),
'is_non_ops_superuser': self.is_non_ops_superuser,
}
class EditPrivacySecurityView(BaseAdminProjectSettingsView):
template_name = "domain/admin/project_privacy.html"
urlname = "privacy_info"
page_title = ugettext_lazy("Privacy and Security")
@property
@memoized
def privacy_form(self):
initial = {
"secure_submissions": self.domain_object.secure_submissions,
"restrict_superusers": self.domain_object.restrict_superusers,
"allow_domain_requests": self.domain_object.allow_domain_requests,
}
if self.request.method == 'POST':
return PrivacySecurityForm(self.request.POST, initial=initial)
return PrivacySecurityForm(initial=initial)
@property
def page_context(self):
return {
'privacy_form': self.privacy_form
}
def post(self, request, *args, **kwargs):
if self.privacy_form.is_valid():
self.privacy_form.save(self.domain_object)
messages.success(request, _("Your project settings have been saved!"))
return self.get(request, *args, **kwargs)
class SelectedEnterprisePlanView(SelectPlanView):
template_name = 'domain/selected_enterprise_plan.html'
urlname = 'enterprise_request_quote'
step_title = ugettext_lazy("Contact Dimagi")
edition = SoftwarePlanEdition.ENTERPRISE
@property
def steps(self):
last_steps = super(SelectedEnterprisePlanView, self).steps
last_steps.append({
'title': _("2. Contact Dimagi"),
'url': reverse(SelectedEnterprisePlanView.urlname, args=[self.domain]),
})
return last_steps
@property
@memoized
def is_not_redirect(self):
        return 'plan_edition' not in self.request.POST
@property
@memoized
def enterprise_contact_form(self):
if self.request.method == 'POST' and self.is_not_redirect:
return EnterprisePlanContactForm(self.domain, self.request.couch_user, data=self.request.POST)
return EnterprisePlanContactForm(self.domain, self.request.couch_user)
@property
def page_context(self):
return {
'enterprise_contact_form': self.enterprise_contact_form,
}
def post(self, request, *args, **kwargs):
if self.is_not_redirect and self.enterprise_contact_form.is_valid():
self.enterprise_contact_form.send_message()
messages.success(request, _("Your request was sent to Dimagi. "
"We will try our best to follow up in a timely manner."))
return HttpResponseRedirect(reverse(DomainSubscriptionView.urlname, args=[self.domain]))
return self.get(request, *args, **kwargs)
class ConfirmSelectedPlanView(SelectPlanView):
template_name = 'domain/confirm_plan.html'
urlname = 'confirm_selected_plan'
step_title = ugettext_lazy("Confirm Plan")
@property
def steps(self):
last_steps = super(ConfirmSelectedPlanView, self).steps
last_steps.append({
'title': _("2. Confirm Plan"),
'url': reverse(SelectPlanView.urlname, args=[self.domain]),
})
return last_steps
@property
@memoized
def edition(self):
edition = self.request.POST.get('plan_edition').title()
if edition not in [e[0] for e in SoftwarePlanEdition.CHOICES]:
raise Http404()
return edition
@property
@memoized
def selected_plan_version(self):
return DefaultProductPlan.get_default_plan_by_domain(self.domain, self.edition).plan.get_version()
@property
def downgrade_messages(self):
current_plan_version, subscription = Subscription.get_subscribed_plan_by_domain(self.domain_object)
if subscription is None:
current_plan_version = None
downgrades = get_change_status(current_plan_version, self.selected_plan_version)[1]
downgrade_handler = DomainDowngradeStatusHandler(
self.domain_object, self.selected_plan_version, downgrades,
web_user=self.request.user.username
)
return downgrade_handler.get_response()
@property
def page_context(self):
return {
'downgrade_messages': self.downgrade_messages,
'current_plan': (self.current_subscription.plan_version.user_facing_description
if self.current_subscription is not None else None),
'show_community_notice': (self.edition == SoftwarePlanEdition.COMMUNITY
and self.current_subscription is None),
}
@property
def main_context(self):
context = super(ConfirmSelectedPlanView, self).main_context
context.update({
'plan': self.selected_plan_version.user_facing_description,
})
return context
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse(SelectPlanView.urlname, args=[self.domain]))
def post(self, request, *args, **kwargs):
if self.edition == SoftwarePlanEdition.ENTERPRISE and not self.request.couch_user.is_superuser:
return HttpResponseRedirect(reverse(SelectedEnterprisePlanView.urlname, args=[self.domain]))
return super(ConfirmSelectedPlanView, self).get(request, *args, **kwargs)
class ConfirmBillingAccountInfoView(ConfirmSelectedPlanView, AsyncHandlerMixin):
template_name = 'domain/confirm_billing_info.html'
urlname = 'confirm_billing_account_info'
step_title = ugettext_lazy("Confirm Billing Information")
is_new = False
async_handlers = [
Select2BillingInfoHandler,
]
@property
def steps(self):
last_steps = super(ConfirmBillingAccountInfoView, self).steps
last_steps.append({
'title': _("3. Confirm Billing Account"),
'url': reverse(ConfirmBillingAccountInfoView.urlname, args=[self.domain]),
})
return last_steps
@property
@memoized
def account(self):
if self.current_subscription:
return self.current_subscription.account
account, self.is_new = BillingAccount.get_or_create_account_by_domain(
self.domain,
created_by=self.request.couch_user.username,
account_type=BillingAccountType.USER_CREATED,
entry_point=EntryPoint.SELF_STARTED,
)
return account
@property
def payment_method(self):
user = self.request.user.username
payment_method, __ = StripePaymentMethod.objects.get_or_create(
web_user=user,
method_type=PaymentMethodType.STRIPE,
)
return payment_method
@property
@memoized
def is_form_post(self):
return 'company_name' in self.request.POST
@property
@memoized
def billing_account_info_form(self):
initial = None
if self.edition == SoftwarePlanEdition.ENTERPRISE and self.request.couch_user.is_superuser:
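            # Superusers subscribing to the Enterprise plan get Dimagi's own billing address prefilled.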
initial = {
'company_name': "Dimagi",
'first_line': "585 Massachusetts Ave",
'second_line': "Suite 4",
'city': "Cambridge",
'state_province_region': "MA",
'postal_code': "02139",
'country': "US",
}
if self.request.method == 'POST' and self.is_form_post:
return ConfirmNewSubscriptionForm(
self.account, self.domain, self.request.couch_user.username,
self.selected_plan_version, self.current_subscription, data=self.request.POST, initial=initial
)
return ConfirmNewSubscriptionForm(self.account, self.domain, self.request.couch_user.username,
self.selected_plan_version, self.current_subscription, initial=initial)
@property
def page_context(self):
return {
'billing_account_info_form': self.billing_account_info_form,
'stripe_public_key': settings.STRIPE_PUBLIC_KEY,
'cards': self.payment_method.all_cards_serialized(self.account)
}
def post(self, request, *args, **kwargs):
if self.async_response is not None:
return self.async_response
if self.edition == SoftwarePlanEdition.ENTERPRISE and not self.request.couch_user.is_superuser:
return HttpResponseRedirect(reverse(SelectedEnterprisePlanView.urlname, args=[self.domain]))
if self.is_form_post and self.billing_account_info_form.is_valid():
is_saved = self.billing_account_info_form.save()
software_plan_name = DESC_BY_EDITION[self.selected_plan_version.plan.edition]['name'].encode('utf-8')
if not is_saved:
messages.error(
request, _("It appears there was an issue subscribing your project to the %s Software Plan. You "
"may try resubmitting, but if that doesn't work, rest assured someone will be "
"contacting you shortly.") % software_plan_name)
else:
messages.success(
request, _("Your project has been successfully subscribed to the %s Software Plan."
% software_plan_name)
)
return HttpResponseRedirect(reverse(DomainSubscriptionView.urlname, args=[self.domain]))
return super(ConfirmBillingAccountInfoView, self).post(request, *args, **kwargs)
class SubscriptionMixin(object):
@property
@memoized
def subscription(self):
subscription = Subscription.get_subscribed_plan_by_domain(self.domain_object)[1]
if subscription is None:
raise Http404
if subscription.is_renewed:
raise Http404
return subscription
class SubscriptionRenewalView(SelectPlanView, SubscriptionMixin):
urlname = "domain_subscription_renewal"
page_title = ugettext_lazy("Renew Plan")
step_title = ugettext_lazy("Renew or Change Plan")
@property
def lead_text(self):
return ugettext_lazy("Based on your current usage we recommend you use the <strong>{plan}</strong> plan"
.format(plan=self.current_subscription.plan_version.plan.edition))
@property
def main_context(self):
context = super(SubscriptionRenewalView, self).main_context
context.update({'is_renewal': True})
return context
@property
def page_context(self):
context = super(SubscriptionRenewalView, self).page_context
current_privs = get_privileges(self.subscription.plan_version)
plan = DefaultProductPlan.get_lowest_edition_by_domain(
self.domain, current_privs, return_plan=False,
).lower()
context['current_edition'] = (plan
if self.current_subscription is not None
and not self.current_subscription.is_trial
else "")
return context
class ConfirmSubscriptionRenewalView(DomainAccountingSettings, AsyncHandlerMixin, SubscriptionMixin):
template_name = 'domain/confirm_subscription_renewal.html'
urlname = 'domain_subscription_renewal_confirmation'
page_title = ugettext_lazy("Renew Plan")
async_handlers = [
Select2BillingInfoHandler,
]
@property
@memoized
def next_plan_version(self):
new_edition = self.request.POST.get('plan_edition').title()
plan_version = DefaultProductPlan.get_default_plan_by_domain(self.domain, new_edition)
if plan_version is None:
logging.error("[BILLING] Could not find a matching renewable plan "
"for %(domain)s, subscription number %(sub_pk)s." % {
'domain': self.domain,
'sub_pk': self.subscription.pk
})
raise Http404
return plan_version
@property
@memoized
def confirm_form(self):
if self.request.method == 'POST' and "from_plan_page" not in self.request.POST:
return ConfirmSubscriptionRenewalForm(
self.account, self.domain, self.request.couch_user.username,
self.subscription, self.next_plan_version,
data=self.request.POST,
)
return ConfirmSubscriptionRenewalForm(
self.account, self.domain, self.request.couch_user.username,
self.subscription, self.next_plan_version,
)
@property
def page_context(self):
return {
'subscription': self.subscription,
'plan': self.subscription.plan_version.user_facing_description,
'confirm_form': self.confirm_form,
'next_plan': self.next_plan_version.user_facing_description,
}
def post(self, request, *args, **kwargs):
if self.async_response is not None:
return self.async_response
if self.confirm_form.is_valid():
is_saved = self.confirm_form.save()
if not is_saved:
messages.error(
request, _(
"There was an issue renewing your subscription. We "
"have been notified of the issue. Please try "
"submitting again, and if the problem persists, "
"please try in a few hours."
)
)
else:
messages.success(
request, _("Your subscription was successfully renewed!")
)
return HttpResponseRedirect(
reverse(DomainSubscriptionView.urlname, args=[self.domain])
)
return self.get(request, *args, **kwargs)
class ExchangeSnapshotsView(BaseAdminProjectSettingsView):
template_name = 'domain/snapshot_settings.html'
urlname = 'domain_snapshot_settings'
page_title = ugettext_lazy("CommCare Exchange")
@property
def page_context(self):
return {
'project': self.domain_object,
'snapshots': list(self.domain_object.snapshots()),
'published_snapshot': self.domain_object.published_snapshot(),
}
class CreateNewExchangeSnapshotView(BaseAdminProjectSettingsView):
template_name = 'domain/create_snapshot.html'
urlname = 'domain_create_snapshot'
page_title = ugettext_lazy("Publish New Version")
strict_domain_fetching = True
@property
def parent_pages(self):
return [{
'title': ExchangeSnapshotsView.page_title,
'url': reverse(ExchangeSnapshotsView.urlname, args=[self.domain]),
}]
@property
def page_context(self):
context = {
'form': self.snapshot_settings_form,
'app_forms': self.app_forms,
'fixture_forms': self.fixture_forms,
'can_publish_as_org': self.can_publish_as_org,
'autocomplete_fields': ('project_type', 'phone_model', 'user_type', 'city', 'countries', 'region'),
}
if self.published_snapshot:
context.update({
'published_as_org': self.published_snapshot.publisher == 'organization',
'author': self.published_snapshot.author,
})
elif self.request.method == 'POST':
context.update({
'published_as_org': self.request.POST.get('publisher', '') == 'organization',
'author': self.request.POST.get('author', '')
})
return context
@property
def can_publish_as_org(self):
return (self.domain_object.get_organization()
and self.request.couch_user.is_org_admin(self.domain_object.get_organization().name))
@property
@memoized
def snapshots(self):
return list(self.domain_object.snapshots())
@property
@memoized
def published_snapshot(self):
return self.snapshots[0] if self.snapshots else self.domain_object
@property
@memoized
def published_apps(self):
published_apps = {}
if self.published_snapshot:
for app in self.published_snapshot.full_applications():
base_app_id = app.copy_of if self.domain_object == self.published_snapshot else app.copied_from.copy_of
if base_app_id:
published_apps[base_app_id] = app
return published_apps
@property
def app_forms(self):
app_forms = []
for app in self.domain_object.applications():
if self.request.method == 'POST':
app_forms.append((app, SnapshotApplicationForm(self.request.POST, prefix=app.id)))
elif self.published_snapshot and app.copy_of in self.published_apps:
original = self.published_apps[app.copy_of]
app_forms.append((app, SnapshotApplicationForm(initial={
'publish': True,
'name': original.name,
'description': original.description,
'deployment_date': original.deployment_date,
'user_type': original.user_type,
'attribution_notes': original.attribution_notes,
'phone_model': original.phone_model,
}, prefix=app.id)))
else:
app_forms.append((app,
SnapshotApplicationForm(
initial={
'publish': (self.published_snapshot is None
or self.published_snapshot == self.domain_object)
}, prefix=app.id)))
return app_forms
@property
@memoized
def published_fixtures(self):
return [f.copy_from for f in FixtureDataType.by_domain(self.published_snapshot._id)]
@property
def fixture_forms(self):
fixture_forms = []
for fixture in FixtureDataType.by_domain(self.domain_object.name):
fixture.id = fixture._id
if self.request.method == 'POST':
fixture_forms.append((fixture,
SnapshotFixtureForm(self.request.POST, prefix=fixture._id)))
else:
fixture_forms.append((fixture,
SnapshotFixtureForm(
initial={
'publish': (self.published_snapshot == self.domain_object
or fixture._id in self.published_fixtures)
}, prefix=fixture._id)))
return fixture_forms
@property
@memoized
def snapshot_settings_form(self):
if self.request.method == 'POST':
form = SnapshotSettingsForm(self.request.POST,
self.request.FILES,
domain=self.domain_object,
is_superuser=self.request.user.is_superuser)
return form
proj = self.published_snapshot if self.published_snapshot else self.domain_object
initial = {
'case_sharing': json.dumps(proj.case_sharing),
'publish_on_submit': True,
'share_multimedia': self.published_snapshot.multimedia_included if self.published_snapshot else True,
}
init_attribs = ['default_timezone', 'project_type', 'license']
if self.published_snapshot:
init_attribs.extend(['title', 'description', 'short_description'])
if self.published_snapshot.yt_id:
initial['video'] = 'http://www.youtube.com/watch?v=%s' % self.published_snapshot.yt_id
for attr in init_attribs:
initial[attr] = getattr(proj, attr)
return SnapshotSettingsForm(initial=initial,
domain=self.domain_object,
is_superuser=self.request.user.is_superuser)
@property
@memoized
def has_published_apps(self):
for app in self.domain_object.applications():
if self.request.POST.get("%s-publish" % app.id, False):
return True
messages.error(self.request, _("Cannot publish a project without applications to CommCare Exchange"))
return False
@property
def has_signed_eula(self):
eula_signed = self.request.couch_user.is_eula_signed()
if not eula_signed:
messages.error(self.request, _("You must agree to our eula to publish a project to Exchange"))
return eula_signed
@property
def has_valid_form(self):
is_valid = self.snapshot_settings_form.is_valid()
if not is_valid:
messages.error(self.request, _("There are some problems with your form. "
"Please address these issues and try again."))
return is_valid
def post(self, request, *args, **kwargs):
if self.has_published_apps and self.has_signed_eula and self.has_valid_form:
new_license = request.POST['license']
if request.POST.get('share_multimedia', False):
app_ids = self.snapshot_settings_form._get_apps_to_publish()
media = self.domain_object.all_media(from_apps=app_ids)
for m_file in media:
if self.domain not in m_file.shared_by:
m_file.shared_by.append(self.domain)
# set the license of every multimedia file that doesn't yet have a license set
if not m_file.license:
m_file.update_or_add_license(self.domain, type=new_license, should_save=False)
m_file.save()
            share_reminders = bool(request.POST.get('share_reminders', False))
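            # Collect the ids of the apps and fixtures whose "<id>-publish" checkbox was checked.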
copy_by_id = set()
for k in request.POST.keys():
if k.endswith("-publish"):
copy_by_id.add(k[:-len("-publish")])
old = self.domain_object.published_snapshot()
new_domain = self.domain_object.save_snapshot(
share_reminders=share_reminders, copy_by_id=copy_by_id)
new_domain.license = new_license
new_domain.description = request.POST['description']
new_domain.short_description = request.POST['short_description']
new_domain.project_type = request.POST['project_type']
new_domain.title = request.POST['title']
new_domain.multimedia_included = request.POST.get('share_multimedia', '') == 'on'
new_domain.publisher = request.POST.get('publisher', None) or 'user'
if request.POST.get('video'):
new_domain.yt_id = self.snapshot_settings_form.cleaned_data['video']
new_domain.author = request.POST.get('author', None)
new_domain.is_approved = False
new_domain.is_starter_app = request.POST.get('is_starter_app', '') == 'on'
publish_on_submit = request.POST.get('publish_on_submit', "no") == "yes"
image = self.snapshot_settings_form.cleaned_data['image']
if image:
new_domain.image_path = image.name
new_domain.image_type = image.content_type
elif request.POST.get('old_image', False):
new_domain.image_path = old.image_path
new_domain.image_type = old.image_type
new_domain.save()
documentation_file = self.snapshot_settings_form.cleaned_data['documentation_file']
if documentation_file:
new_domain.documentation_file_path = documentation_file.name
new_domain.documentation_file_type = documentation_file.content_type
elif request.POST.get('old_documentation_file', False):
new_domain.documentation_file_path = old.documentation_file_path
new_domain.documentation_file_type = old.documentation_file_type
new_domain.save()
if publish_on_submit:
_publish_snapshot(request, self.domain_object, published_snapshot=new_domain)
else:
new_domain.published = False
new_domain.save()
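# Attach the snapshot image: thumbnail a newly uploaded image to 200x200, or carry the previous snapshot's attachment forward when "old_image" was requested.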
if image:
im = Image.open(image)
out = cStringIO.StringIO()
im.thumbnail((200, 200), Image.ANTIALIAS)
im.save(out, new_domain.image_type.split('/')[-1])
new_domain.put_attachment(content=out.getvalue(), name=image.name)
elif request.POST.get('old_image', False):
new_domain.put_attachment(content=old.fetch_attachment(old.image_path), name=new_domain.image_path)
if documentation_file:
new_domain.put_attachment(content=documentation_file, name=documentation_file.name)
elif request.POST.get('old_documentation_file', False):
new_domain.put_attachment(content=old.fetch_attachment(old.documentation_file_path),
name=new_domain.documentation_file_path)
for application in new_domain.full_applications():
original_id = application.copied_from._id
name_field = "%s-name" % original_id
if name_field not in request.POST:
continue
application.name = request.POST[name_field]
application.description = request.POST["%s-description" % original_id]
date_picked = request.POST["%s-deployment_date" % original_id]
try:
date_picked = dateutil.parser.parse(date_picked)
if date_picked.year > 2009:
application.deployment_date = date_picked
except Exception:
pass
application.phone_model = request.POST["%s-phone_model" % original_id]
application.attribution_notes = request.POST["%s-attribution_notes" % original_id]
application.user_type = request.POST["%s-user_type" % original_id]
if not new_domain.multimedia_included:
application.multimedia_map = {}
application.save()
for fixture in FixtureDataType.by_domain(new_domain.name):
old_id = FixtureDataType.by_domain_tag(self.domain_object.name,
fixture.tag).first()._id
fixture.description = request.POST["%s-description" % old_id]
fixture.save()
if new_domain is None:
messages.error(request, _("Version creation failed; please try again"))
else:
messages.success(request, (_("Created a new version of your app. This version will be posted to "
"CommCare Exchange pending approval by admins.") if publish_on_submit
else _("Created a new version of your app.")))
return redirect(ExchangeSnapshotsView.urlname, self.domain)
return self.get(request, *args, **kwargs)
class ManageProjectMediaView(BaseAdminProjectSettingsView):
urlname = 'domain_manage_multimedia'
page_title = ugettext_lazy("Multimedia Sharing")
template_name = 'domain/admin/media_manager.html'
@property
def project_media_data(self):
return [{
'license': m.license.type if m.license else 'public',
'shared': self.domain in m.shared_by,
'url': m.url(),
'm_id': m._id,
'tags': m.tags.get(self.domain, []),
'type': m.doc_type,
} for m in self.request.project.all_media()]
@property
def page_context(self):
return {
'media': self.project_media_data,
'licenses': LICENSES.items(),
}
@retry_resource(3)
def post(self, request, *args, **kwargs):
for m_file in request.project.all_media():
if '%s_tags' % m_file._id in request.POST:
m_file.tags[self.domain] = request.POST.get('%s_tags' % m_file._id, '').split(' ')
if self.domain not in m_file.shared_by and request.POST.get('%s_shared' % m_file._id, False):
m_file.shared_by.append(self.domain)
elif self.domain in m_file.shared_by and not request.POST.get('%s_shared' % m_file._id, False):
m_file.shared_by.remove(self.domain)
if '%s_license' % m_file._id in request.POST:
m_file.update_or_add_license(self.domain,
type=request.POST.get('%s_license' % m_file._id, 'public'),
should_save=True)
m_file.save()
messages.success(request, _("Multimedia updated successfully!"))
return self.get(request, *args, **kwargs)
class RepeaterMixin(object):
@property
def friendly_repeater_names(self):
return {
'FormRepeater': _("Forms"),
'CaseRepeater': _("Cases"),
'ShortFormRepeater': _("Form Stubs"),
'AppStructureRepeater': _("App Schema Changes"),
}
class DomainForwardingOptionsView(BaseAdminProjectSettingsView, RepeaterMixin):
urlname = 'domain_forwarding'
page_title = ugettext_lazy("Data Forwarding")
template_name = 'domain/admin/domain_forwarding.html'
@property
def repeaters(self):
available_repeaters = [
FormRepeater, CaseRepeater, ShortFormRepeater, AppStructureRepeater,
]
return [(r.__name__, r.by_domain(self.domain), self.friendly_repeater_names[r.__name__])
for r in available_repeaters]
@property
def page_context(self):
return {
'repeaters': self.repeaters,
'pending_record_count': RepeatRecord.count(self.domain),
}
class AddRepeaterView(BaseAdminProjectSettingsView, RepeaterMixin):
urlname = 'add_repeater'
page_title = ugettext_lazy("Forward Data")
template_name = 'domain/admin/add_form_repeater.html'
repeater_form_class = GenericRepeaterForm
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain, self.repeater_type])
@property
def parent_pages(self):
return [{
'title': DomainForwardingOptionsView.page_title,
'url': reverse(DomainForwardingOptionsView.urlname, args=[self.domain]),
}]
@property
def repeater_type(self):
return self.kwargs['repeater_type']
@property
def page_name(self):
return "Forward %s" % self.friendly_repeater_names.get(self.repeater_type, "Data")
@property
@memoized
def repeater_class(self):
try:
return receiverwrapper.models.repeater_types[self.repeater_type]
except KeyError:
raise Http404()
@property
@memoized
def add_repeater_form(self):
if self.request.method == 'POST':
return self.repeater_form_class(
self.request.POST,
domain=self.domain,
repeater_class=self.repeater_class
)
return self.repeater_form_class(
domain=self.domain,
repeater_class=self.repeater_class
)
@property
def page_context(self):
return {
'form': self.add_repeater_form,
'repeater_type': self.repeater_type,
}
def make_repeater(self):
repeater = self.repeater_class(
domain=self.domain,
url=self.add_repeater_form.cleaned_data['url'],
use_basic_auth=self.add_repeater_form.cleaned_data['use_basic_auth'],
username=self.add_repeater_form.cleaned_data['username'],
password=self.add_repeater_form.cleaned_data['password'],
format=self.add_repeater_form.cleaned_data['format']
)
return repeater
def post(self, request, *args, **kwargs):
if self.add_repeater_form.is_valid():
repeater = self.make_repeater()
repeater.save()
messages.success(request, _("Forwarding set up to %s" % repeater.url))
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[self.domain]))
return self.get(request, *args, **kwargs)
class AddFormRepeaterView(AddRepeaterView):
urlname = 'add_form_repeater'
repeater_form_class = FormRepeaterForm
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain])
def make_repeater(self):
repeater = super(AddFormRepeaterView, self).make_repeater()
repeater.exclude_device_reports = self.add_repeater_form.cleaned_data['exclude_device_reports']
repeater.include_app_id_param = self.add_repeater_form.cleaned_data['include_app_id_param']
return repeater
class OrgSettingsView(BaseAdminProjectSettingsView):
template_name = 'domain/orgs_settings.html'
urlname = 'domain_org_settings'
page_title = ugettext_lazy("Organization")
@method_decorator(requires_privilege_with_fallback(privileges.CROSS_PROJECT_REPORTS))
def dispatch(self, request, *args, **kwargs):
return super(OrgSettingsView, self).dispatch(request, *args, **kwargs)
@property
def page_context(self):
domain = self.domain_object
org_users = []
teams = Team.get_by_domain(domain.name)
for team in teams:
for user in team.get_members():
user.team_id = team.get_id
user.team = team.name
org_users.append(user)
for user in org_users:
user.current_domain = domain.name
all_orgs = Organization.get_all()
return {
"project": domain,
'domain': domain.name,
"organization": Organization.get_by_name(getattr(domain, "organization", None)),
"org_users": org_users,
"all_orgs": all_orgs,
}
class BaseInternalDomainSettingsView(BaseProjectSettingsView):
strict_domain_fetching = True
@method_decorator(login_and_domain_required)
@method_decorator(require_superuser)
def dispatch(self, request, *args, **kwargs):
return super(BaseInternalDomainSettingsView, self).dispatch(request, *args, **kwargs)
@property
def main_context(self):
context = super(BaseInternalDomainSettingsView, self).main_context
context.update({
'project': self.domain_object,
})
return context
@property
def page_name(self):
return mark_safe("%s <small>Internal</small>" % self.page_title)
class EditInternalDomainInfoView(BaseInternalDomainSettingsView):
urlname = 'domain_internal_settings'
page_title = ugettext_lazy("Project Information")
template_name = 'domain/internal_settings.html'
strict_domain_fetching = True
@property
def autocomplete_fields(self):
return ['countries']
@property
@memoized
def internal_settings_form(self):
can_edit_eula = CAN_EDIT_EULA.enabled(self.request.couch_user.username)
if self.request.method == 'POST':
return DomainInternalForm(can_edit_eula, self.request.POST)
initial = {
'deployment_date': self.domain_object.deployment.date.date
if self.domain_object.deployment.date else '',
'countries': self.domain_object.deployment.countries,
'is_test': self.domain_object.is_test,
}
internal_attrs = [
'sf_contract_id',
'sf_account_id',
'services',
'initiative',
'self_started',
'area',
'sub_area',
'organization_name',
'notes',
'phone_model',
'commtrack_domain',
'business_unit',
'workshop_region',
]
if can_edit_eula:
internal_attrs += [
'custom_eula',
'can_use_data',
]
for attr in internal_attrs:
val = getattr(self.domain_object.internal, attr)
if isinstance(val, bool):
val = 'true' if val else 'false'
initial[attr] = val
return DomainInternalForm(can_edit_eula, initial=initial)
@property
def page_context(self):
return {
'project': self.domain_object,
'form': self.internal_settings_form,
'areas': dict([(a["name"], a["sub_areas"]) for a in settings.INTERNAL_DATA["area"]]),
}
def post(self, request, *args, **kwargs):
if self.internal_settings_form.is_valid():
old_attrs = copy.copy(self.domain_object.internal)
self.internal_settings_form.save(self.domain_object)
eula_props_changed = (bool(old_attrs.custom_eula) != bool(self.domain_object.internal.custom_eula) or
bool(old_attrs.can_use_data) != bool(self.domain_object.internal.can_use_data))
if eula_props_changed and settings.EULA_CHANGE_EMAIL:
message = '\n'.join([
'{user} changed either the EULA or data sharing properties for domain {domain}.',
'',
'The properties changed were:',
'- Custom eula: {eula_old} --> {eula_new}',
'- Can use data: {can_use_data_old} --> {can_use_data_new}'
]).format(
user=self.request.couch_user.username,
domain=self.domain,
eula_old=old_attrs.custom_eula,
eula_new=self.domain_object.internal.custom_eula,
can_use_data_old=old_attrs.can_use_data,
can_use_data_new=self.domain_object.internal.can_use_data,
)
send_mail_async.delay(
'Custom EULA or data use flags changed for {}'.format(self.domain),
message, settings.DEFAULT_FROM_EMAIL, [settings.EULA_CHANGE_EMAIL]
)
messages.success(request, _("The internal information for project %s was successfully updated!")
% self.domain)
else:
messages.error(request, _(
"Your settings are not valid, see below for errors. Correct them and try again!"))
return self.get(request, *args, **kwargs)
class EditInternalCalculationsView(BaseInternalDomainSettingsView):
urlname = 'domain_internal_calculations'
page_title = ugettext_lazy("Calculated Properties")
template_name = 'domain/internal_calculations.html'
@property
def page_context(self):
return {
'calcs': CALCS,
'order': CALC_ORDER,
}
@login_and_domain_required
@require_superuser
def calculated_properties(request, domain):
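# calc_tag arrives as "<tag>" or "<tag>--<extra_arg>"; unknown tags return an error payload instead of a value.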
calc_tag = request.GET.get("calc_tag", '').split('--')
extra_arg = calc_tag[1] if len(calc_tag) > 1 else ''
calc_tag = calc_tag[0]
if not calc_tag or calc_tag not in CALC_FNS.keys():
data = {"error": 'This tag does not exist'}
else:
data = {"value": dom_calc(calc_tag, domain, extra_arg)}
return json_response(data)
def _publish_snapshot(request, domain, published_snapshot=None):
snapshots = domain.snapshots()
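# Unpublish every existing snapshot first so that at most one snapshot of this domain is live on the Exchange.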
for snapshot in snapshots:
if snapshot.published:
snapshot.published = False
if not published_snapshot or snapshot.name != published_snapshot.name:
snapshot.save()
if published_snapshot:
if published_snapshot.copied_from.name != domain.name:
messages.error(request, "Invalid snapshot")
return False
# CDA: publishing a snapshot requires the user to have agreed to the Content Distribution Agreement, so record that agreement here
published_snapshot.cda.signed = True
published_snapshot.cda.date = datetime.datetime.utcnow()
published_snapshot.cda.type = 'Content Distribution Agreement'
if request.couch_user:
published_snapshot.cda.user_id = request.couch_user.get_id
published_snapshot.cda.user_ip = get_ip(request)
published_snapshot.published = True
published_snapshot.save()
_notification_email_on_publish(domain, published_snapshot, request.couch_user)
return True
def _notification_email_on_publish(domain, snapshot, published_by):
params = {"domain": domain, "snapshot": snapshot,
"published_by": published_by, "url_base": get_site_domain()}
text_content = render_to_string(
"domain/email/published_app_notification.txt", params)
html_content = render_to_string(
"domain/email/published_app_notification.html", params)
recipients = settings.EXCHANGE_NOTIFICATION_RECIPIENTS
subject = "New App on Exchange: %s" % snapshot.title
try:
for recipient in recipients:
send_html_email_async.delay(subject, recipient, html_content,
text_content=text_content,
email_from=settings.DEFAULT_FROM_EMAIL)
except Exception:
logging.warning("Can't send notification email, "
"but the message was:\n%s" % text_content)
@domain_admin_required
def set_published_snapshot(request, domain, snapshot_name=''):
domain = request.project
snapshots = domain.snapshots()
if request.method == 'POST':
if snapshot_name != '':
published_snapshot = Domain.get_by_name(snapshot_name)
_publish_snapshot(request, domain, published_snapshot=published_snapshot)
else:
_publish_snapshot(request, domain)
return redirect('domain_snapshot_settings', domain.name)
class ProBonoMixin():
page_title = ugettext_lazy("Pro-Bono Application")
is_submitted = False
url_name = None
@property
def requesting_domain(self):
raise NotImplementedError
@property
@memoized
def pro_bono_form(self):
if self.request.method == 'POST':
return ProBonoForm(self.use_domain_field, self.request.POST)
return ProBonoForm(self.use_domain_field)
@property
def page_context(self):
return {
'pro_bono_form': self.pro_bono_form,
'is_submitted': self.is_submitted,
}
@property
def page_url(self):
return self.url_name
def post(self, request, *args, **kwargs):
if self.pro_bono_form.is_valid():
self.pro_bono_form.process_submission(domain=self.requesting_domain)
self.is_submitted = True
return self.get(request, *args, **kwargs)
class ProBonoStaticView(ProBonoMixin, BasePageView):
template_name = 'domain/pro_bono/static.html'
urlname = 'pro_bono_static'
use_domain_field = True
@property
def requesting_domain(self):
return self.pro_bono_form.cleaned_data['domain']
class ProBonoView(ProBonoMixin, DomainAccountingSettings):
template_name = 'domain/pro_bono/domain.html'
urlname = 'pro_bono'
use_domain_field = False
@property
def requesting_domain(self):
return self.domain
@property
def parent_pages(self):
return [
{
'title': DomainSubscriptionView.page_title,
'url': reverse(DomainSubscriptionView.urlname, args=[self.domain]),
}
]
@property
def section_url(self):
return self.page_url
class FeaturePreviewsView(BaseAdminProjectSettingsView):
urlname = 'feature_previews'
page_title = ugettext_lazy("Feature Previews")
template_name = 'domain/admin/feature_previews.html'
@memoized
def features(self):
features = []
for preview_name in dir(feature_previews):
if not preview_name.startswith('__'):
preview = getattr(feature_previews, preview_name)
if isinstance(preview, feature_previews.FeaturePreview) and preview.has_privilege(self.request):
features.append((preview, preview.enabled(self.domain)))
return features
def get_toggle(self, slug):
if slug not in [f.slug for f, _ in self.features()]:
raise Http404()
try:
return Toggle.get(slug)
except ResourceNotFound:
return Toggle(slug=slug)
@property
def page_context(self):
return {
'features': self.features(),
}
def post(self, request, *args, **kwargs):
for feature, enabled in self.features():
self.update_feature(feature, enabled, feature.slug in request.POST)
return redirect('feature_previews', domain=self.domain)
def update_feature(self, feature, current_state, new_state):
if current_state != new_state:
feature.set(self.domain, new_state, NAMESPACE_DOMAIN)
if feature.save_fn is not None:
feature.save_fn(self.domain, new_state)
class FeatureFlagsView(BaseAdminProjectSettingsView):
urlname = 'domain_feature_flags'
page_title = ugettext_lazy("Feature Flags")
template_name = 'domain/admin/feature_flags.html'
@method_decorator(require_superuser)
def dispatch(self, request, *args, **kwargs):
return super(FeatureFlagsView, self).dispatch(request, *args, **kwargs)
@memoized
def enabled_flags(self):
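# Order the flags so those enabled for the domain come first, then those enabled for the requesting user, then alphabetically by label.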
def _sort_key(toggle_enabled_tuple):
return (not toggle_enabled_tuple[1], not toggle_enabled_tuple[2], toggle_enabled_tuple[0].label)
return sorted(
[(toggle, toggle.enabled(self.domain), toggle.enabled(self.request.couch_user.username))
for toggle in all_toggles()],
key=_sort_key,
)
@property
def page_context(self):
return {
'flags': self.enabled_flags(),
}
class TransferDomainView(BaseAdminProjectSettingsView):
urlname = 'transfer_domain_view'
page_title = ugettext_lazy("Transfer Project")
template_name = 'domain/admin/transfer_domain.html'
@property
@memoized
def active_transfer(self):
return TransferDomainRequest.get_active_transfer(self.domain,
self.request.user.username)
@property
@memoized
def transfer_domain_form(self):
return TransferDomainForm(self.domain,
self.request.user.username,
self.request.POST or None)
def get(self, request, *args, **kwargs):
if self.active_transfer:
self.template_name = 'domain/admin/transfer_domain_pending.html'
if request.GET.get('resend', None):
self.active_transfer.send_transfer_request()
messages.info(request,
_(u"Resent transfer request for project '{domain}'").format(domain=self.domain))
return super(TransferDomainView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
form = self.transfer_domain_form
if form.is_valid():
# Initiate domain transfer
transfer = form.save()
transfer.send_transfer_request()
return HttpResponseRedirect(self.page_url)
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
@property
def page_context(self):
if self.active_transfer:
return {'transfer': self.active_transfer.as_dict()}
else:
return {'form': self.transfer_domain_form}
@method_decorator(domain_admin_required)
def dispatch(self, request, *args, **kwargs):
if not TRANSFER_DOMAIN.enabled(request.domain):
raise Http404()
return super(TransferDomainView, self).dispatch(request, *args, **kwargs)
class ActivateTransferDomainView(BasePageView):
urlname = 'activate_transfer_domain'
page_title = 'Activate Domain Transfer'
template_name = 'domain/activate_transfer_domain.html'
@property
@memoized
def active_transfer(self):
return TransferDomainRequest.get_by_guid(self.guid)
@property
def page_context(self):
if self.active_transfer:
return {'transfer': self.active_transfer.as_dict()}
else:
return {}
@property
def page_url(self):
return self.request.get_full_path()
def get(self, request, guid, *args, **kwargs):
self.guid = guid
if (self.active_transfer and
self.active_transfer.to_username != request.user.username and
not request.user.is_superuser):
return HttpResponseRedirect(reverse("no_permissions"))
return super(ActivateTransferDomainView, self).get(request, *args, **kwargs)
def post(self, request, guid, *args, **kwargs):
self.guid = guid
if not self.active_transfer:
raise Http404()
if self.active_transfer.to_username != request.user.username and not request.user.is_superuser:
return HttpResponseRedirect(reverse("no_permissions"))
self.active_transfer.transfer_domain(ip=get_ip(request))
messages.success(request, _(u"Successfully transferred ownership of project '{domain}'")
.format(domain=self.active_transfer.domain))
return HttpResponseRedirect(reverse('dashboard_default', args=[self.active_transfer.domain]))
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ActivateTransferDomainView, self).dispatch(*args, **kwargs)
class DeactivateTransferDomainView(View):
def post(self, request, guid, *args, **kwargs):
transfer = TransferDomainRequest.get_by_guid(guid)
if not transfer:
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
if (transfer.to_username != request.user.username and
transfer.from_username != request.user.username and
not request.user.is_superuser):
return HttpResponseRedirect(reverse("no_permissions"))
transfer.active = False
transfer.save()
referer = request.META.get('HTTP_REFERER', '/')
# Do not want to send them back to the activate page
if referer.endswith(reverse('activate_transfer_domain', args=[guid])):
messages.info(request,
_(u"Declined ownership of project '{domain}'").format(domain=transfer.domain))
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect(referer)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(DeactivateTransferDomainView, self).dispatch(*args, **kwargs)
from corehq.apps.smsbillables.forms import PublicSMSRateCalculatorForm
from corehq.apps.smsbillables.async_handlers import PublicSMSRatesAsyncHandler
class PublicSMSRatesView(BasePageView, AsyncHandlerMixin):
urlname = 'public_sms_rates_view'
page_title = ugettext_lazy("SMS Rate Calculator")
template_name = 'domain/admin/global_sms_rates.html'
async_handlers = [PublicSMSRatesAsyncHandler]
@property
def page_url(self):
return reverse(self.urlname)
@property
def page_context(self):
return {
'rate_calc_form': PublicSMSRateCalculatorForm()
}
def post(self, request, *args, **kwargs):
return self.async_response or self.get(request, *args, **kwargs)
class SMSRatesView(BaseAdminProjectSettingsView, AsyncHandlerMixin):
urlname = 'domain_sms_rates_view'
page_title = ugettext_lazy("SMS Rate Calculator")
template_name = 'domain/admin/sms_rates.html'
async_handlers = [
SMSRatesAsyncHandler,
SMSRatesSelect2AsyncHandler,
]
@property
@memoized
def rate_calc_form(self):
if self.request.method == 'POST':
return SMSRateCalculatorForm(self.domain, self.request.POST)
return SMSRateCalculatorForm(self.domain)
@property
def page_context(self):
return {
'rate_calc_form': self.rate_calc_form,
}
def post(self, request, *args, **kwargs):
if self.async_response is not None:
return self.async_response
return self.get(request, *args, **kwargs)
@require_POST
@domain_admin_required
def org_request(request, domain):
org_name = request.POST.get("org_name", None)
org = Organization.get_by_name(org_name)
if org:
org_request = OrgRequest.get_requests(org_name, domain=domain, user_id=request.couch_user.get_id)
if not org_request:
org_request = OrgRequest(organization=org_name, domain=domain,
requested_by=request.couch_user.get_id, requested_on=datetime.datetime.utcnow())
org_request.save()
_send_request_notification_email(request, org, domain)
messages.success(request,
"Your request was submitted. The admin of organization %s can now choose to manage the project %s" %
(org_name, domain))
else:
messages.error(request, "You've already submitted a request to this organization")
else:
messages.error(request, "The organization '%s' does not exist" % org_name)
return HttpResponseRedirect(reverse('domain_org_settings', args=[domain]))
def _send_request_notification_email(request, org, dom):
params = {"org": org, "dom": dom, "requestee": request.couch_user,
"url_base": get_site_domain()}
text_content = render_to_string(
"domain/email/org_request_notification.txt", params)
html_content = render_to_string(
"domain/email/org_request_notification.html", params)
recipients = [member.email for member in org.get_members()
if member.is_org_admin(org.name)]
subject = "New request to add a project to your organization! -- CommcareHQ"
try:
for recipient in recipients:
send_html_email_async.delay(subject, recipient, html_content,
text_content=text_content,
email_from=settings.DEFAULT_FROM_EMAIL)
except Exception:
logging.warning("Can't send notification email, "
"but the message was:\n%s" % text_content)
class BaseCardView(DomainAccountingSettings):
@property
def payment_method(self):
payment_method, __ = StripePaymentMethod.objects.get_or_create(
web_user=self.request.user.username,
method_type=PaymentMethodType.STRIPE,
)
return payment_method
def _generic_error(self):
error = ("Something went wrong while processing your request. "
"We're working quickly to resolve the issue. "
"Please try again in a few hours.")
return json_response({'error': error}, status_code=500)
def _stripe_error(self, e):
body = e.json_body
err = body['error']
return json_response({'error': err['message'],
'cards': self.payment_method.all_cards_serialized(self.account)},
status_code=502)
class CardView(BaseCardView):
"""View for dealing with a single Credit Card"""
url_name = "card_view"
def post(self, request, domain, card_token):
try:
card = self.payment_method.get_card(card_token)
if request.POST.get("is_autopay") == 'true':
self.payment_method.set_autopay(card, self.account)
elif request.POST.get("is_autopay") == 'false':
self.payment_method.unset_autopay(card, self.account)
except self.payment_method.STRIPE_GENERIC_ERROR as e:
return self._stripe_error(e)
except Exception as e:
return self._generic_error()
return json_response({'cards': self.payment_method.all_cards_serialized(self.account)})
def delete(self, request, domain, card_token):
try:
self.payment_method.remove_card(card_token)
except self.payment_method.STRIPE_GENERIC_ERROR as e:
return self._stripe_error(e)
return json_response({'cards': self.payment_method.all_cards_serialized(self.account)})
class CardsView(BaseCardView):
"""View for dealing Credit Cards"""
url_name = "cards_view"
def get(self, request, domain):
return json_response({'cards': self.payment_method.all_cards_serialized(self.account)})
def post(self, request, domain):
stripe_token = request.POST.get('token')
autopay = request.POST.get('autopay') == 'true'
try:
self.payment_method.create_card(stripe_token, self.account, autopay)
except self.payment_method.STRIPE_GENERIC_ERROR as e:
return self._stripe_error(e)
except Exception as e:
return self._generic_error()
return json_response({'cards': self.payment_method.all_cards_serialized(self.account)})
| [] |
alexmascension/ANMI | src/anmi/T2/funcs_met_iters.py | 9c51a497a5fa2650f1429f847c7f9df69271168b | from sympy import simplify, zeros
from sympy import Matrix as mat
import numpy as np
from ..genericas import print_verbose, matriz_inversa
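# Note: a stationary iteration x_{k+1} = M^{-1}(N x_k + b) converges for every starting vector iff the spectral radius of M^{-1}N (its largest eigenvalue in absolute value) is < 1; the criteria below check this directly or via sufficient conditions on A.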
def criterio_radio_espectral(H, verbose=True):
eigs = [simplify(i) for i in list(H.eigenvals().keys())]
print_verbose("||Criterio de radio espectral||", verbose)
try:
print_verbose(
f"El mayor autovalor es {np.max(np.array(eigs, dtype=float))}. Si ese valor es < 1 entonces los métodos iterativos convergen.",
verbose,
)
except:
print_verbose(
f"Los autovalores son {eigs}. Si el mayor autovalor es < 1, entonces el método converge.",
verbose,
)
def criterio_diagonal_dominante(A, verbose=True):
print_verbose(
"||Criterio de Diagonal Dominante||\n Si la matriz es dominante por filas, los métodos de Jacobi y Gauss-Seidel convergen.",
verbose,
)
A_abs = abs(A)
try:
np.array(A_abs, dtype=float)
for r in range(A.shape[0]):
diff = 2 * A_abs[r, r] - sum(A_abs[r, :])
if diff <= 0:
print_verbose(
f"La fila {r} NO es dominante por filas: diff = {diff}.", verbose
)
return
print_verbose("La matriz CUMPLE EL CRITERIO DIAGONAL DOMINANTE", verbose)
except:
print_verbose(
"La matriz tiene complejos o simbolos. Hay que verificar el criterio a mano.",
verbose,
)
def criterio_simetrica_definida_positiva(A, verbose=True):
print_verbose(
"||Criterio de Sim Def Pos||\n Si la matriz es simétrica y definida positiva, el método de Gauss-Seidel es convergente.",
verbose,
)
if A != A.T:
print_verbose("La matriz NO es simétrica.", verbose)
return
det_A = A.det()
print_verbose(f"El determinante de A es {det_A}.", verbose)
try:
if float(det_A) > 0:
print_verbose(
"La matriz es DEFINIDA POSITIVA (el determinante es positivo).",
verbose,
)
print_verbose("La matriz CUMPLE EL CRITERIO SIM DEF POS", verbose)
else:
print_verbose(
"La matriz NO es DEFINIDA POSITIVA (el determinante no es positivo).",
verbose,
)
except:
print_verbose(
"No podemos determinar la positividad porque hay símbolos o complejos.",
verbose,
)
def criterio_SOR(verbose):
print_verbose(
"||Criterio SOR||\n Si la matriz es simétrica y definida positiva y w in (0, 2) el método SOR es convergente.\nSi w no (0, 2) el método SOR no converge.",
verbose,
)
def criterio_m_matriz(A, verbose):
print_verbose(
"||Criterio M matriz||\n Si la A es M-matriz entonces las descomposiciones de Jacobi y Gauss-Seidel son convergentes.\nA^-1 >= 0\naij < 0 para todo i =/= j",
verbose,
)
A_inv = matriz_inversa(A)
try:
np.array(A, dtype=float)
if np.min(A_inv) >= 0:
print_verbose("A^-1 >= 0", verbose)
else:
print_verbose("A^-1 < 0. La matriz NO CUMPLE el criterio", verbose)
A_null_diag = A.copy()
for i in range(A.shape[0]):
A_null_diag[i, i] = 0
if np.max(A_null_diag) > 0:
print_verbose(
"La matriz tiene elementos no diagonales positivos. NO CUMPLE el criterio.",
verbose,
)
else:
print_verbose("Los elementos no diagonales son negativos.", verbose)
except:
print_verbose(
"La matriz tiene complejos o símbolos, no podemos verificar le criterio.",
verbose,
)
def metodo_iterativo(
A, b=None, x0=None, metodo="jacobi", w=1.5, n_iter=10, verbose=True,
):
"""Aplica el método iterativo designado
Args:
A (matriz): Matriz de valores
b (vector, optional): Vector de rhs. Por defecto es 1, 1, ..., 1.
x0 (vector, optional): Vector con elementos de la primera iteración. Por defecto es 1, 1, ..., 1.
metodo (str, optional): método de resolución, puede ser "jacobi", "gs" o "sor".
w (float, optional): Peso para método sor. Defaults to 1.5.
n_iter (int, optional): Número de iteraciones del método. Defaults to 10.
verbose (bool, optional): Imprime resultados intermedios. Defaults to True.
Returns:
dict: 'x': vector de resultados para Ax=b, 'diff': diferencia entre Ax y b para cada iteración.
"""
if b is None:
b = mat([[1] * A.shape[0]]).T
if x0 is None:
x0 = mat([[1] * A.shape[1]]).T
D, L, U = (
zeros(A.shape[0], A.shape[1]),
zeros(A.shape[0], A.shape[1]),
zeros(A.shape[0], A.shape[1]),
)
for r in range(A.shape[0]):
for c in range(A.shape[1]):
if r == c:
D[r, c] = A[r, c]
elif r < c:
U[r, c] = -A[r, c]
else:
L[r, c] = -A[r, c]
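# Splitting A = D - L - U (diagonal, strict lower, strict upper as stored above): Jacobi takes M = D, Gauss-Seidel M = D - L, SOR M = D/w - L, and the iteration uses N = M - A.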
if metodo == "jacobi":
M = D
elif metodo == "gs":
M = D - L
elif metodo == "sor":
M = D / w - L
N = simplify(M - A)
# Apply the convergence criteria
criterio_radio_espectral(matriz_inversa(M) * N, verbose)
criterio_diagonal_dominante(A, verbose)
criterio_simetrica_definida_positiva(A, verbose)
criterio_SOR(verbose)
criterio_m_matriz(A, verbose)
diff = []
for iter in range(n_iter):  # Apply the iterative update
x0 = (matriz_inversa(M)) * (N * x0 + b)
diff.append(np.sum(np.abs(A * x0 - b)))
return {"x": x0, "diff": diff}
| [((5208, 5223), 'sympy.simplify', 'simplify', (['(M - A)'], {}), '(M - A)\n', (5216, 5223), False, 'from sympy import simplify, zeros\n'), ((201, 212), 'sympy.simplify', 'simplify', (['i'], {}), '(i)\n', (209, 212), False, 'from sympy import simplify, zeros\n'), ((948, 976), 'numpy.array', 'np.array', (['A_abs'], {'dtype': 'float'}), '(A_abs, dtype=float)\n', (956, 976), True, 'import numpy as np\n'), ((3010, 3034), 'numpy.array', 'np.array', (['A'], {'dtype': 'float'}), '(A, dtype=float)\n', (3018, 3034), True, 'import numpy as np\n'), ((4714, 4743), 'sympy.zeros', 'zeros', (['A.shape[0]', 'A.shape[1]'], {}), '(A.shape[0], A.shape[1])\n', (4719, 4743), False, 'from sympy import simplify, zeros\n'), ((4753, 4782), 'sympy.zeros', 'zeros', (['A.shape[0]', 'A.shape[1]'], {}), '(A.shape[0], A.shape[1])\n', (4758, 4782), False, 'from sympy import simplify, zeros\n'), ((4792, 4821), 'sympy.zeros', 'zeros', (['A.shape[0]', 'A.shape[1]'], {}), '(A.shape[0], A.shape[1])\n', (4797, 4821), False, 'from sympy import simplify, zeros\n'), ((3046, 3059), 'numpy.min', 'np.min', (['A_inv'], {}), '(A_inv)\n', (3052, 3059), True, 'import numpy as np\n'), ((3322, 3341), 'numpy.max', 'np.max', (['A_null_diag'], {}), '(A_null_diag)\n', (3328, 3341), True, 'import numpy as np\n'), ((4605, 4628), 'sympy.Matrix', 'mat', (['[[1] * A.shape[0]]'], {}), '([[1] * A.shape[0]])\n', (4608, 4628), True, 'from sympy import Matrix as mat\n'), ((4663, 4686), 'sympy.Matrix', 'mat', (['[[1] * A.shape[1]]'], {}), '([[1] * A.shape[1]])\n', (4666, 4686), True, 'from sympy import Matrix as mat\n'), ((5611, 5629), 'numpy.abs', 'np.abs', (['(A * x0 - b)'], {}), '(A * x0 - b)\n', (5617, 5629), True, 'import numpy as np\n'), ((389, 416), 'numpy.array', 'np.array', (['eigs'], {'dtype': 'float'}), '(eigs, dtype=float)\n', (397, 416), True, 'import numpy as np\n')] |
zhangxianbing/cookiecutter-pypackage | {{cookiecutter.project_hyphen}}/{{cookiecutter.project_slug}}/__init__.py | 28f7f305d3baf96771881c3359227bed1bc7d182 | """{{ cookiecutter.project_name }} - {{ cookiecutter.project_short_description }}"""
__version__ = "{{ cookiecutter.project_version }}"
__author__ = """{{ cookiecutter.author_name }}"""
__email__ = "{{ cookiecutter.author_email }}"
prog_name = "{{ cookiecutter.project_hyphen }}"
| [] |
mguidon/osparc-dask-gateway | services/osparc-gateway-server/tests/integration/_dask_helpers.py | accd850c15cb3a36cf4421a1d070a4db29843013 | from typing import NamedTuple
from dask_gateway_server.app import DaskGateway
class DaskGatewayServer(NamedTuple):
address: str
proxy_address: str
password: str
server: DaskGateway
| [] |
kazuyaujihara/rdkit | rdkit/ML/InfoTheory/BitRank.py | 06027dcd05674787b61f27ba46ec0d42a6037540 | #
# Copyright (C) 2001,2002,2003 greg Landrum and Rational Discovery LLC
#
""" Functionality for ranking bits using info gains
**Definitions used in this module**
- *sequence*: an object capable of containing other objects which supports
__getitem__() and __len__(). Examples of these include lists, tuples, and
Numeric arrays.
- *IntVector*: an object containing integers which supports __getitem__() and
__len__(). Examples include lists, tuples, Numeric Arrays, and BitVects.
**NOTE**: Neither *sequences* nor *IntVectors* need to support item assignment.
It is perfectly acceptable for them to be read-only, so long as they are
random-access.
"""
import numpy
from rdkit.ML.InfoTheory import entropy
def FormCounts(bitVects, actVals, whichBit, nPossibleActs, nPossibleBitVals=2):
""" generates the counts matrix for a particular bit
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- whichBit: an integer, the bit number to use.
- nPossibleActs: the (integer) number of possible activity values.
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
**Returns**
a Numeric array with the counts
**Notes**
This is really intended for internal use.
"""
if len(bitVects) != len(actVals):
raise ValueError('var and activity lists should be the same length')
res = numpy.zeros((nPossibleBitVals, nPossibleActs), numpy.integer)
for i in range(len(bitVects)):
res[bitVects[i][whichBit], actVals[i]] += 1
return res
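# Illustrative example: FormCounts([[0, 1], [1, 1]], [0, 1], whichBit=1, nPossibleActs=2) gives [[0, 0], [1, 1]] -- bit 1 is set in both vectors, once with activity 0 and once with activity 1.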
def CalcInfoGains(bitVects, actVals, nPossibleActs, nPossibleBitVals=2):
""" Calculates the information gain for a set of points and activity values
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- nPossibleActs: the (integer) number of possible activity values.
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
**Returns**
a list of floats
"""
if len(bitVects) != len(actVals):
raise ValueError('var and activity lists should be the same length')
nBits = len(bitVects[0])
res = numpy.zeros(nBits, numpy.float)
for bit in range(nBits):
counts = FormCounts(bitVects, actVals, bit, nPossibleActs, nPossibleBitVals=nPossibleBitVals)
res[bit] = entropy.InfoGain(counts)
return res
def RankBits(bitVects, actVals, nPossibleBitVals=2, metricFunc=CalcInfoGains):
""" Rank a set of bits according to a metric function
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
- metricFunc: (optional) the metric function to be used. See _CalcInfoGains()_
for a description of the signature of this function.
**Returns**
A 2-tuple containing:
- the relative order of the bits (a list of ints)
- the metric calculated for each bit (a list of floats)
"""
nPossibleActs = max(actVals) + 1
metrics = metricFunc(bitVects, actVals, nPossibleActs, nPossibleBitVals=nPossibleBitVals)
bitOrder = list(numpy.argsort(metrics))
bitOrder.reverse()
return bitOrder, metrics
def AnalyzeSparseVects(bitVects, actVals):
""" #DOC
**Arguments**
- bitVects: a *sequence* containing SBVs
- actVals: a *sequence*
**Returns**
a list of floats
**Notes**
- these need to be bit vects and binary activities
"""
nPts = len(bitVects)
if nPts != len(actVals):
raise ValueError('var and activity lists should be the same length')
nBits = bitVects[0].GetSize()
actives = numpy.zeros(nBits, numpy.integer)
inactives = numpy.zeros(nBits, numpy.integer)
nActives, nInactives = 0, 0
for i in range(nPts):
sig, act = bitVects[i], actVals[i]
onBitList = sig.GetOnBits()
if act:
for bit in onBitList:
actives[bit] += 1
nActives += 1
else:
for bit in onBitList:
inactives[bit] += 1
nInactives += 1
resTbl = numpy.zeros((2, 2), numpy.integer)
res = []
gains = []
for bit in range(nBits):
nAct, nInact = actives[bit], inactives[bit]
if nAct or nInact:
resTbl[0, 0] = nAct
resTbl[1, 0] = nPts - nAct
resTbl[0, 1] = nInact
resTbl[1, 1] = nPts - nInact
gain = entropy.InfoGain(resTbl)
gains.append(gain)
res.append((bit, gain, nAct, nInact))
return res, gains
def SparseRankBits(bitVects, actVals, metricFunc=AnalyzeSparseVects):
""" Rank a set of bits according to a metric function
**Arguments**
- bitVects: a *sequence* containing SBVs
- actVals: a *sequence*
- metricFunc: (optional) the metric function to be used. See _SparseCalcInfoGains()_
for a description of the signature of this function.
**Returns**
A 2-tuple containing:
- the relative order of the bits (a list of ints)
- the metric calculated for each bit (a list of floats)
**Notes**
- these need to be bit vects and binary activities
"""
info, metrics = metricFunc(bitVects, actVals)
bitOrder = list(numpy.argsort(metrics))
bitOrder.reverse()
return bitOrder, info
| [((1514, 1575), 'numpy.zeros', 'numpy.zeros', (['(nPossibleBitVals, nPossibleActs)', 'numpy.integer'], {}), '((nPossibleBitVals, nPossibleActs), numpy.integer)\n', (1525, 1575), False, 'import numpy\n'), ((2354, 2385), 'numpy.zeros', 'numpy.zeros', (['nBits', 'numpy.float'], {}), '(nBits, numpy.float)\n', (2365, 2385), False, 'import numpy\n'), ((3938, 3971), 'numpy.zeros', 'numpy.zeros', (['nBits', 'numpy.integer'], {}), '(nBits, numpy.integer)\n', (3949, 3971), False, 'import numpy\n'), ((3986, 4019), 'numpy.zeros', 'numpy.zeros', (['nBits', 'numpy.integer'], {}), '(nBits, numpy.integer)\n', (3997, 4019), False, 'import numpy\n'), ((4330, 4364), 'numpy.zeros', 'numpy.zeros', (['(2, 2)', 'numpy.integer'], {}), '((2, 2), numpy.integer)\n', (4341, 4364), False, 'import numpy\n'), ((2527, 2551), 'rdkit.ML.InfoTheory.entropy.InfoGain', 'entropy.InfoGain', (['counts'], {}), '(counts)\n', (2543, 2551), False, 'from rdkit.ML.InfoTheory import entropy\n'), ((3432, 3454), 'numpy.argsort', 'numpy.argsort', (['metrics'], {}), '(metrics)\n', (3445, 3454), False, 'import numpy\n'), ((5418, 5440), 'numpy.argsort', 'numpy.argsort', (['metrics'], {}), '(metrics)\n', (5431, 5440), False, 'import numpy\n'), ((4622, 4646), 'rdkit.ML.InfoTheory.entropy.InfoGain', 'entropy.InfoGain', (['resTbl'], {}), '(resTbl)\n', (4638, 4646), False, 'from rdkit.ML.InfoTheory import entropy\n')] |
vinay-swamy/gMVP | trainer/dataset.py | 62202baa0769dfe0e47c230e78dffa42fb1280f1 | import tensorflow as tf
import os
import pickle
import numpy as np
from constant_params import input_feature_dim, window_size
def build_dataset(input_tfrecord_files, batch_size):
drop_remainder = False
feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64),
'ref_aa': tf.io.FixedLenFeature([], tf.int64),
'alt_aa': tf.io.FixedLenFeature([], tf.int64),
'feature': tf.io.FixedLenFeature([], tf.string),
'mask': tf.io.FixedLenFeature([], tf.string),
'var_id': tf.io.FixedLenFeature([], tf.string),
}
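# Each serialized example carries the label, reference/alternate amino-acid indices and variant id, plus the window feature matrix and its mask stored as raw float32 bytes.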
def _parser(example_proto):
parsed = tf.io.parse_single_example(example_proto, feature_description)
label, ref_aa, alt_aa = parsed['label'], parsed['ref_aa'], parsed[
'alt_aa']
var_id = parsed['var_id']
ref_aa, alt_aa, label = tf.cast(ref_aa, tf.int32), tf.cast(
alt_aa, tf.int32), tf.cast(label, tf.float32)
feature = tf.io.decode_raw(parsed['feature'], tf.float32)
feature = tf.reshape(feature, (window_size, input_feature_dim))
mask = tf.io.decode_raw(parsed['mask'], tf.float32)
mask = tf.reshape(mask, (window_size, ))
h = window_size // 2
# mask the position of interest
mask = tf.concat(
[mask[:h],
tf.cast([
1,
], dtype=tf.float32), mask[h + 1:]],
axis=-1)
'''
pos_encoding = 1.0 + tf.cast(
tf.math.abs(window_size // 2 - tf.range(window_size)),
dtype=tf.float32)
#pos_encoding = tf.math.log() / tf.math.log(2.0)
feature = tf.concat([feature, pos_encoding[:, tf.newaxis]], axis=-1)
'''
return var_id, ref_aa, alt_aa, feature, label, mask
dataset = tf.data.TFRecordDataset(input_tfrecord_files)
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
dataset = dataset.shuffle(2048)
dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
#dataset = dataset.prefetch(4)
return dataset
def build_all_possible_missenses_dataset(tr_list, feature_dir, batch_size):
amino_acid_order = 'ACDEFGHIKLMNPQRSTVWY*'
def _gen_data():
for transcript_id in tr_list:
feature_path = f'{feature_dir}/{transcript_id}.pickle'
if not os.path.exists(feature_path):
continue
print(feature_path, flush=True)
with open(feature_path, 'rb') as fr:
feature = pickle.load(fr)
L = feature.shape[0]
w = window_size // 2
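# Slide a window of w residues on either side over every position; positions falling outside the protein stay zero and are marked 1 in the mask, and the centre (variant) position is also masked, mirroring the training-time parser above.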
for aa_pos in range(L):
ref_aa = int(feature[aa_pos, 0])
start = max(aa_pos - w, 0)
end = min(L, aa_pos + 1 + w)
var_start = start - (aa_pos - w)
var_end = var_start + (end - start)
var_feature = np.zeros([w * 2 + 1, feature.shape[1]])
var_feature[var_start:var_end] = feature[start:end]
mask = np.ones((w * 2 + 1, ), dtype=np.float32)
mask[var_start:var_end] = 0.0
mask[w] = 1.0
for alt_aa in range(20):
var_id = f'{transcript_id}_{str(aa_pos+1)}_{amino_acid_order[ref_aa]}_{amino_acid_order[alt_aa]}'.encode(
'utf-8')
yield var_id, np.int32(ref_aa), np.int32(
alt_aa), np.float32(var_feature), np.float32(mask)
dataset = tf.data.Dataset.from_generator(
_gen_data, (tf.string, tf.int32, tf.int32, tf.float32, tf.float32),
(tf.TensorShape(()), tf.TensorShape(()), tf.TensorShape(
()), tf.TensorShape((window_size, input_feature_dim)),
tf.TensorShape((window_size, ))))
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
#dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(4)
return dataset
def build_test_dataset(input_tfrecord_files, batch_size):
drop_remainder = False
feature_description = {
'ref_aa': tf.io.FixedLenFeature([], tf.int64),
'alt_aa': tf.io.FixedLenFeature([], tf.int64),
'feature': tf.io.FixedLenFeature([], tf.string),
'mask': tf.io.FixedLenFeature([], tf.string),
'var_id': tf.io.FixedLenFeature([], tf.string),
}
def _parser(example_proto):
parsed = tf.io.parse_single_example(example_proto, feature_description)
ref_aa, alt_aa = parsed['ref_aa'], parsed['alt_aa']
var_id = parsed['var_id']
ref_aa, alt_aa = tf.cast(ref_aa, tf.int32), tf.cast(alt_aa, tf.int32)
feature = tf.io.decode_raw(parsed['feature'], tf.float32)
feature = tf.reshape(feature, (window_size, input_feature_dim))
mask = tf.io.decode_raw(parsed['mask'], tf.float32)
mask = tf.reshape(mask, (window_size, ))
h = window_size // 2
# mask the position of interest
mask = tf.concat(
[mask[:h],
tf.cast([
1,
], dtype=tf.float32), mask[h + 1:]],
axis=-1)
return var_id, ref_aa, alt_aa, feature, mask
dataset = tf.data.TFRecordDataset(input_tfrecord_files)
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
#dataset = dataset.prefetch(4)
return dataset
| [((1795, 1840), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_tfrecord_files'], {}), '(input_tfrecord_files)\n', (1818, 1840), True, 'import tensorflow as tf\n'), ((1856, 1873), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (1871, 1873), True, 'import tensorflow as tf\n'), ((3909, 3926), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (3924, 3926), True, 'import tensorflow as tf\n'), ((5424, 5469), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_tfrecord_files'], {}), '(input_tfrecord_files)\n', (5447, 5469), True, 'import tensorflow as tf\n'), ((5485, 5502), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (5500, 5502), True, 'import tensorflow as tf\n'), ((255, 290), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (276, 290), True, 'import tensorflow as tf\n'), ((310, 345), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (331, 345), True, 'import tensorflow as tf\n'), ((365, 400), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (386, 400), True, 'import tensorflow as tf\n'), ((421, 457), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (442, 457), True, 'import tensorflow as tf\n'), ((475, 511), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (496, 511), True, 'import tensorflow as tf\n'), ((531, 567), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (552, 567), True, 'import tensorflow as tf\n'), ((625, 687), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (651, 687), True, 'import tensorflow as tf\n'), ((965, 1012), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['feature']", 'tf.float32'], {}), "(parsed['feature'], tf.float32)\n", (981, 1012), True, 'import tensorflow as tf\n'), ((1031, 1084), 'tensorflow.reshape', 'tf.reshape', (['feature', '(window_size, input_feature_dim)'], {}), '(feature, (window_size, input_feature_dim))\n', (1041, 1084), True, 'import tensorflow as tf\n'), ((1101, 1145), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['mask']", 'tf.float32'], {}), "(parsed['mask'], tf.float32)\n", (1117, 1145), True, 'import tensorflow as tf\n'), ((1161, 1193), 'tensorflow.reshape', 'tf.reshape', (['mask', '(window_size,)'], {}), '(mask, (window_size,))\n', (1171, 1193), True, 'import tensorflow as tf\n'), ((4324, 4359), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (4345, 4359), True, 'import tensorflow as tf\n'), ((4379, 4414), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (4400, 4414), True, 'import tensorflow as tf\n'), ((4435, 4471), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4456, 4471), True, 'import tensorflow as tf\n'), ((4489, 4525), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4510, 4525), True, 'import tensorflow as tf\n'), ((4545, 4581), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4566, 4581), True, 'import 
tensorflow as tf\n'), ((4639, 4701), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (4665, 4701), True, 'import tensorflow as tf\n'), ((4894, 4941), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['feature']", 'tf.float32'], {}), "(parsed['feature'], tf.float32)\n", (4910, 4941), True, 'import tensorflow as tf\n'), ((4960, 5013), 'tensorflow.reshape', 'tf.reshape', (['feature', '(window_size, input_feature_dim)'], {}), '(feature, (window_size, input_feature_dim))\n', (4970, 5013), True, 'import tensorflow as tf\n'), ((5030, 5074), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['mask']", 'tf.float32'], {}), "(parsed['mask'], tf.float32)\n", (5046, 5074), True, 'import tensorflow as tf\n'), ((5090, 5122), 'tensorflow.reshape', 'tf.reshape', (['mask', '(window_size,)'], {}), '(mask, (window_size,))\n', (5100, 5122), True, 'import tensorflow as tf\n'), ((852, 877), 'tensorflow.cast', 'tf.cast', (['ref_aa', 'tf.int32'], {}), '(ref_aa, tf.int32)\n', (859, 877), True, 'import tensorflow as tf\n'), ((879, 904), 'tensorflow.cast', 'tf.cast', (['alt_aa', 'tf.int32'], {}), '(alt_aa, tf.int32)\n', (886, 904), True, 'import tensorflow as tf\n'), ((919, 945), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (926, 945), True, 'import tensorflow as tf\n'), ((3728, 3746), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3742, 3746), True, 'import tensorflow as tf\n'), ((3748, 3766), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3762, 3766), True, 'import tensorflow as tf\n'), ((3768, 3786), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3782, 3786), True, 'import tensorflow as tf\n'), ((3801, 3849), 'tensorflow.TensorShape', 'tf.TensorShape', (['(window_size, input_feature_dim)'], {}), '((window_size, input_feature_dim))\n', (3815, 3849), True, 'import tensorflow as tf\n'), ((3860, 3890), 'tensorflow.TensorShape', 'tf.TensorShape', (['(window_size,)'], {}), '((window_size,))\n', (3874, 3890), True, 'import tensorflow as tf\n'), ((4822, 4847), 'tensorflow.cast', 'tf.cast', (['ref_aa', 'tf.int32'], {}), '(ref_aa, tf.int32)\n', (4829, 4847), True, 'import tensorflow as tf\n'), ((4849, 4874), 'tensorflow.cast', 'tf.cast', (['alt_aa', 'tf.int32'], {}), '(alt_aa, tf.int32)\n', (4856, 4874), True, 'import tensorflow as tf\n'), ((1325, 1355), 'tensorflow.cast', 'tf.cast', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (1332, 1355), True, 'import tensorflow as tf\n'), ((2444, 2472), 'os.path.exists', 'os.path.exists', (['feature_path'], {}), '(feature_path)\n', (2458, 2472), False, 'import os\n'), ((2619, 2634), 'pickle.load', 'pickle.load', (['fr'], {}), '(fr)\n', (2630, 2634), False, 'import pickle\n'), ((3009, 3048), 'numpy.zeros', 'np.zeros', (['[w * 2 + 1, feature.shape[1]]'], {}), '([w * 2 + 1, feature.shape[1]])\n', (3017, 3048), True, 'import numpy as np\n'), ((3141, 3180), 'numpy.ones', 'np.ones', (['(w * 2 + 1,)'], {'dtype': 'np.float32'}), '((w * 2 + 1,), dtype=np.float32)\n', (3148, 3180), True, 'import numpy as np\n'), ((5254, 5284), 'tensorflow.cast', 'tf.cast', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (5261, 5284), True, 'import tensorflow as tf\n'), ((3493, 3509), 'numpy.int32', 'np.int32', (['ref_aa'], {}), '(ref_aa)\n', (3501, 3509), True, 'import numpy as np\n'), ((3511, 3527), 'numpy.int32', 'np.int32', (['alt_aa'], {}), 
'(alt_aa)\n', (3519, 3527), True, 'import numpy as np\n'), ((3554, 3577), 'numpy.float32', 'np.float32', (['var_feature'], {}), '(var_feature)\n', (3564, 3577), True, 'import numpy as np\n'), ((3579, 3595), 'numpy.float32', 'np.float32', (['mask'], {}), '(mask)\n', (3589, 3595), True, 'import numpy as np\n')] |
dpressel/baseline | layers/eight_mile/pytorch/layers.py | 2f46f3b043f2d20bc348495cc54c834f31f71098 | import copy
import math
import logging
from typing import Dict, List, Optional, Tuple, Union
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit as jit
import torch.autograd
import contextlib
import glob
from eight_mile.utils import listify, Offsets, is_sequence, str2bool, get_alibi_slopes
from eight_mile.utils import transition_mask as transition_mask_np
MASK_FALSE = False
logger = logging.getLogger("mead.layers")
def sequence_mask(lengths: torch.Tensor, max_len: int = -1) -> torch.Tensor:
"""Generate a sequence mask of shape `BxT` based on the given lengths
:param lengths: A `B` tensor containing the lengths of each example
:param max_len: The maximum width (length) allowed in this mask (defaults to -1, which uses the longest length in the batch)
:return: A mask
"""
lens = lengths.cpu()
if max_len < 0:
max_len_v = torch.max(lens)
else:
max_len_v = max_len
# 1 x T
row = torch.arange(0, max_len_v).type_as(lens).view(1, -1)
# B x 1
col = lens.view(-1, 1)
# Broadcast to B x T, compares increasing number to max
mask = row < col
return mask
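# e.g. sequence_mask(torch.tensor([1, 3]), max_len=4) -> [[True, False, False, False], [True, True, True, False]]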
def sequence_mask_mxlen(lengths: torch.Tensor, max_len: int) -> torch.Tensor:
"""Generate a sequence mask of shape `BxT` based on the given lengths, with a maximum value
This function primarily exists to make ONNX tracing work better
:param lengths: A `B` tensor containing the lengths of each example
:param max_len: The maximum width (length) allowed in this mask
:return: A mask
"""
lens = lengths.cpu()
max_len_v = max_len
# 1 x T
row = torch.arange(0, max_len_v).type_as(lens).view(1, -1)
# B x 1
col = lens.view(-1, 1)
# Broadcast to B x T, compares increasing number to max
mask = row < col
return mask
@torch.jit.script
def truncate_mask_over_time(mask: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
Tout = x.shape[1]
mask = mask[:, :Tout]
#mask = mask.narrow(1, 0, arcs_h.shape[1])
return mask
def vec_log_sum_exp(vec: torch.Tensor, dim: int) -> torch.Tensor:
"""Vectorized version of log-sum-exp
:param vec: Vector
:param dim: What dimension to operate on
:return:
"""
max_scores, idx = torch.max(vec, dim, keepdim=True)
max_scores_broadcast = max_scores.expand_as(vec)
return max_scores + torch.log(torch.sum(torch.exp(vec - max_scores_broadcast), dim, keepdim=True))
def unsort_batch(batch: torch.Tensor, perm_idx: torch.Tensor) -> torch.Tensor:
"""Undo the sort on a batch of tensors done for packing the data in the RNN.
:param batch: The batch of data batch first `[B, ...]`
:param perm_idx: The permutation index returned from the torch.sort.
:returns: The batch in the original order.
"""
# Add ones to the shape of the perm_idx until it can broadcast to the batch
perm_idx = perm_idx.to(batch.device)
diff = len(batch.shape) - len(perm_idx.shape)
extra_dims = [1] * diff
perm_idx = perm_idx.view([-1] + extra_dims)
return torch.scatter(torch.zeros_like(batch), 0, perm_idx.expand_as(batch), batch)
def infer_lengths(tensor, dim=1):
"""Infer the lengths of an input based on the idea the Offsets.PAD was used as the padding token.
:param tensor: The data to infer the length of, should be either [B, T] or [T, B]
:param dim: The dimension which contains the sequential signal
:returns: A Tensor of shape `[B]` that has the lengths for example item in the batch
"""
if len(tensor.shape) != 2:
raise ValueError(f"infer_lengths only works with tensors wit two dims right now, got {len(tensor.shape)}")
offsets = torch.arange(1, tensor.shape[dim] + 1, device=tensor.device, dtype=tensor.dtype).unsqueeze(1 - dim)
non_pad_loc = (tensor != Offsets.PAD).to(tensor.dtype)
return torch.argmax(non_pad_loc * offsets, dim=dim) + 1
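# Illustrative sketch (editor-added), assuming Offsets.PAD == 0 as in the usual
# mead/baseline vocab convention:
# >>> x = torch.tensor([[5, 6, 0, 0], [7, 8, 9, 0]])
# >>> infer_lengths(x, dim=1)
# tensor([2, 3])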
def tensor_and_lengths(inputs) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Return either the unpacked inputs (2), or a `Tuple` of the input with None
TODO: this function should probably be changed to always return the lengths second.
To do this, we just need a sentinel value, e.g. <PAD> (0). The problem with doing this is
that it might be possible to generate <PAD> in the middle of the tensor which would make that
length invalid.
:param inputs: Either a sequence of the `(tensor, length)` or just the `tensor`
:return: A `Tuple` of `(tensor, length)` or `(tensor, None)`
"""
if isinstance(inputs, (list, tuple)):
in_tensor, lengths = inputs
else:
in_tensor = inputs
lengths = None
return in_tensor, lengths
class VariationalDropout(nn.Module):
"""Inverted dropout that applies the same mask at each time step."""
def __init__(self, pdrop: float = 0.5, batch_first: bool = False):
"""Variational Dropout
:param pdrop: the percentage to drop
"""
super().__init__()
self.pdrop = pdrop
self.batch_first = batch_first
def extra_repr(self):
return "p=%.1f" % self.pdrop
def forward(self, input: torch.Tensor) -> torch.Tensor:
if not self.training:
return input
# Create a mask that covers a single time step
if self.batch_first:
dim0 = input.size(0)
dim1 = 1
else:
dim0 = 1
dim1 = input.size(1)
mask = torch.zeros(dim0, dim1, input.size(2)).bernoulli_(1 - self.pdrop).to(input.device)
# Scale by the keep probability (inverted dropout) so activations keep the same expected value
mask = mask / (1 - self.pdrop)
# Broadcast the mask over the sequence
return mask * input
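# Hedged usage sketch (editor-added): the same dropout mask is broadcast over the
# time dimension, so a dropped feature is dropped for the whole sequence.
# >>> vd = VariationalDropout(pdrop=0.25, batch_first=True)
# >>> y = vd(torch.randn(8, 20, 32))   # [B, T, H] in, same shape out while training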
class SequenceLoss(nn.Module):
"""Computes the loss over a sequence"""
def __init__(self, LossFn: nn.Module = nn.NLLLoss, avg: str = "token"):
"""A class that applies a Loss function to sequence via the folding trick.
:param LossFn: A loss function to apply (defaults to `nn.NLLLoss`)
:param avg: A divisor to apply, valid values are `token` and `batch`
"""
super().__init__()
self.avg = avg
if avg == "token":
self.crit = LossFn(ignore_index=Offsets.PAD, reduction="mean")
self._norm = self._no_norm
else:
self.crit = LossFn(ignore_index=Offsets.PAD, reduction="sum")
self._norm = self._batch_norm
def _batch_norm(self, loss, inputs):
return loss / inputs.size()[0]
def _no_norm(self, loss, inputs):
return loss
def forward(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""Evaluate some loss over a sequence.
:param inputs: torch.FloatTensor, [B, .., C] The scores from the model. Batch First
:param targets: torch.LongTensor, The labels.
:returns: torch.FloatTensor, The loss.
"""
total_sz = targets.nelement()
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
return self._norm(loss, inputs)
def extra_repr(self):
return f"reduction={self.avg}"
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smoothing, ignore_index=0, reduction="none"):
"""Use Label smoothing from `Szegedy et. al., 2015`_ to temper model confidence.
Implements add-gamma smoothing where the probability mass of the gold label distribution
is smoothed across classes.
This implementation is based on `OpenNMT-py`_ but has been adapted to not require the
vocabulary size up front.
.. _Szegedy et. al., 2015: https://arxiv.org/abs/1512.00567
.. _OpenNMT-py: https://github.com/OpenNMT/OpenNMT-py/blob/938a4f561b07f4d468647823fab761cfb51f21da/onmt/utils/loss.py#L194
"""
if not (0.0 < label_smoothing <= 1.0):
raise ValueError(f"`label_smoothing` must be between 0.0 and 1.0, got {label_smoothing}")
super().__init__()
self.ignore_index = ignore_index
self.label_smoothing = label_smoothing
self.confidence = 1.0 - label_smoothing
self.reduction = reduction if reduction != "mean" else "batchmean"
def forward(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""
:param output: The model outputs, [B, V]
:param target: The target labels, [B]
"""
B, V = output.size()
smoothed = torch.full((B, V), self.label_smoothing / (V - 2))
smoothed[:, self.ignore_index] = 0
smoothed = torch.scatter(smoothed, 1, target.unsqueeze(1), self.confidence)
smoothed = smoothed.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, smoothed, reduction=self.reduction)
def extra_repr(self):
return f"label_smoothing={self.label_smoothing}"
class MeanPool1D(nn.Module):
"""Do a mean pool while accounting for the length of a sequence
"""
def __init__(self, outsz, batch_first=True):
"""Set up pooling module
:param outsz: The output dim, for downstream access
:param batch_first: Is this module batch first or time first?
"""
super().__init__()
self.batch_first = batch_first
self.reduction_dim = 1 if self.batch_first else 0
self.output_dim = outsz
self.requires_length = True
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Apply mean pooling on the valid inputs
:param inputs: A tuple of `(input, lengths)`
:return: Pooled output
"""
tensor, lengths = tensor_and_lengths(inputs)
# Regardless of whether the input is `[B, T, H]` or `[T, B, H]` the shape after
# the sum is `[B, H]` so the lengths (of shape `[B]`) should be unsqueezed to
# `[B, 1]` in order to broadcast
return torch.sum(tensor, self.reduction_dim, keepdim=False) / torch.unsqueeze(lengths, -1).to(tensor.dtype).to(
tensor.device
)
def extra_repr(self):
return f"batch_first={self.batch_first}"
class MaxPool1D(nn.Module):
"""Do a max-pooling operation with or without a length given
"""
def __init__(self, outsz, batch_first=True):
super().__init__()
self.batch_first = batch_first
self.reduction_dim = 1 if self.batch_first else 0
self.output_dim = outsz
def forward(self, inputs: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]) -> torch.Tensor:
"""If we are given a tuple as input, we will use the length, otherwise we will do an operation without masking
:param inputs: either a tuple of `(input, lengths)` or a tensor `input`
:return: A pooled tensor
"""
tensor, lengths = tensor_and_lengths(inputs)
if lengths is not None:
# If tensor = `[B, T, H]`
# mask = `[B, T, 1]`
# If tensor = `[T, B, H]`
# mask = `[T, B, 1]`
# So it will mask all the values in H past the right length
mask = sequence_mask(lengths).to(tensor.device)
mask = mask if self.batch_first else bth2tbh(mask)
# Fill masked with very negative so it never gets selected
tensor = tensor.masked_fill(mask.unsqueeze(-1) == MASK_FALSE, -1e4)
dmax, _ = torch.max(tensor, self.reduction_dim, keepdim=False)
return dmax
def extra_repr(self) -> str:
return f"batch_first={self.batch_first}"
# Torch only added this module in 1.4.0, shim
class GeLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.nn.functional.gelu(x)
#Code taken from: https://github.com/huggingface/transformers/blob/766d4bf7920213bdd8a8afb42a72719190124568/src/transformers/activations.py#L27
class Gpt2GELU(nn.Module):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
def forward(self, input):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
def get_activation(name: str = "relu") -> nn.Module:
"""Get back an `nn.Module` by string name of the activation operator
:param name: A string name of the operation
:return: A module associated with that string
"""
if name is None or name == "ident":
return nn.Identity()
if name == "tanh":
return nn.Tanh()
if name == "gelu":
return GeLU()
if name == "hardtanh":
return nn.Hardtanh()
if name == "leaky_relu":
return nn.LeakyReLU()
if name == "prelu":
return nn.PReLU()
if name == "sigmoid":
return nn.Sigmoid()
if name == "log_sigmoid":
return nn.LogSigmoid()
if name == "log_softmax":
return nn.LogSoftmax(dim=-1)
if name == "softmax":
return nn.Softmax(dim=-1)
if name == "gpt2_gelu":
return Gpt2GELU()
return nn.ReLU()
def _cat_dir(h: torch.Tensor) -> torch.Tensor:
"""Concat forward and backword state vectors.
The shape of the hidden is `[#layers * #dirs, B, H]`. The docs say you can
separate directions with `h.view(#l, #dirs, B, H)` with the forward dir being
index 0 and backwards dir being 1.
This means that before separating with the view the forward dirs are the even
indices in the first dim while the backwards dirs are the odd ones. Here we select
the even and odd values and concatenate them
:param h: The hidden shape as it comes back from PyTorch modules
"""
return torch.cat([h[0 : h.size(0) : 2], h[1 : h.size(0) : 2]], dim=-1)
def concat_state_dirs(state):
"""Convert the bidirectional out of an RNN so the forward and backward values are a single vector."""
if isinstance(state, tuple):
return tuple(_cat_dir(h) for h in state)
return _cat_dir(state)
class Conv1DSame(nn.Module):
"""Perform a 1D convolution with output size same as input size
To make this operation work as expected, we cannot just use `padding=kernel_size//2` inside
of the convolution operation. Instead, we zero-pad the input using the `ConstantPad1d` module
"""
def __init__(self, in_channels: int, out_channels: int, kernel_size: int, bias: bool = True, groups: int = 1, unif: float = 0.0, initializer: Optional[str] = None, activation: Optional[str] = None):
"""Create a 1D conv to produce the same output size as input
:param in_channels: The number of input feature maps
:param out_channels: The number of output feature maps
:param kernel_size: The kernel size
:param bias: Is bias on?
:param groups: Number of conv groups
"""
super().__init__()
end_pad = kernel_size // 2
start_pad = end_pad - 1 if kernel_size % 2 == 0 else end_pad
self.conv = nn.Sequential(
nn.ConstantPad1d((start_pad, end_pad), 0.),
pytorch_conv1d(in_channels, out_channels, kernel_size, unif=unif, initializer=initializer, bias=bias, groups=groups),
get_activation(activation)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Do convolution1d on an input tensor, `[B, C, T]`
:param x: The input tensor of shape `[B, C, T]`
:return: The output tensor of shape `[B, H, T]`
"""
return self.conv(x)
class ConvEncoder(nn.Module):
"""1D Convolutional layer encoder with given activation function, optional dropout
This module takes in a temporal signal of either shape `[B, C, T]` or `[B, T, C]`, depending on the constructor
and produces an output signal of the same orientation (`[B, H, T]` or `[B, T, H]`, respectively). We default
to `[B, T, H]` orientation to make it more convenient for typical layout, but this requires transposing the last
2 dims before and after the convolution operation.
"""
def __init__(self, insz: int, outsz: int, filtsz: int, pdrop: float = 0.0, activation: str = "relu", bias: bool = True, groups: int = 1, hidden_last=True):
"""Construct the encoder with optional dropout, given activation, and orientation
:param insz: The number of input feature maps
:param outsz: The number of output feature maps (or hidden size)
:param filtsz: The kernel size
:param pdrop: The amount of dropout to apply, this defaults to 0
:param activation: The activation function by name, defaults to `relu`
:param bias: Use bias?
:param groups: How many conv groups. Defaults to 1
:param hidden_last: PyTorch only! If `True` the orientation is `[B, T, H]`, o.w. `[B, H, T]` expected
"""
super().__init__()
self.output_dim = outsz
conv = Conv1DSame(insz, outsz, filtsz, bias=bias, groups=groups)
act = get_activation(activation)
dropout = nn.Dropout(pdrop)
if hidden_last:
self.conv = nn.Sequential(BTH2BHT(), conv, act, dropout, BHT2BTH())
else:
self.conv = nn.Sequential(conv, act, dropout)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.conv(input)
class ConvEncoderStack(nn.Module):
"""Create a stack of convolutional encoders with residual connections between, using the `ConvEncoder` underneath
This creates an encoder stack of convolutions, finally returning the last temporal output. Each layer uses zero-padding
which causes the output of the convolution at each layer to be the same length.
As in the `ConvEncoder` we support input tensor shapes of `[B, C, T]` or `[B, T, C]` depending on the constructor
initialization, and transpose underneath the input and output of the stack if the orientation is defaulted to
`[B, T, C]`
"""
def __init__(self, insz: int, outsz: int, filtsz: int, nlayers: int = 1, pdrop: float = 0.0, activation: str = "relu", bias: bool = True, groups: int = 1, hidden_last=True):
"""Construct the encoder stack
:param insz: The input number of feature maps
:param outsz: The output number of feature maps
:param filtsz: The kernel size
:param nlayers: The number of layers in the stack (defaults to a single layer)
:param pdrop: The amount of dropout to apply (defaults to `0`)
:param activation: The activation function to use as a string, defaults to `relu`
:param bias: Use bias?
:param groups: How many conv groups. Defaults to 1
:param hidden_last: PyTorch only! If `True` the orientation is `[B, T, H]`, o.w. `[B, H, T]` expected
"""
super().__init__()
if hidden_last:
first_layer = nn.Sequential(BTH2BHT(), ConvEncoder(insz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False))
else:
first_layer = ConvEncoder(insz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False)
subsequent_layer = ResidualBlock(ConvEncoder(outsz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False))
self.layers = nn.ModuleList([first_layer] + [copy.deepcopy(subsequent_layer) for _ in range(nlayers - 1)])
if hidden_last:
self.layers.append(BHT2BTH())
self.output_dim = outsz
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Apply a stack of 1D convolutions with residual connections between them
:param input: A tensor of shape `[B, T, C]` or `[B, C, T]` depending on value of `hidden_last`
:return: A tensor of shape `[B, T, H]` or `[B, H, T]` depending on the value of `hidden_last`
"""
x = input
for layer in self.layers:
x = layer(x)
return x
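# Hedged usage sketch (editor-added, illustrative shapes): a residual stack of
# same-length convolutions in the default [B, T, H] orientation.
# >>> enc = ConvEncoderStack(insz=100, outsz=200, filtsz=3, nlayers=2)
# >>> y = enc(torch.randn(4, 17, 100))          # -> [4, 17, 200]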
def bth2bht(t: torch.Tensor) -> torch.Tensor:
"""Transpose the 2nd and 3rd dim of a tensor"""
return t.transpose(1, 2).contiguous()
class BTH2BHT(nn.Module):
"""Utility layer to convert from `[B, T, H]` to `[B, H, T]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return bth2bht(t)
def tbh2bht(t: torch.Tensor) -> torch.Tensor:
"""Permute the dimensions, first goes to third, second goes to first, last moves to second"""
return t.permute(1, 2, 0).contiguous()
class TBH2BHT(nn.Module):
"""Utility layer to convert from `[T, B, H]` to `[B, H, T]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return tbh2bht(t)
def tbh2bth(t: torch.Tensor) -> torch.Tensor:
"""Transpose the first 2 dims"""
return t.transpose(0, 1).contiguous()
class TBH2BTH(nn.Module):
"""Utility layer to convert from `[T, B, H]` to `[B, T, H]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return tbh2bth(t)
def bth2tbh(t: torch.Tensor) -> torch.Tensor:
"""Transpose the first 2 dims"""
return t.transpose(0, 1).contiguous()
class BTH2TBH(nn.Module):
"""Utility layer to convert from `[B, T, H]` to `[T, B, H]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return bth2tbh(t)
def bht2bth(t: torch.Tensor) -> torch.Tensor:
return t.transpose(1, 2).contiguous()
class BHT2BTH(nn.Module):
"""Utility layer to convert from `[B, H, T]` to `[B, T, H]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return bht2bth(t)
class ParallelConv(nn.Module):
"""Layer of parallel convolutions with varying filter sizes followed by max over time pooling
This module takes an input tensor of any orientation based on its constructor, and pools its
output to shape `[B, H]`, where `H` is `outsz * len(filtsz)`
"""
def __init__(self, insz: int, outsz: int, filtsz: List[int], activation: str = "relu", input_fmt: str = "bth"):
"""
Constructor for a parallel convolution from any orientation tensor input
:param insz: The number of input feature maps
:param outsz: The number of output feature maps
:param filtsz: The kernel size as a list of parallel filters to apply, e.g. `[3, 4, 5]`
:param activation: An activation function by name to apply
:param input_fmt: A string for the orientation. Valid values are `bth` or `btc` meaning hidden units last,
`bht` or `bct` meaning the temporal dim last or `tbh` or `tbc` meaning the hidden units last and the temporal dim
first
"""
super().__init__()
self.requires_length = False
convs = []
outsz_filts = outsz
self.input_fmt = input_fmt.lower()
if type(outsz) == int:
outsz_filts = len(filtsz) * [outsz]
self.output_dim = sum(outsz_filts)
for i, fsz in enumerate(filtsz):
if fsz % 2 == 0:
conv = Conv1DSame(insz, outsz_filts[i], fsz)
else:
pad = fsz // 2
conv = nn.Conv1d(insz, outsz_filts[i], fsz, padding=pad)
conv = nn.Sequential(
conv,
get_activation(activation)
)
convs.append(conv)
# Add the module so its managed correctly
self.convs = nn.ModuleList(convs)
def transform_input(self, t: torch.Tensor) -> torch.Tensor:
if self.input_fmt == "bth" or self.input_fmt == "btc":
return bth2bht(t)
elif self.input_fmt == "tbh" or self.input_fmt == "tbc":
return tbh2bht(t)
else:
return t
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform the input to `[B, C, T]` from any orientation and perform parallel 1D convs and max over time pool
:param inputs: An input tensor of any format specified in the constructor
:return: A `[B, H]` tensor representing the pooled outputs
"""
mots = []
input_bct = self.transform_input(inputs)
for conv in self.convs:
# In Conv1d, data BxCxT, max over time
conv_out = conv(input_bct)
mot, _ = conv_out.max(2)
mots.append(mot)
mots = torch.cat(mots, 1)
return mots # self.conv_drop(mots)
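# Hedged usage sketch (editor-added): parallel filters over a [B, T, C] input,
# max-pooled over time to [B, outsz * len(filtsz)].
# >>> conv = ParallelConv(insz=100, outsz=50, filtsz=[3, 4, 5], input_fmt="bth")
# >>> pooled = conv(torch.randn(8, 25, 100))    # -> [8, 150]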
class Highway(nn.Module):
"""Highway layer as defined in https://arxiv.org/abs/1505.00387
"""
def __init__(self, input_size: int, **kwargs):
"""Highway layer constructor
:param input_size: The input hidden size
:param kwargs:
"""
super().__init__()
self.proj = nn.Linear(input_size, input_size)
self.transform = nn.Linear(input_size, input_size)
self.transform.bias.data.fill_(-2.0)
self.output_dim = input_size
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Take a tensor in and produce the highway layer output
:param input: Input tensor
:return: output tensor
"""
proj_result = torch.relu(self.proj(input))
proj_gate = torch.sigmoid(self.transform(input))
gated = (proj_gate * proj_result) + ((1 - proj_gate) * input)
return gated
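# Hedged usage sketch (editor-added): a highway layer preserves the feature size.
# >>> hw = Highway(64)
# >>> y = hw(torch.randn(16, 64))               # -> [16, 64]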
def pytorch_linear(in_sz: int, out_sz: int, unif: float = 0, initializer: str = None, bias: bool = True):
"""Utility function that wraps a linear (AKA dense) layer creation, with options for weight init and bias"""
l = nn.Linear(in_sz, out_sz, bias=bias)
if unif > 0:
l.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(l.weight)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(l.weight)
else:
nn.init.xavier_uniform_(l.weight)
if bias:
l.bias.data.zero_()
return l
class StackedLSTMCell(nn.Module):
"""A stacked LSTM cells applied at a timestep
"""
def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.LSTMCell(input_size=input_size, hidden_size=rnn_size, bias=False))
input_size = rnn_size
def forward(self, input: torch.Tensor, hidden: torch.Tensor):
"""Apply a stack of LSTMs
:param input: The input to the first LSTM `[B, H]`
:param hidden: The previous `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
:return: The output and hidden `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
"""
h_0, c_0 = hidden
hs, cs = [], []
for i, layer in enumerate(self.layers):
h_i, c_i = layer(input, (h_0[i], c_0[i]))
input = h_i
if i != self.num_layers - 1:
input = self.dropout(input)
hs.append(h_i)
cs.append(c_i)
hs = torch.stack(hs)
cs = torch.stack(cs)
return input, (hs, cs)
class StackedGRUCell(nn.Module):
"""A stacked GRU cells applied at a timestep
"""
def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
input_size = rnn_size
def forward(self, input: torch.Tensor, hidden: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply a stack of GRUs
:param input: The input to the first GRU `[B, H]`
:param hidden: The previous `h` where `h=(h_0, h_1,..)`
:return: The output and hidden `h` where `h=(h_0, h_1,..)`
"""
h_0 = hidden
hs = []
for i, layer in enumerate(self.layers):
h_i = layer(input, (h_0[i]))
input = h_i
if i != self.num_layers - 1:
input = self.dropout(input)
hs.append(h_i)
hs = torch.stack(hs)
return input, hs
class Dense(nn.Module):
"""Dense (Linear) layer with optional activation given
This module is the equivalent of the tf.keras.layer.Dense, module with optional activations applied
"""
def __init__(
self,
insz: int,
outsz: int,
activation: Optional[str] = None,
unif: float = 0,
initializer: Optional[str] = None,
):
"""Constructor for "dense" or "linear" layer, with optional activation applied
:param insz: The number of hidden units in the input
:param outsz: The number of hidden units in the output
:param activation: The activation function by name, defaults to `None`, meaning no activation is applied
:param unif: An optional initialization value which can set the linear weights. If given, biases will init to 0
:param initializer: An initialization scheme by string name: `ortho`, `kaiming` or `he`, `xavier` or `glorot`
"""
super().__init__()
self.layer = pytorch_linear(insz, outsz, unif, initializer)
self.activation = get_activation(activation)
self.output_dim = outsz
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Run a linear projection over the input, followed by an optional activation given by constructor
:param input: the input tensor
:return: the transformed output
"""
return self.activation(self.layer(input))
class WeightTieDense(nn.Module):
"""Do weight tying from the input parameter
This module never copies the weight pointer; it accesses it lazily so that the tied variable can reset its parameters
after initialization. This is helpful for cases where we have LMs and are reloading them after they have been
initially created
"""
def __init__(self, tie: nn.Module, bias=False):
super().__init__()
self.tie = tie
self.transform = self._get_transform(tie)
if bias:
bias = torch.nn.Parameter(torch.zeros(self.transform(self.weight).shape[0]))
else:
bias = None
self.register_parameter("bias", bias)
def _get_transform(self, tie: nn.Module):
emb = getattr(tie, "embeddings", None)
if emb is not None:
return self._identity
return self._transpose
@property
def weight(self):
emb = getattr(self.tie, "embeddings", None)
if emb is not None:
return getattr(emb, "weight")
return getattr(self.tie, "weight")
def _identity(self, x: torch.Tensor) -> torch.Tensor:
return x
def _transpose(self, x: torch.Tensor) -> torch.Tensor:
return x.transpose(0, 1).contiguous()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.linear(input, self.transform(self.weight), self.bias)
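# Hedged usage sketch (editor-added, hypothetical modules): tie an output projection
# to an input nn.Linear; the tied weight is transposed lazily at forward time.
# >>> inp_proj = nn.Linear(1000, 64)            # weight is [64, 1000]
# >>> out_proj = WeightTieDense(inp_proj)
# >>> logits = out_proj(torch.randn(2, 5, 64))  # -> [2, 5, 1000]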
class ResidualBlock(nn.Module):
"""Create a residual block by wrapping an layer with a residual connection"""
def __init__(self, layer: Optional[nn.Module] = None, **kwargs):
"""Wrap an layer with a residual connection
:param layer: This layer will be applied to the input and added to the input
:param kwargs:
"""
super().__init__()
self.layer = layer
if self.layer is not None and hasattr(layer, "output_dim"):
self.output_dim = layer.output_dim
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Apply a residual block
:param input: A tensor to use as input and to add to output
:return: The residual connection output
"""
return input + self.layer(input)
class SkipConnection(ResidualBlock):
"""Subclass of ResidualBlock(Dense) with an activation function given
"""
def __init__(self, input_size: int, activation: str = "relu"):
"""Create a `SkipConnection`
:param input_size: The input dimension size
:param activation: A string activation name
"""
super().__init__(None)
self.layer = Dense(input_size, input_size, activation=activation)
self.output_dim = input_size
def rnn_cell(insz: int, hsz: int, rnntype: str, nlayers: int, dropout: float):
"""This is a wrapper function around a stacked RNN cell
:param insz: The input dimensions
:param hsz: The hidden dimensions
:param rnntype: An RNN type `gru` or `lstm`
:param nlayers: The number of layers to stack
:param dropout: The amount of dropout
:return:
"""
if rnntype == "gru":
rnn = StackedGRUCell(nlayers, insz, hsz, dropout)
else:
rnn = StackedLSTMCell(nlayers, insz, hsz, dropout)
return rnn
def pytorch_lstm(
insz: int,
hsz: int,
rnntype: str,
nlayers: int,
dropout: float,
unif: float = 0,
batch_first: bool = False,
initializer: str = None,
) -> torch.nn.LSTM:
"""Wrapper around `torch.nn.LSTM`, mainly for weight initialization options
:param insz: The input dimension
:param hsz: The number of hidden units
:param rnntype: A string description of the type of LSTM: `bi?lstm` or `lstm`
:param nlayers: The number of layers
:param dropout: How much dropout to apply
:param unif: if uniform initialization, what range?
:param batch_first: Should we do the RNN batch first or time first
:param initializer: An optional string representing a style of initialization `ortho`, `he`/`kaiming`, `xavier`/`glorot`
:return: An LSTM
"""
if nlayers == 1:
dropout = 0.0
ndir = 2 if rnntype.startswith("b") else 1
layer_hsz = hsz // ndir
rnn = torch.nn.LSTM(
insz, layer_hsz, nlayers, dropout=dropout, bidirectional=True if ndir > 1 else False, batch_first=batch_first
) # , bias=False)
if initializer == "ortho":
nn.init.orthogonal(rnn.weight_hh_l0)
nn.init.orthogonal(rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(rnn.weight_hh_l0)
nn.init.kaiming_uniform(rnn.weight_ih_l0)
elif unif > 0:
for weight in rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(rnn.weight_hh_l0)
nn.init.xavier_uniform_(rnn.weight_ih_l0)
return rnn
class LSTMEncoderBase(nn.Module):
"""The LSTM encoder is a base for a set of encoders producing various outputs.
All LSTM encoders inheriting this class will trim the input to the max length given in the batch. For example,
if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
be length `S` (or more precisely, `[B, S, H]`)
*PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this
is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
set `batch_first=True`.
*PyTorch Note*:
Most `LSTMEncoder` variants just define the `forward`. This module cannot provide the same utility as the
TensorFlow `LSTMEncoder` base right now, because the JIT isn't handling subclassing of `forward` properly.
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
requires_length: bool = True,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""Produce a stack of LSTMs with dropout performed on all but the last layer.
:param insz: The size of the input
:param hsz: The number of hidden units per LSTM
:param nlayers: The number of layers of LSTMs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param batch_first: PyTorch only! Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = requires_length
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
if initializer == "ortho":
nn.init.orthogonal(self.rnn.weight_hh_l0)
nn.init.orthogonal(self.rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(self.rnn.weight_hh_l0)
nn.init.kaiming_uniform(self.rnn.weight_ih_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
self.output_dim = hsz
# def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
# tbc, lengths = tensor_and_lengths(inputs)
# packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths, batch_first=self.batch_first)
# output, hidden = self.rnn(packed)
# output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
# return self.output_fn(output, hidden)
# def output_fn(self, output, state):
# return output, self.extract_top_state(state)
def extract_top_state(self, state: Tuple[torch.Tensor, torch.Tensor]) -> List[torch.Tensor]:
"""Get a view of the top state of shape [B, H]`
:param state:
:return:
"""
# Select the topmost state with -1 and the only direction is forward (select with 0)
top = []
for s in state:
top.append(s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0])
return top
class LSTMEncoderSequence(LSTMEncoderBase):
"""LSTM encoder to produce the transduced output sequence.
Takes a tuple of tensor, shape `[B, T, C]` and a lengths of shape `[B]` and produce an output sequence of
shape `[B, S, H]` where `S = max(lengths)`. The lengths of the output sequence may differ from the input
sequence if the `max(lengths)` given is shorter than `T` during execution.
*PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`,
and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape is of the same orientation.
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of LSTMs
The value `S` here is defined as `max(lengths)`, `S <= T`
:param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]`
:return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output
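# Hedged usage sketch (editor-added): time-first input by default; lengths must be
# sorted descending because pack_padded_sequence defaults to enforce_sorted=True.
# >>> enc = LSTMEncoderSequence(insz=32, hsz=64, nlayers=1)
# >>> x = torch.randn(9, 4, 32)                 # [T, B, C]
# >>> out = enc((x, torch.tensor([9, 9, 5, 3])))  # -> [9, 4, 64]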
class LSTMEncoderWithState(nn.Module):
"""LSTM encoder producing the hidden state and the output, where the input doesnt require any padding
PyTorch note: This type of encoder doesnt inherit the `LSTMEncoderWithState` base
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""
:param insz: The size of the input
:param hsz: The number of hidden units per LSTM
:param nlayers: The number of layers of LSTMs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param batch_first: PyTorch only! do batch first or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = False
self.requires_state = True
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
if initializer == "ortho":
nn.init.orthogonal(self.rnn.weight_hh_l0)
nn.init.orthogonal(self.rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(self.rnn.weight_hh_l0)
nn.init.kaiming_uniform(self.rnn.weight_ih_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
self.output_dim = hsz
def forward(self, input_and_prev_h: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param input_and_prev_h: The input at this timestep and the previous hidden unit or `None`
:return: Raw `torch.nn.LSTM` output
"""
inputs, hidden = input_and_prev_h
output, hidden = self.rnn(inputs, hidden)
return output, hidden ##concat_state_dirs(hidden)
class LSTMEncoderAll(LSTMEncoderBase):
"""LSTM encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a tuple of hidden vector `[L, B, H]` and context vector `[L, B, H]`, respectively
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting
of `batch_first`. Note that in PyTorch, `batch_first` defaults to `False` (time-first)
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
:return: An output tensor `[B, S, H]` or `[S, B, H]`, and a tuple of hidden `[L, B, H]` and context `[L, B, H]`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, hidden
class LSTMEncoderHidden(LSTMEncoderBase):
"""LSTM encoder that returns the top hidden state
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and
returns a hidden unit tensor of shape `[B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting
of `batch_first`. Note that in PyTorch, `batch_first` defaults to `False` (time-first)
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
:return: An output tensor of shape `[B, H]` representing the last RNN's hidden state
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(hidden)[0]
# TODO: this module only exists in pytorch. Do we eliminate it or put it in both?
class LSTMEncoderSequenceHiddenContext(LSTMEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, self.extract_top_state(hidden)
class BiLSTMEncoderBase(nn.Module):
"""BiLSTM encoder base for a set of encoders producing various outputs.
All BiLSTM encoders inheriting this class will trim the input to the max length given in the batch. For example,
if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
be length `S` (or more precisely, `[B, S, H]`). Because it's bidirectional, half of the hidden units given in the
constructor will be applied to the forward direction and half to the backward direction, and these will get
concatenated.
*PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this
is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
set `batch_first=True`.
*PyTorch Note*:
Most `BiLSTMEncoder` variants just define the `forward`. This module cannot provide the same utility as the
TensorFlow `BiLSTMEncoder` base right now, because the JIT isn't handling subclassing of `forward` properly.
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
requires_length: bool = True,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""Produce a stack of LSTMs with dropout performed on all but the last layer.
:param insz: The size of the input
:param hsz: The number of hidden units per BiLSTM (`hsz//2` used for each direction and concatenated)
:param nlayers: The number of layers of BiLSTMs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = requires_length
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.LSTM(insz, hsz // 2, nlayers, dropout=pdrop, bidirectional=True, batch_first=batch_first)
if initializer == "ortho":
nn.init.orthogonal(self.rnn.weight_hh_l0)
nn.init.orthogonal(self.rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(self.rnn.weight_hh_l0)
nn.init.kaiming_uniform(self.rnn.weight_ih_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
self.output_dim = hsz
def extract_top_state(self, state):
# Select the topmost state with -1 and the only direction is forward (select with 0)
return tuple(s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0] for s in state)
# TODO: this module only exists in pytorch. Do we eliminate it or put it in both?
class BiLSTMEncoderSequenceHiddenContext(BiLSTMEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, self.extract_top_state(concat_state_dirs(hidden))
class BiLSTMEncoderAll(BiLSTMEncoderBase):
"""BiLSTM encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a tuple of hidden vector `[L, B, H]` and context vector `[L, B, H]`, respectively
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting
of `batch_first`. Note that in PyTorch, `batch_first` defaults to `False` (time-first)
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
:return: An output tensor `[B, S, H]` or `[S, B, H]`, and a tuple of hidden `[L, B, H]` and context `[L, B, H]`
"""
tensor, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, concat_state_dirs(hidden)
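# Hedged usage sketch (editor-added): hsz is split across the two directions, so the
# concatenated output is still hsz wide; lengths are sorted descending for packing.
# >>> enc = BiLSTMEncoderAll(insz=32, hsz=64, nlayers=2, batch_first=True)
# >>> x = torch.randn(4, 11, 32)                # [B, T, C]
# >>> out, (h, c) = enc((x, torch.tensor([11, 8, 8, 2])))   # out -> [4, 11, 64]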
class BiLSTMEncoderSequence(BiLSTMEncoderBase):
"""BiLSTM encoder to produce the transduced output sequence.
Takes a tuple of tensor, shape `[B, T, C]` and a lengths of shape `[B]` and produce an output sequence of
shape `[B, S, H]` where `S = max(lengths)`. The lengths of the output sequence may differ from the input
sequence if the `max(lengths)` given is shorter than `T` during execution.
*PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`,
and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape is of the same orientation.
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of LSTMs
The value `S` here is defined as `max(lengths)`, `S <= T`
:param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]`
:return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first`
"""
tensor, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output
class BiLSTMEncoderHidden(BiLSTMEncoderBase):
"""BiLSTM encoder that returns the top hidden state
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and
returns a hidden unit tensor of shape `[B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting
of `batch_first`. Note that in PyTorch, `batch_first` defaults to `False` (time-first)
"""
def forward(self, inputs):
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
:return: An output tensor of shape `[B, H]` representing the last RNN's hidden state
"""
tensor, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(concat_state_dirs(hidden))[0]
# TODO: Add this to TF or remove
class BiLSTMEncoderHiddenContext(BiLSTMEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(concat_state_dirs(hidden))
class GRUEncoderBase(nn.Module):
"""The GRU encoder is a base for a set of encoders producing various outputs.
All GRU encoders inheriting this class will trim the input to the max length given in the batch. For example,
if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
be length `S` (or more precisely, `[B, S, H]`)
*PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this
is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
set `batch_first=True`.
*PyTorch Note*:
Most `GRUEncoder` variants just define the `forward`. This module cannot provide the same utility as the
TensorFlow `GRUEncoder` base right now, because the JIT isn't handling subclassing of `forward` properly.
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
requires_length: bool = True,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""Produce a stack of GRUs with dropout performed on all but the last layer.
:param insz: The size of the input
:param hsz: The number of hidden units per GRU
:param nlayers: The number of layers of GRUs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param batch_first: PyTorch only! Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = requires_length
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.GRU(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
if initializer == "ortho":
nn.init.orthogonal_(self.rnn.weight_ih_l0)
nn.init.orthogonal_(self.rnn.weight_hh_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
self.output_dim = hsz
def extract_top_state(self, state: torch.Tensor) -> torch.Tensor:
return state[-1]
class GRUEncoderSequence(GRUEncoderBase):
"""GRU encoder to produce the transduced output sequence.
Takes a tuple of tensor, shape `[B, T, C]` and a lengths of shape `[B]` and produce an output sequence of
shape `[B, S, H]` where `S = max(lengths)`. The lengths of the output sequence may differ from the input
sequence if the `max(lengths)` given is shorter than `T` during execution.
*PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`,
and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape is of the same orientation.
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Take in a tuple of the sequence tensor `[T, B, H]` or `[B, T, H]` and its length, produce output sequence
:param inputs: A tuple of the sequence tensor and its length
:return: A sequence tensor of shape `[T, B, H]` or `[B, T, H]`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output
class GRUEncoderAll(GRUEncoderBase):
"""GRU encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a hidden vector `[L, B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting
of `batch_first`. Note that in PyTorch, `batch_first` defaults to `False` (time-first)
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
:return: An output tensor `[B, S, H]` or `[S, B, H]`, and a hidden tensor `[L, B, H]`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, hidden
class GRUEncoderHidden(GRUEncoderBase):
"""GRU encoder that returns the top hidden state
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and
returns a hidden unit tensor of shape `[B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting
of `batch_first`. Note that in PyTorch, `batch_first` defaults to `False` (time-first)
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
:return: An output tensor of shape `[B, H]` representing the last RNN's hidden state
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(hidden)
class BiGRUEncoderBase(nn.Module):
"""BiGRU encoder base for a set of encoders producing various outputs.
All BiGRU encoders inheriting this class will trim the input to the max length given in the batch. For example,
if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
be length `S` (or more precisely, `[B, S, H]`). Because it's bidirectional, half of the hidden units given in the
constructor will be applied to the forward direction and half to the backward direction, and these will get
concatenated.
*PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this
is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
set `batch_first=True`.
*PyTorch Note*:
Most `BiGRUEncoder` variants just define the `forward`. This module cannot provide the same utility as the
TensorFlow `BiGRUEncoder` base right now, because the JIT isn't handling subclassing of `forward` properly.
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
requires_length: bool = True,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""Produce a stack of GRUs with dropout performed on all but the last layer.
:param insz: The size of the input
:param hsz: The number of hidden units per BiGRU (`hsz//2` used for each direction and concatenated)
:param nlayers: The number of layers of BiGRUs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = requires_length
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.GRU(insz, hsz // 2, nlayers, dropout=pdrop, bidirectional=True, batch_first=batch_first)
if initializer == "ortho":
nn.init.orthogonal(self.rnn.weight_hh_l0)
nn.init.orthogonal(self.rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(self.rnn.weight_hh_l0)
nn.init.kaiming_uniform(self.rnn.weight_ih_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
self.output_dim = hsz
def extract_top_state(self, state: torch.Tensor) -> torch.Tensor:
# Select the topmost state with -1 and the only direction is forward (select with 0)
return state[-1]
# TODO: normalize across backends or remove
class BiGRUEncoderSequenceHiddenContext(BiGRUEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, self.extract_top_state(_cat_dir(hidden))
class BiGRUEncoderAll(BiGRUEncoderBase):
"""BiGRU encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a hidden vector `[L, B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting
of `batch_first`. Note that in PyTorch, `batch_first` defaults to `False` (time-first)
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
:return: An output tensor `[B, S, H]` or `[S, B, H]`, and a hidden vector `[L, B, H]`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, _cat_dir(hidden)
class BiGRUEncoderSequence(BiGRUEncoderBase):
"""BiGRU encoder to produce the transduced output sequence.
Takes a tuple of tensor, shape `[B, T, C]` and a lengths of shape `[B]` and produce an output sequence of
shape `[B, S, H]` where `S = max(lengths)`. The lengths of the output sequence may differ from the input
sequence if the `max(lengths)` given is shorter than `T` during execution.
*PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`,
and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape is of the same orientation.
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of GRUs
The value `S` here is defined as `max(lengths)`, `S <= T`
:param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]`
:return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output
class BiGRUEncoderHidden(BiGRUEncoderBase):
"""GRU encoder that returns the top hidden state
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and
returns a hidden unit tensor of shape `[B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting
of `batch_first`. Note that in PyTorch, `batch_first` defaults to `False` (time-first)
"""
def forward(self, inputs):
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
:return: An output tensor of shape `[B, H]` representing the last RNN's hidden state
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(_cat_dir(hidden))
class Reduction(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
pass
def set_output_dim(self, output_dims: List[int]):
pass
class ConcatReduction(Reduction):
def __init__(self, output_dims: List[int], axis=-1, **kwargs):
super().__init__()
self.axis = axis
self.set_output_dim(output_dims)
def set_output_dim(self, output_dims: List[int]):
self.output_dim = sum(output_dims)
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
return torch.cat(inputs, self.axis)
class ConcatSubtractReduction(Reduction):
"""This reduction assumes paired input and subtracts the two to get a distance
It is useful for training sentence encoders and is used, for example, in SentenceBERT
For this to work we assume that the inputs are paired, and subtract them
"""
def __init__(self, output_dims: List[int], axis=-1, **kwargs):
super().__init__()
self.axis = axis
self.set_output_dim(output_dims)
def set_output_dim(self, output_dims: List[int]):
self.output_dim = 3 * output_dims[0]
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
sub = torch.abs(inputs[0] - inputs[1])
return torch.cat([inputs[0], inputs[1], sub], self.axis)
class SumReduction(Reduction):
def __init__(self, output_dims: List[int], **kwargs):
super().__init__()
self.set_output_dim(output_dims)
def set_output_dim(self, output_dims: List[int]):
# We could actually project if we needed, or at least should validate
self.output_dim = output_dims[0]
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
return sum(inputs)
class SumLayerNormReduction(Reduction):
def __init__(self, output_dims: List[int], layer_norm_eps: float = 1.0e-12, **kwargs):
super().__init__()
self.set_output_dim(output_dims)
self.ln = nn.LayerNorm(self.output_dim, eps=layer_norm_eps)
def set_output_dim(self, output_dims: List[int]):
self.output_dim = output_dims[0]
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
output = sum(inputs)
return self.ln(output)
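# Illustrative sketch (not part of the library API): the Reduction subclasses above combine a
# list of per-feature embedding tensors into a single tensor. Shapes below are made up for the example.
def _example_reductions():
    word = torch.randn(2, 5, 100)   # [B, T, D1]
    char = torch.randn(2, 5, 30)    # [B, T, D2]
    concat = ConcatReduction([100, 30])
    assert concat([word, char]).shape == (2, 5, 130) and concat.output_dim == 130
    # SumReduction assumes every input shares the same embedding dim
    summed = SumReduction([100, 100])
    assert summed([word, torch.randn(2, 5, 100)]).shape == (2, 5, 100)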
class EmbeddingsStack(nn.Module):
def __init__(
self,
embeddings_dict: Dict[str, nn.Embedding],
dropout_rate: float = 0.0,
requires_length: bool = False,
reduction: Optional[Union[str, nn.Module]] = 'concat',
**kwargs,
):
"""Takes in a dictionary where the keys are the input tensor names, and the values are the embeddings
:param embeddings_dict: dictionary of each feature embedding
:param dropout_rate: The dropout rate (0.0 means no dropout, 1.0 means complete)
"""
super().__init__()
self._keys: List[str] = []
embeddings_list = []
output_dims = []
for k, embedding in embeddings_dict.items():
embeddings_list.append(embedding)
self._keys.append(k)
output_dims += [embedding.get_dsz()]
self.embeddings: nn.ModuleList = nn.ModuleList(embeddings_list)
# TODO: should we make a registry of options?
if isinstance(reduction, str):
if reduction == 'sum':
self.reduction = SumReduction(output_dims)
elif reduction == 'sum-layer-norm':
self.reduction = SumLayerNormReduction(output_dims, layer_norm_eps=kwargs.get('layer_norm_eps', 1.0e-12))
elif reduction == 'concat-subtract':
self.reduction = ConcatSubtractReduction(output_dims)
else:
self.reduction = ConcatReduction(output_dims)
else:
self.reduction = reduction
self.reduction.set_output_dim(output_dims)
self.dsz = self.reduction.output_dim
self.dropout = nn.Dropout(dropout_rate)
self.requires_length = requires_length
def __getitem__(self, item: str) -> nn.Module:
if item not in self._keys:
raise Exception(f"Invalid item ({item})")
idx = self._keys.index(item)
return self.embeddings[idx]
def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
"""This method performs "embedding" of the inputs. The base method here then concatenates along depth
dimension to form word embeddings
:return: A 3-d vector where the last dimension is the concatenated dimensions of all embeddings
"""
all_embeddings_out = []
i = 0
for embedding in self.embeddings:
k = self._keys[i]
x = inputs[k]
# It's a hair faster to do this than using isinstance
if x.__class__ == tuple:
embeddings_out = embedding(*x)
else:
embeddings_out = embedding(x)
all_embeddings_out.append(embeddings_out)
i += 1
word_embeddings = self.reduction(all_embeddings_out)
return self.dropout(word_embeddings)
def keys(self):
return self._keys
@property
def output_dim(self):
return self.dsz
def items(self):
for k, v in zip(self.keys(), self.embeddings):
yield k, v
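# Illustrative sketch only: EmbeddingsStack expects embedding modules that expose get_dsz(), as the
# library's embedding classes do. _ToyEmb is a stand-in defined here purely for the example.
class _ToyEmb(nn.Module):
    def __init__(self, vsz: int, dsz: int):
        super().__init__()
        self.embed = nn.Embedding(vsz, dsz)
        self.dsz = dsz
    def get_dsz(self) -> int:
        return self.dsz
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.embed(x)
def _example_embeddings_stack():
    stack = EmbeddingsStack({"word": _ToyEmb(100, 32), "char": _ToyEmb(50, 16)})
    batch = {"word": torch.randint(0, 100, (4, 7)), "char": torch.randint(0, 50, (4, 7))}
    out = stack(batch)  # default 'concat' reduction along the last dim
    assert out.shape == (4, 7, stack.output_dim) and stack.output_dim == 48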
class DenseStack(nn.Module):
"""A stack of one or more hidden layers
"""
def __init__(
self,
insz: int,
hsz: Union[int, List[int]],
activation: Union[str, List[str]] = "relu",
pdrop_value: float = 0.5,
init=None,
skip_connect=False,
layer_norm=False,
**kwargs,
):
"""Stack 1 or more hidden layers, optionally (forming an MLP)
:param insz: The number of input units
:param hsz: The number of hidden units
:param activation: The name of the activation function to use
:param pdrop_value: The dropout probability
:param init: The initializer
:param skip_connect: whether to use a skip connection when insz equals hsz for a layer
:param layer_norm: whether to apply layer normalization in each layer
"""
super().__init__()
hszs = listify(hsz)
self.output_dim = hszs[-1]
activations = listify(activation)
if len(activations) == 1:
activations = activations * len(hszs)
if len(activations) != len(hszs):
raise ValueError("Number of activations must match number of hidden sizes in a stack!")
current = insz
layer_stack = []
if layer_norm:
layer_norm_eps = kwargs.get('layer_norm_eps', 1e-6)
for hsz, activation in zip(hszs, activations):
if skip_connect and current == hsz:
layer = SkipConnection(current, activation)
else:
layer = Dense(current, hsz, activation)
if layer_norm:
layer = nn.Sequential(layer, nn.LayerNorm(hsz, eps=layer_norm_eps))
layer_stack.append(WithDropout(layer, pdrop_value))
current = hsz
self.layer_stack = nn.Sequential(*layer_stack)
self.requires_length = False
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Stack 1 or more hidden layers, optionally (forming an MLP)
:param inputs: The fixed representation of the model
:Keyword Arguments:
* *hsz* -- (``int``) The number of hidden units (defaults to `100`)
:return: The final layer
"""
return self.layer_stack(inputs)
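# Illustrative sketch: a small two-layer MLP head built with DenseStack; sizes are arbitrary.
def _example_dense_stack():
    mlp = DenseStack(300, [256, 128], activation="relu", pdrop_value=0.1)
    y = mlp(torch.randn(8, 300))
    assert y.shape == (8, 128) and mlp.output_dim == 128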
class VectorSequenceAttention(nn.Module):
def __init__(self, hsz: int):
super().__init__()
self.hsz = hsz
self.W_c = nn.Linear(2 * self.hsz, hsz, bias=False)
def forward(self, query_t, keys_bth, values_bth, keys_mask=None):
# Output(t) = B x H x 1
# Keys = B x T x H
# a = B x T x 1
a = self._attention(query_t, keys_bth, keys_mask)
attended = self._update(a, query_t, values_bth)
return attended
def _attention(self, query_t, keys_bth, keys_mask):
pass
def _update(self, a, query_t, values_bth):
# a = B x T
# Want to apply over context, scaled by a
# (B x 1 x T) (B x T x H) = (B x 1 x H)
a = a.view(a.size(0), 1, a.size(1))
c_t = torch.bmm(a, values_bth).squeeze(1)
attended = torch.cat([c_t, query_t], -1)
attended = torch.tanh(self.W_c(attended))
return attended
def dot_product_attention_weights(query_t: torch.Tensor,
keys_bth: torch.Tensor,
keys_mask: torch.Tensor) -> torch.Tensor:
a = keys_bth @ query_t.unsqueeze(2)
a = a.squeeze(2).masked_fill(keys_mask == MASK_FALSE, -1e9)
a = F.softmax(a, dim=-1)
return a
def dot_product_attention_weights_lengths(query_t: torch.Tensor,
keys_bth: torch.Tensor,
keys_lengths: torch.Tensor) -> torch.Tensor:
mask = sequence_mask(keys_lengths, keys_bth.shape[1]).to(keys_bth.device)
return dot_product_attention_weights(query_t, keys_bth, mask)
class LuongDotProductAttention(VectorSequenceAttention):
def __init__(self, hsz):
super().__init__(hsz)
def _attention(self, query_t, keys_bth, keys_mask):
return dot_product_attention_weights(query_t, keys_bth, keys_mask)
class ScaledDotProductAttention(VectorSequenceAttention):
def __init__(self, hsz):
super().__init__(hsz)
def _attention(self, query_t, keys_bth, keys_mask):
a = (keys_bth @ query_t.unsqueeze(2)) / math.sqrt(self.hsz)
a = a.squeeze(2).masked_fill(keys_mask == MASK_FALSE, -1e9)
a = F.softmax(a, dim=-1)
return a
class LuongGeneralAttention(VectorSequenceAttention):
def __init__(self, hsz):
super().__init__(hsz)
self.W_a = nn.Linear(self.hsz, self.hsz, bias=False)
def _attention(self, query_t, keys_bth, keys_mask):
a = keys_bth @ self.W_a(query_t).unsqueeze(2)
a = a.squeeze(2).masked_fill(keys_mask == MASK_FALSE, -1e9)
a = F.softmax(a, dim=-1)
return a
class BahdanauAttention(VectorSequenceAttention):
def __init__(self, hsz):
super().__init__(hsz)
self.hsz = hsz
self.W_a = nn.Linear(self.hsz, self.hsz, bias=False)
self.E_a = nn.Linear(self.hsz, self.hsz, bias=False)
self.v = nn.Linear(self.hsz, 1, bias=False)
def _attention(self, query_t, keys_bth, keys_mask):
B, T, H = keys_bth.shape
q = self.W_a(query_t.view(-1, self.hsz)).view(B, 1, H)
u = self.E_a(keys_bth).view(B, T, H)
z = torch.tanh(q + u)
a = self.v(z.view(-1, self.hsz)).view(B, T)
a = a.masked_fill(keys_mask == MASK_FALSE, -1e9)
a = F.softmax(a, dim=-1)
return a
def _update(self, a, query_t, values_bth):
query_t = query_t.view(-1, self.hsz)
# a = B x T
# Want to apply over context, scaled by a
# (B x 1 x T) (B x T x H) = (B x 1 x H) -> (B x H)
a = a.view(a.size(0), 1, a.size(1))
c_t = (a @ values_bth).squeeze(1)
# (B x 2H)
attended = torch.cat([c_t, query_t], -1)
attended = self.W_c(attended)
return attended
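# Illustrative sketch: attend a single query vector over a padded sequence of keys/values using
# LuongDotProductAttention. Assumes sequence_mask(lengths, T) yields a [B, T] mask, as it is used
# elsewhere in this module.
def _example_vector_sequence_attention():
    B, T, H = 4, 9, 64
    attn = LuongDotProductAttention(H)
    query_t = torch.randn(B, H)
    keys = values = torch.randn(B, T, H)
    lengths = torch.tensor([9, 5, 7, 3])
    mask = sequence_mask(lengths, T)
    out = attn(query_t, keys, values, mask)
    assert out.shape == (B, H)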
class FineTuneModel(nn.Module):
def __init__(self, nc, embeddings, stack_model=None):
super().__init__()
if isinstance(embeddings, dict):
self.finetuned = EmbeddingsStack(embeddings)
else:
self.finetuned = embeddings
self.stack_model = stack_model
output_dim = self.finetuned.output_dim if stack_model is None else stack_model.output_dim
self.output_layer = Dense(output_dim, nc, activation="log_softmax")
def forward(self, inputs):
base_layers = self.finetuned(inputs)
stacked = self.stack_model(base_layers) if self.stack_model is not None else base_layers
return self.output_layer(stacked)
class CompositePooling(nn.Module):
"""Composite pooling allows for multiple sub-modules during pooling to be used in parallel
"""
def __init__(self, models):
"""
Note, this currently requires that each submodel is an eight_mile model with an `output_dim` attr
"""
super().__init__()
self.models = nn.ModuleList(models)
self.output_dim = sum(m.output_dim for m in self.models)
self.requires_length = any(getattr(m, "requires_length", False) for m in self.models)
def forward(self, inputs):
inputs, lengths = tensor_and_lengths(inputs)
pooled = []
for sub_model in self.models:
if getattr(sub_model, "requires_length", False):
pooled.append(sub_model((inputs, lengths)))
else:
pooled.append(sub_model(inputs))
return torch.cat(pooled, -1)
class EmbedPoolStackModel(nn.Module):
"""This provides an idiom for classification consisting of multiple phases
In the first phase, we embed the input tensors, and subsequently pool them to
a fixed width representation. Finally, we allow multiple hidden "stacking"
layers, ultimately ending in a projection to the output space
"""
def __init__(
self,
nc: int,
embeddings: nn.Module,
pool_model: nn.Module,
stack_model: Optional[nn.Module] = None,
output_model: Optional[nn.Module] = None,
):
super().__init__()
self.embed_model = embeddings
self.pool_model = pool_model
self.stack_model = stack_model if stack_model else nn.Identity()
output_dim = self.pool_model.output_dim if stack_model is None else stack_model.output_dim
self.output_layer = Dense(output_dim, nc, activation="log_softmax") if output_model is None else output_model
def forward(self, inputs: Dict[str, torch.Tensor]):
lengths = inputs["lengths"]
embedded = self.embed_model(inputs)
embedded = (embedded, lengths)
pooled = self.pool_model(embedded)
stacked = self.stack_model(pooled)
return self.output_layer(stacked)
class PassThru(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.output_dim = input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
return inputs
class WithoutLength(nn.Module):
"""Wrapper layer to remove lengths from the input
"""
def __init__(self, layer: nn.Module):
super().__init__()
self.layer = layer
self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
return self.layer(inputs[0])
class WithDropout(nn.Module):
"""Wrapper for any layer that surrounds it with dropout"""
def __init__(self, layer: nn.Module, pdrop: float = 0.5, variational=False, batch_first=False):
"""Create a dropout wrapper around the given layer
:param layer: Some sort of layer
:param pdrop: A dropout value
"""
super().__init__()
self.layer = layer
self.dropout = VariationalDropout(pdrop, batch_first=batch_first) if variational else nn.Dropout(pdrop)
self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Apply the layer followed by dropout
:param inputs: input tensor
:return: output transformed by the held layer and subsequent dropout
"""
return self.dropout(self.layer(inputs))
class WithDropoutOnFirst(nn.Module):
"""Wrapper for any layer that surrounds it with dropout
This exists primarily for the LSTMEncoderWithState to allow dropout on the output while
passing back the hidden state
"""
def __init__(self, layer: nn.Module, pdrop: float = 0.5, variational=False):
"""Create a dropout wrapper around the given layer
:param layer: Some sort of layer
:param pdrop: A dropout value
"""
super().__init__()
self.layer = layer
self.dropout = VariationalDropout(pdrop) if variational else nn.Dropout(pdrop)
self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0
def forward(self, inputs: Tuple[torch.Tensor]) -> torch.Tensor:
"""Apply the layer followed by dropout
:param inputs: input tensor
:return: output transformed by the held layer and subsequent dropout
"""
outputs = self.layer(inputs)
return self.dropout(outputs[0]), outputs[1]
def transition_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):
"""Create a mask to enforce span sequence transition constraints.
Returns a Tensor with valid transitions as a 0 and invalid as a 1 for easy use with `masked_fill`
"""
np_mask = transition_mask_np(vocab, span_type, s_idx, e_idx, pad_idx=pad_idx)
return torch.from_numpy(np_mask) == 0
@torch.jit.script
def inplace_assign(data: torch.Tensor, index: torch.Tensor, new_data: torch.Tensor) -> torch.Tensor:
new_data = new_data.unsqueeze(0)
index = index.expand(1, new_data.size(1))
data.scatter_(0, index, new_data)
return data
@torch.jit.script
def i2t(i: int) -> torch.Tensor:
return torch.tensor(i).unsqueeze(0)
@torch.jit.script
def script_viterbi(
unary: torch.Tensor, trans: torch.Tensor, start_idx: int, end_idx: int
) -> Tuple[torch.Tensor, torch.Tensor]:
seq_len: int = unary.size(0)
num_tags: int = unary.size(1)
fill_value: float = -1e4
# dtype=unary.dtype fails, with prim_dtype error on torch 1.7.1
alphas = torch.full((num_tags,), fill_value, dtype=torch.float, device=unary.device)
broadcast_idx = torch.full((num_tags,), start_idx, dtype=torch.long)
alphas = alphas.scatter(0, broadcast_idx, torch.zeros((num_tags,)))
alphas = alphas.unsqueeze(0)
backpointers: torch.Tensor = torch.zeros(num_tags, dtype=torch.long).unsqueeze(0)
for i in range(seq_len):
unary_t = unary[i, :]
next_tag_var = alphas + trans
viterbi, best_tag_ids = torch.max(next_tag_var, 1)
backpointers = torch.cat([backpointers, best_tag_ids.unsqueeze(0)], 0)
alphas = (viterbi + unary_t).unsqueeze(0)
terminal_vars = alphas.squeeze(0) + trans[end_idx, :]
path_score, best_tag_id = torch.max(terminal_vars, 0)
best_path = best_tag_id.unsqueeze(0)
for i in range(unary.size(0)):
t = seq_len - i - 1
best_tag_id = backpointers[t + 1, best_tag_id]
best_path = torch.cat([best_path, best_tag_id.unsqueeze(0)], -1)
new_path_vec = best_path.flip(0)
return new_path_vec[1:], path_score
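# Illustrative sketch: single-sequence Viterbi decoding over random scores. The tag inventory and the
# start/end indices (3 and 4 out of 5 tags) are arbitrary choices for the example.
def _example_script_viterbi():
    T, N = 6, 5
    unary = torch.randn(T, N)
    trans = torch.randn(N, N)
    path, score = script_viterbi(unary, trans, 3, 4)
    assert path.shape == (T,)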
class ViterbiBatchSize1(nn.Module):
def __init__(self, start_idx: int, end_idx: int):
super().__init__()
self.start_idx = start_idx
self.end_idx = end_idx
def forward(self, unary: torch.Tensor, trans: torch.Tensor, _: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
unary = unary.squeeze(1)
trans = trans.squeeze(0)
path, score = script_viterbi(unary, trans, self.start_idx, self.end_idx)
return path.unsqueeze(1), score
class Viterbi(nn.Module):
def __init__(self, start_idx: int, end_idx: int):
super().__init__()
self.start_idx = start_idx
self.end_idx = end_idx
# r, start_idx: int, end_idx: int, norm = lambda x, y: x
def forward(
self, unary: torch.Tensor, trans: torch.Tensor, lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Do Viterbi decode on a batch.
:param unary: torch.FloatTensor: [T, B, N]
:param trans: torch.FloatTensor: [1, N, N]
:param norm: Callable: This function should take the initial and a dim to
normalize along.
:return: torch.LongTensor: [T, B] the padded paths
:return: torch.FloatTensor: [B] the path scores
"""
seq_len, batch_size, tag_size = unary.size()
min_length = torch.min(lengths)
backpointers = []
# Alphas: [B, 1, N]
alphas = torch.full((batch_size, 1, tag_size), -1e4, device=unary.device)
alphas[:, 0, self.start_idx] = 0
# alphas = self.norm(alphas)
for i, unary_t in enumerate(unary):
next_tag_var = alphas + trans
viterbi, best_tag_ids = torch.max(next_tag_var, 2)
backpointers.append(best_tag_ids)
new_alphas = viterbi + unary_t
new_alphas.unsqueeze_(1)
# This part generates a warning
if i >= min_length:
mask = (i < lengths).view(-1, 1, 1)
alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
else:
alphas = new_alphas
# Add end tag
terminal_var = alphas.squeeze(1) + trans[:, self.end_idx, :]
path_score, best_tag_id = torch.max(terminal_var, 1)
# Flip lengths
rev_len = seq_len - lengths - 1
best_path = [best_tag_id]
for i in range(len(backpointers)):
t = len(backpointers) - i - 1
backpointer_t = backpointers[t]
# Get new best tag candidate
new_best_tag_id = backpointer_t.gather(1, best_tag_id.unsqueeze(1)).squeeze(1)
# We are going backwards now; until we have passed this sequence's flipped length,
# we are still in the padding and not yet in its real results
mask = i > rev_len
best_tag_id = best_tag_id.masked_fill(mask, 0) + new_best_tag_id.masked_fill(mask == MASK_FALSE, 0)
best_path.append(best_tag_id)
_ = best_path.pop()
best_path.reverse()
best_path = torch.stack(best_path)
# Mask out the extra tags (This might be pointless given that anything that
# will use this as a dense tensor downstream will mask it itself?)
seq_mask = sequence_mask(lengths, seq_len).to(best_path.device).transpose(0, 1)
best_path = best_path.masked_fill(seq_mask == MASK_FALSE, 0)
return best_path, path_score
@torch.jit.script
def script_viterbi_log_softmax_norm(
unary: torch.Tensor, trans: torch.Tensor, start_idx: int, end_idx: int
) -> Tuple[torch.Tensor, torch.Tensor]:
seq_len: int = unary.size(0)
num_tags: int = unary.size(1)
fill_value: float = -1e4
# dtype=unary.dtype fails, with prim_dtype error on torch 1.7.1
alphas = torch.full((num_tags,), fill_value, dtype=torch.float, device=unary.device)
broadcast_idx = torch.full((num_tags,), start_idx, dtype=torch.long)
alphas = alphas.scatter(0, broadcast_idx, torch.zeros((num_tags,)))
alphas = alphas.unsqueeze(0)
alphas = torch.log(F.softmax(alphas, dim=-1))
backpointers: torch.Tensor = torch.zeros(num_tags, dtype=torch.long).unsqueeze(0)
for i in range(seq_len):
unary_t = unary[i, :]
next_tag_var = alphas + trans
viterbi, best_tag_ids = torch.max(next_tag_var, 1)
backpointers = torch.cat([backpointers, best_tag_ids.unsqueeze(0)], 0)
alphas = (viterbi + unary_t).unsqueeze(0)
terminal_vars = alphas.squeeze(0) + trans[end_idx, :]
path_score, best_tag_id = torch.max(terminal_vars, 0)
best_path = best_tag_id.unsqueeze(0)
for i in range(unary.size(0)):
t = seq_len - i - 1
best_tag_id = backpointers[t + 1, best_tag_id]
best_path = torch.cat([best_path, best_tag_id.unsqueeze(0)], -1)
new_path_vec = best_path.flip(0)
return new_path_vec[1:], path_score
class ViterbiLogSoftmaxNormBatchSize1(nn.Module):
def __init__(self, start_idx: int, end_idx: int):
super().__init__()
self.start_idx = start_idx
self.end_idx = end_idx
def forward(self, unary: torch.Tensor, trans: torch.Tensor, _: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
unary = unary.squeeze(1)
trans = trans.squeeze(0)
path, score = script_viterbi_log_softmax_norm(unary, trans, self.start_idx, self.end_idx)
return path.unsqueeze(1), score
class ViterbiLogSoftmaxNorm(Viterbi):
def forward(
self, unary: torch.Tensor, trans: torch.Tensor, lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Do Viterbi decode on a batch.
:param unary: torch.FloatTensor: [T, B, N]
:param trans: torch.FloatTensor: [1, N, N]
:param norm: Callable: This function should take the initial and a dim to
normalize along.
:return: torch.LongTensor: [T, B] the padded paths
:return: torch.FloatTensor: [B] the path scores
"""
seq_len, batch_size, tag_size = unary.size()
min_length = torch.min(lengths)
backpointers = []
# Alphas: [B, 1, N]
alphas = torch.full((batch_size, 1, tag_size), -1e4, device=unary.device)
alphas[:, 0, self.start_idx] = 0
alphas = F.log_softmax(alphas, dim=-1)
for i, unary_t in enumerate(unary):
next_tag_var = alphas + trans
viterbi, best_tag_ids = torch.max(next_tag_var, 2)
backpointers.append(best_tag_ids)
new_alphas = viterbi + unary_t
new_alphas.unsqueeze_(1)
if i >= min_length:
mask = (i < lengths).view(-1, 1, 1)
alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
else:
alphas = new_alphas
# Add end tag
terminal_var = alphas.squeeze(1) + trans[:, self.end_idx, :]
path_score, best_tag_id = torch.max(terminal_var, 1)
# Flip lengths
rev_len = seq_len - lengths - 1
best_path = [best_tag_id]
for i in range(len(backpointers)):
t = len(backpointers) - i - 1
backpointer_t = backpointers[t]
# Get new best tag candidate
new_best_tag_id = backpointer_t.gather(1, best_tag_id.unsqueeze(1)).squeeze(1)
# We are going backwards now; until we have passed this sequence's flipped length,
# we are still in the padding and not yet in its real results
mask = i > rev_len
best_tag_id = best_tag_id.masked_fill(mask, 0) + new_best_tag_id.masked_fill(mask == MASK_FALSE, 0)
best_path.append(best_tag_id)
_ = best_path.pop()
best_path.reverse()
best_path = torch.stack(best_path)
# Mask out the extra tags (This might be pointless given that anything that
# will use this as a dense tensor downstream will mask it itself?)
seq_mask = sequence_mask(lengths, seq_len).to(best_path.device).transpose(0, 1)
best_path = best_path.masked_fill(seq_mask == MASK_FALSE, 0)
return best_path, path_score
def ident(x):
return x
class TaggerGreedyDecoder(nn.Module):
def __init__(
self,
num_tags: int,
constraint_mask: Optional[torch.Tensor] = None,
batch_first: bool = True,
reduction: str = "batch",
):
"""A Greedy decoder and loss module for taggers.
:param num_tags: `int` The number of output classes
:param constraint_mask: `Tensor[1, N, N]` A mask with valid transitions as 1 and invalid as 0
:param batch_first: `bool` Should the batch dimensions be first?
:param reduction: `str` Should the loss be calculated at the token level or batch level
"""
super().__init__()
self.num_tags = num_tags
if constraint_mask is not None:
constraint_mask = F.log_softmax(
torch.zeros(constraint_mask.shape).masked_fill(constraint_mask, -1e4), dim=1
)
self.register_buffer("constraint_mask", constraint_mask)
else:
self.constraint_mask = None
# FIXME: we can't do it like this if using TorchScript
self.to_batch_first = ident if batch_first else tbh2bth
self.to_time_first = bth2tbh if batch_first else ident
self.batch_first = batch_first
self.loss = SequenceLoss(LossFn=nn.CrossEntropyLoss, avg=reduction)
self.viterbi = ViterbiLogSoftmaxNorm(Offsets.GO, Offsets.EOS)
@property
def transitions(self):
return self.constraint_mask
def neg_log_loss(self, inputs, tags, lengths):
unaries = self.to_batch_first(inputs)
tags = self.to_batch_first(tags)
return self.loss(unaries, tags)
def forward(self, inputs) -> torch.Tensor:
unaries, lengths = tensor_and_lengths(inputs)
# If there is a constraint mask do a masked viterbi
if self.constraint_mask is not None:
probv = self.to_time_first(unaries)
probv = F.log_softmax(probv, dim=-1)
preds, scores = self.viterbi(probv, self.constraint_mask, lengths)
if self.batch_first:
return tbh2bth(preds) # , scores
else:
return preds
else:
# Decoding doesn't care about batch/time first
_, preds = torch.max(unaries, -1)
mask = sequence_mask(lengths, unaries.shape[1]).to(preds.device)
# The mask gets generated as batch first
mask = mask if self.batch_first else mask.transpose(0, 1)
preds = preds.masked_fill(mask == MASK_FALSE, 0)
return preds # , None
def extra_repr(self) -> str:
str_ = f"n_tags={self.num_tags}, batch_first={self.batch_first}"
if self.constraint_mask is not None:
str_ += ", constrained=True"
return str_
class CRF(nn.Module):
def __init__(
self,
num_tags: int,
constraint_mask: Optional[torch.Tensor] = None,
batch_first: bool = True,
idxs: Tuple[int, int] = (Offsets.GO, Offsets.EOS),
):
"""Initialize the object.
:param num_tags: int, The number of tags in your output (emission size)
:param constraint_mask: torch.ByteTensor, Constraints on the transitions [1, N, N]
:param idxs: Tuple(int. int), The index of the start and stop symbol
in emissions.
:param batch_first: bool, if the input [B, T, ...] or [T, B, ...]
Note:
if idxs is none then the CRF adds these symbols to the emission
vectors and n_tags is assumed to be the number of output tags.
if idxs is not none then the first element is assumed to be the
start index and the second idx is assumed to be the end index. In
this case n_tags is assumed to include the start and end symbols.
"""
super().__init__()
self.start_idx, self.end_idx = idxs
self.num_tags = num_tags
if constraint_mask is not None:
self.register_buffer("constraint_mask", constraint_mask)
else:
self.constraint_mask = None
self.transitions_p = nn.Parameter(torch.Tensor(1, self.num_tags, self.num_tags).zero_())
self.batch_first = batch_first
self.viterbi = Viterbi(self.start_idx, self.end_idx)
def extra_repr(self) -> str:
str_ = "n_tags=%d, batch_first=%s" % (self.num_tags, self.batch_first)
if self.constraint_mask is not None:
str_ += ", constrained=True"
return str_
@property
def transitions(self):
if self.constraint_mask is not None:
return self.transitions_p.masked_fill(self.constraint_mask, -1e4)
return self.transitions_p
def neg_log_loss(self, unary, tags, lengths):
"""Neg Log Loss with a Batched CRF.
:param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
:param tags: torch.LongTensor: [T, B] or [B, T]
:param lengths: torch.LongTensor: [B]
:return: torch.FloatTensor: [B]
"""
# Convert from [B, T, N] -> [T, B, N]
if self.batch_first:
unary = unary.transpose(0, 1)
tags = tags.transpose(0, 1)
_, batch_size, _ = unary.size()
fwd_score = self._forward_alg(unary, lengths)
gold_score = self.score_sentence(unary, tags, lengths)
loss = fwd_score - gold_score
batch_loss = torch.mean(loss)
return batch_loss
def score_sentence(self, unary: torch.Tensor, tags: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
"""Score a batch of sentences.
:param unary: torch.FloatTensor: [T, B, N]
:param tags: torch.LongTensor: [T, B]
:param lengths: torch.LongTensor: [B]
:param min_length: torch.LongTensor: []
:return: torch.FloatTensor: [B]
"""
batch_size = lengths.shape[0]
assert lengths.shape[0] == unary.shape[1]
trans = self.transitions.squeeze(0) # [N, N]
start = torch.full((1, batch_size), self.start_idx, dtype=tags.dtype, device=tags.device) # [1, B]
tags = torch.cat([start, tags], 0) # [T + 1, B]
# Unfold gives me all slices of size 2 (this tag next tag) from dimension T
tag_pairs = tags.unfold(0, 2, 1)
# Move the pair dim to the front and split it into two
indices = tag_pairs.permute(2, 0, 1).chunk(2)
trans_score = trans[[indices[1], indices[0]]].squeeze(0)
# Pull out the values of the tags from the unary scores.
unary_score = unary.gather(2, tags[1:].unsqueeze(-1)).squeeze(-1)
mask = sequence_mask(lengths).transpose(0, 1).to(tags.device)
scores = unary_score + trans_score
scores = scores.masked_fill(mask == MASK_FALSE, 0)
scores = scores.sum(0)
eos_scores = trans[self.end_idx, tags.gather(0, lengths.unsqueeze(0)).squeeze(0)]
scores = scores + eos_scores
return scores
def _forward_alg(self, unary: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
"""For CRF forward on a batch.
:param unary: torch.FloatTensor: [T, B, N]
:param lengths: torch.LongTensor: [B]
:return: torch.FloatTensor: [B]
"""
# alphas: [B, 1, N]
min_length = torch.min(lengths)
batch_size = lengths.shape[0]
assert lengths.shape[0] == unary.shape[1]
alphas = torch.full((batch_size, 1, self.num_tags), -1e4, device=unary.device)
alphas[:, 0, self.start_idx] = 0.0
# alphas.requires_grad = True
trans = self.transitions # [1, N, N]
for i, unary_t in enumerate(unary):
# unary_t: [B, N]
unary_t = unary_t.unsqueeze(2) # [B, N, 1]
# Broadcast alphas along the rows of trans
# Broadcast trans along the batch of alphas
# [B, 1, N] + [1, N, N] -> [B, N, N]
# Broadcast unary_t along the cols of result
# [B, N, N] + [B, N, 1] -> [B, N, N]
scores = alphas + trans + unary_t
new_alphas = vec_log_sum_exp(scores, 2).transpose(1, 2)
# If we haven't yet reached this sequence's length, zero out the old alpha and take the new one.
# If we are past this sequence's length, zero out new_alpha and keep the old one.
if i >= min_length:
mask = (i < lengths).view(-1, 1, 1)
alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
else:
alphas = new_alphas
terminal_vars = alphas + trans[:, self.end_idx]
alphas = vec_log_sum_exp(terminal_vars, 2)
return alphas.view(batch_size)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
unary, lengths = inputs
if self.training:
if self.batch_first:
unary = unary.transpose(0, 1)
forward = self._forward_alg(unary, lengths)
# if self.batch_first:
# forward = forward.transpose(0, 1)
return forward
with torch.no_grad():
return self.decode(unary, lengths)[0]
@jit.export
def decode(self, unary: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Do Viterbi decode on a batch.
:param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
:param lengths: torch.LongTensor: [B]
:return: torch.LongTensor: [B] the paths
:return: torch.FloatTensor: [B] the path score
"""
if self.batch_first:
unary = unary.transpose(0, 1)
trans = self.transitions # [1, N, N]
path, score = self.viterbi(unary, trans, lengths)
if self.batch_first:
path = path.transpose(0, 1)
return path, score
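# Illustrative sketch: CRF loss and decoding over random emissions. Assumes the default start/end
# indices (Offsets.GO, Offsets.EOS) are reserved low tag ids that fall within num_tags.
def _example_crf():
    B, T, N = 4, 7, 10
    crf = CRF(N, batch_first=True)
    unary = torch.randn(B, T, N)
    tags = torch.randint(0, N, (B, T))
    lengths = torch.tensor([7, 5, 6, 4])
    loss = crf.neg_log_loss(unary, tags, lengths)
    paths, scores = crf.decode(unary, lengths)
    assert paths.shape == (B, T) and scores.shape == (B,)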
class SequenceModel(nn.Module):
def __init__(self, nc: int, embeddings: nn.Module, transducer: nn.Module, decoder: Optional[nn.Module] = None):
super().__init__()
self.embed_model = embeddings
self.transducer_model = transducer
# TODO: make this a separate model!
if transducer.output_dim != nc:
self.proj_layer = Dense(transducer.output_dim, nc)
else:
self.proj_layer = nn.Identity()
self.decoder_model = decoder
def transduce(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
lengths = inputs["lengths"]
embedded = self.embed_model(inputs)
embedded = (embedded, lengths)
# transduced = self.transducer_model(embedded)
transduced = self.proj_layer(self.transducer_model(embedded))
return transduced
def decode(self, transduced: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
return self.decoder_model((transduced, lengths))
def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
pass
class TagSequenceModel(SequenceModel):
def __init__(self, nc: int, embeddings: nn.Module, transducer: nn.Module, decoder: Optional[nn.Module] = None):
decoder_model = CRF(nc, batch_first=True) if decoder is None else decoder
super().__init__(nc, embeddings, transducer, decoder_model)
def neg_log_loss(self, unary: torch.Tensor, tags: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
return self.decoder_model.neg_log_loss(unary, tags, lengths)
def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
transduced = self.transduce(inputs)
path = self.decode(transduced, inputs["lengths"])
return path
class LangSequenceModel(nn.Module):
def __init__(
self,
nc: int,
embeddings: nn.Module,
transducer: nn.Module,
decoder: Optional[nn.Module] = None,
name: Optional[str] = None,
):
super().__init__()
self.embed_model = embeddings
self.transducer_model = transducer
if hasattr(transducer, "requires_state") and transducer.requires_state:
self._call = self._call_with_state
self.requires_state = True
else:
self._call = self._call_without_state
self.requires_state = False
self.output_layer = nn.Linear(self.transducer_model.output_dim, nc)
self.decoder_model = decoder
def forward(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
return self._call(inputs)
def _call_with_state(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
h = inputs["h"]
embedded = self.embed_model(inputs)
transduced, hidden = self.transducer_model((embedded, h))
transduced = self.output_layer(transduced)
return transduced, hidden
def _call_without_state(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
embedded = self.embed_model(inputs)
transduced = self.transducer_model((embedded, None))
transduced = self.output_layer(transduced)
return transduced, None
def pytorch_embedding(weights: torch.Tensor, finetune: bool = True) -> nn.Embedding:
"""Creation function for making an nn.Embedding with the given weights
:param weights: The weights to use
:param finetune: Should we fine-tune the embeddings or freeze them
"""
lut = nn.Embedding(weights.shape[0], weights.shape[1], padding_idx=Offsets.PAD)
del lut.weight
lut.weight = nn.Parameter(torch.FloatTensor(weights), requires_grad=finetune)
return lut
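# Illustrative sketch: build a frozen lookup table from (random, stand-in) pretrained weights.
def _example_pytorch_embedding():
    weights = torch.randn(1000, 50)
    lut = pytorch_embedding(weights, finetune=False)
    assert lut.weight.requires_grad is False
    assert lut(torch.tensor([[1, 2, 3]])).shape == (1, 3, 50)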
def subsequent_mask(size: int):
"""
Creates a lower triangular mask to mask future positions
:param size: Temporal length
:return: A tensor of type `uint8` that is 1 on and below the diagonal and 0 elsewhere
"""
attn_shape = (1, 1, size, size)
sub_mask = np.tril(np.ones(attn_shape)).astype("uint8")
return torch.from_numpy(sub_mask)
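# Illustrative sketch: the causal (subsequent) mask for a length-4 sequence is 1 on and below the diagonal.
def _example_subsequent_mask():
    m = subsequent_mask(4)
    assert m.shape == (1, 1, 4, 4)
    assert int(m[0, 0, 0, 1]) == 0 and int(m[0, 0, 3, 0]) == 1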
class SequenceSequenceAttention(nn.Module):
def __init__(self, hsz: int = None, pdrop: float = 0.1, **kwargs):
super().__init__()
self.hsz = hsz
self.dropout = nn.Dropout(pdrop)
self.attn = None
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
query, key, value, mask = qkvm
a = self._attention(query, key, mask)
self.attn = a
a = self.dropout(a)
return self._update(a, value)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
pass
def _update(self, a: torch.Tensor, value: torch.Tensor) -> torch.Tensor:
"""Attention weights are applied for each value, but in a series of efficient matrix operations.
In the case of self-attention, the key and query (used to create the attention weights)
and values are all low order projections of the same input.
:param a: The attention weights [B, H, T_q, T_k]
:param values: The values [B, H, T_k, D]
:returns: A tensor of shape [B, H, T_q, D]
"""
return torch.matmul(a, value)
class SeqScaledDotProductAttention(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762
We apply the query to the keys to receive our weights via softmax in a series of efficient
matrix operations. In the case of self-attention the key and query are all low order
projections of the same input.
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: A tensor that is (BxHxTxT)
"""
# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9) # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
return F.softmax(scores, dim=-1)
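# Illustrative sketch: scaled dot-product attention over multi-head-shaped tensors. The mask is a
# [B, 1, 1, T_k] sequence mask that broadcasts over heads and query positions; shapes are arbitrary.
def _example_seq_scaled_dot_product_attention():
    B, H, T, D = 2, 4, 6, 16
    attn = SeqScaledDotProductAttention(pdrop=0.0)
    q = k = v = torch.randn(B, H, T, D)
    mask = sequence_mask(torch.tensor([6, 3]), T).view(B, 1, 1, T)
    out = attn((q, k, v, mask))
    assert out.shape == (B, H, T, D)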
class SeqScaledDotProductAttentionALiBi(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, num_heads=None, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
self.num_heads = num_heads
slopes = torch.tensor(get_alibi_slopes(self.num_heads))
self.register_buffer("slopes", slopes)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Attention with Linear Biases, defined in https://arxiv.org/pdf/2108.12409.pdf
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: A tensor that is (BxHxTxT)
"""
# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
T_k = scores.shape[-1]
T_q = scores.shape[-2]
offsets = - torch.abs(torch.arange(T_q).view(-1, 1) - torch.arange(T_k).view(1, -1)).to(self.slopes.device) # [T_q, T_k]
alibi = self.slopes.unsqueeze(-1).unsqueeze(-1) * offsets.unsqueeze(0) # [H, T_q, T_k]
alibi = alibi.unsqueeze(0) # [1, H, T_q, T_k]
scores += alibi
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9) # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
return F.softmax(scores, dim=-1)
class SeqScaledDotProductAttentionT5(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, num_heads=None, bidirectional=True, num_buckets=32, max_distance=128, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
self.num_heads = num_heads
self.bidirectional = bidirectional
self.num_buckets = num_buckets
self.max_distance = max_distance
rel_embedding = torch.nn.init.kaiming_normal_(torch.empty((self.num_heads, self.num_buckets),
dtype=torch.float), nonlinearity='linear')
self.rel_embedding = nn.Parameter(rel_embedding, requires_grad=True)
def _relative_position_bucket(self, relative_position):
"""Taken from https://github.com/tensorflow/mesh/blob/bbb6ce7917e2a8ef1f3dc6990fcacd4f3b075acd/mesh_tensorflow/transformer/transformer_layers.py#L1014
"""
ret = 0
n = -relative_position
num_buckets = self.num_buckets
if self.bidirectional:
num_buckets //= 2
ret += torch.lt(n, 0).to(dtype=torch.long) * num_buckets
n = torch.abs(n).to(dtype=torch.long)
else:
n = torch.maximum(n, 0).to(dtype=torch.long)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = torch.lt(n, max_exact)
val_if_large = max_exact + (
torch.log(n.to(dtype=torch.float32) / max_exact)
/ math.log(self.max_distance / max_exact) * (num_buckets - max_exact)).to(dtype=torch.long)
val_if_large = torch.minimum(val_if_large, torch.tensor(num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Relative Attention described in https://arxiv.org/abs/1910.10683
:param query: a query for alignment.
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: A tensor that is (BxHxTxT)
"""
# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
T_k = scores.shape[-1]
T_q = scores.shape[-2]
memory_position = torch.arange(T_k).view(1, -1)
query_position = torch.arange(T_q).view(-1, 1)
relative_position = memory_position - query_position
rp_bucket = self._relative_position_bucket(relative_position)
relative_attention_bias = self.rel_embedding[:, rp_bucket]
scores += relative_attention_bias
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9) # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
return F.softmax(scores, dim=-1)
class SeqDotProductAttention(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
scores = torch.matmul(query, key.transpose(-2, -1))
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
class SeqDotProductAttentionALiBi(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, num_heads=None, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
self.num_heads = num_heads
slopes = torch.tensor(get_alibi_slopes(self.num_heads))
self.register_buffer("slopes", slopes)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
scores = torch.matmul(query, key.transpose(-2, -1))
T_k = scores.shape[-1]
T_q = scores.shape[-2]
offsets = - torch.abs(torch.arange(T_q).view(1, -1) - torch.arange(T_k).view(-1, 1)).to(self.slopes.device) # [T_q, T_k]
alibi = self.slopes.unsqueeze(-1).unsqueeze(-1) * offsets.unsqueeze(0) # [H, T_q, T_k]
alibi = alibi.unsqueeze(0) # [1, H, T_q, T_k]
scores += alibi
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
class SeqDotProductAttentionT5(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, num_heads=None, bidirectional=True, num_buckets=32, max_distance=128, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
self.num_heads = num_heads
self.bidirectional = bidirectional
self.num_buckets = num_buckets
self.max_distance = max_distance
rel_embedding = torch.nn.init.kaiming_normal_(torch.empty((self.num_heads, self.num_buckets),
dtype=torch.float), nonlinearity='linear')
self.rel_embedding = nn.Parameter(rel_embedding, requires_grad=True)
def _relative_position_bucket(self, relative_position):
"""Taken from https://github.com/tensorflow/mesh/blob/bbb6ce7917e2a8ef1f3dc6990fcacd4f3b075acd/mesh_tensorflow/transformer/transformer_layers.py#L1014
"""
ret = 0
n = -relative_position
num_buckets = self.num_buckets
if self.bidirectional:
num_buckets //= 2
ret += torch.lt(n, 0).to(dtype=torch.long) * num_buckets
n = torch.abs(n).to(dtype=torch.long)
else:
n = torch.maximum(n, 0).to(dtype=torch.long)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = torch.lt(n, max_exact)
val_if_large = max_exact + (
torch.log(n.to(dtype=torch.float32) / max_exact)
/ math.log(self.max_distance / max_exact) * (num_buckets - max_exact)).to(dtype=torch.long)
val_if_large = torch.minimum(val_if_large, torch.tensor(num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Relative Attention described in https://arxiv.org/abs/1910.10683
:param query: a query for alignment.
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: A tensor that is (BxHxTxT)
"""
# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
scores = torch.matmul(query, key.transpose(-2, -1))
T_k = scores.shape[-1]
T_q = scores.shape[-2]
memory_position = torch.arange(T_k).view(1, -1)
query_position = torch.arange(T_q).view(-1, 1)
relative_position = memory_position - query_position
rp_bucket = self._relative_position_bucket(relative_position)
relative_attention_bias = self.rel_embedding[:, rp_bucket]
scores += relative_attention_bias
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9) # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
return F.softmax(scores, dim=-1)
class SequenceSequenceRelativeAttention(nn.Module):
"""This form of attention is specified in Shaw et al 2018: https://www.aclweb.org/anthology/N18-2074.pdf
"""
def __init__(self, hsz: int = None, pdrop: float = 0.1, **kwargs):
super().__init__()
self.hsz = hsz
self.dropout = nn.Dropout(pdrop)
self.attn = None
def forward(
self, q_k_v_ek_ev_m: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
) -> torch.Tensor:
"""Take in a tuple of tensors corresponding to the query, key, value, edges_key, edges_value and mask variables
:param q_k_v_ek_ev_m: A tuple consisting of query, key, value, `edges_key`, `edges_value` and `mask` respectively
:return: An updated value Tensor
"""
query, key, value, edges_key, edges_value, mask = q_k_v_ek_ev_m
a = self._attention(query, key, edges_key, mask)
self.attn = a
a = self.dropout(a)
return self._update(a, value, edges_value)
def _attention(
self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
pass
def _update(self, a: torch.Tensor, value: torch.Tensor, edges_value: torch.Tensor) -> torch.Tensor:
"""Attention weights are applied for each value, but in a series of efficient matrix operations.
In the case of self-attention, the key and query (used to create the attention weights)
and values are all low order projections of the same input.
:param a: The attention weights [B, H, T_q, T_k]
:param value: The values [B, H, T_k, D]
:param edge_value: The edge values [T_q, T_k, D]
:returns: A tensor of shape [B, H, T, D]
"""
B, H, T_k, D = value.shape
updated_values = torch.matmul(a, value) # [B, H, T_q, D]
if edges_value is not None:
a = a.view(B * H, -1, T_k).transpose(0, 1) # (T_q, BxH, T_k)
t = torch.matmul(a, edges_value) # (T_q, BxH, D)
update_edge_values = t.transpose(0, 1).view(B, H, -1, D)
return updated_values + update_edge_values
else:
return updated_values
class SeqScaledDotProductRelativeAttention(SequenceSequenceRelativeAttention):
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _attention(
self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762
We apply the query to the keys to receive our weights via softmax in a series of efficient
matrix operations. In the case of self-attention, the key and query are all low order
projections of the same input.
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:param edges_key: a matrix of relative embeddings between each word in a sequence [T_q x T_k x D]
:return: A tensor that is (B x H x T_q x T_k)
"""
B, H, T_q, d_k = query.shape # (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
scores_qk = torch.matmul(query, key.transpose(-2, -1))
tbhd = query.reshape(B * H, T_q, d_k).transpose(0, 1) # [T_q, B*H, d_k]
scores_qek = torch.matmul(tbhd, edges_key.transpose(-2, -1)) # [T_q, B*H, T_k]
scores_qek = scores_qek.transpose(0, 1).view(B, H, T_q, -1) # [B, H, T_q, T_k]
scores = (scores_qk + scores_qek) / math.sqrt(d_k)
# only for cross-attention T_q != T_k. for such case, mask should be src_mask, which is a sequence_mask with
# dimension [B, 1, 1, T_k], and will be broadcast to dim of scores:
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
class SeqDotProductRelativeAttention(SequenceSequenceRelativeAttention):
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _attention(
self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
B, H, T_q, d_k = query.shape
scores_qk = torch.matmul(query, key.transpose(-2, -1))
tbhd = query.reshape(B * H, T_q, d_k).transpose(0, 1)
scores_qek = torch.matmul(tbhd, edges_key.transpose(-2, -1))
scores_qek = scores_qek.transpose(0, 1).view(B, H, T_q, -1)
scores = scores_qk + scores_qek
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
def unfold_tensor(tensor, dim, window_sz):
"""Unfold a tensor by applying a sliding window on a certain dimension with step 1 and padding of 0's. The window
dimension is added as the last dimension
:param tensor: the tensor to be unfolded, with shape [d_1, d_2, ..., T, ..., d_n]
:param dim: the dimension along which unfolding is applied
:param window_sz: sliding window size, need to be an odd number
:return: the unfolded tensor with shape [d_1, d_2, ..., T, ..., d_n, window_sz]
"""
half_window = (window_sz - 1) // 2
if dim < 0:
dim = len(tensor.shape) + dim
# torch.nn.functional.pad apply backwardly from the last dimension
padding = [0, 0] * (len(tensor.shape) - dim - 1) + [half_window, half_window]
return F.pad(tensor, padding).unfold(dim, window_sz, 1)
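# Illustrative sketch: unfold_tensor slides a size-3 window over the time dimension with zero padding,
# adding the window as a trailing dimension.
def _example_unfold_tensor():
    x = torch.arange(5.0).view(1, 5, 1)       # [B=1, T=5, C=1]
    u = unfold_tensor(x, dim=1, window_sz=3)  # [1, 5, 1, 3]
    assert u.shape == (1, 5, 1, 3)
    assert u[0, 0, 0].tolist() == [0.0, 0.0, 1.0]  # window at t=0: [left zero-pad, x[0]=0, x[1]=1]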
class SeqScaledWindowedRelativeAttention(SequenceSequenceRelativeAttention):
"""This class implements windowed relative attention, i.e. preventing attention beyond rpr_k. For efficiency,
_attention and _update are implemented in a different way."""
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _unfold_mask(self, mask, batchsz, rpr_k):
"""Transform mask into the unfolded format."""
window_sz = 2 * rpr_k + 1
T = mask.shape[3]
if mask.shape[2] > 1: # mask is from a subsequent mask, with [1, 1, T, T] or [B, 1, T, T]
logger.warning("Using subsequent mask with long sequence may cause OOM error.")
mask = mask.expand(batchsz, 1, T, T) # expand sequence/subsequent mask into a uniform dim
mask = F.pad(mask, [rpr_k, rpr_k]) # pad both sides with rpr_k, [B, 1, T, T + 2*rpr_k]
seq = torch.arange(T + 2 * rpr_k)
indices = seq.unfold(0, window_sz, 1) # indices of a sliding window, [T, W]
indices = indices.unsqueeze(0).unsqueeze(0).expand(batchsz, 1, T, window_sz).to(mask.device)
return torch.gather(mask, -1, indices)  # [B, 1, T, W]
else: # mask is a sequence mask [B, 1, 1, T]
unfolded = unfold_tensor(mask, dim=-1, window_sz=window_sz) # [B, 1, 1, T, W]
return unfolded.squeeze(1) # [B, 1, T, W]
def _attention(
self, query: torch.Tensor, key: torch.Tensor, rpr_key: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Implementation of attention considering RA masking: using torch.Tensor.unfold to create an extra dimension
representing the sliding window. Then when applying matmul, Q, K, V share the same T dimension.
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:param rpr_key: tensor of the rpr_key embeddings [W, d_k]
:return: A tensor that is [B, H, T, 1, W] to be matmul with values
"""
B, H, T, d_k = query.shape
window_sz = rpr_key.shape[0]
rpr_k = (window_sz - 1) // 2
query = query.unsqueeze(-2) # [B, H, T, 1, d_k]
key = unfold_tensor(key, dim=2, window_sz=window_sz) # [B, H, T, d_k, W]
rpr_key = rpr_key.transpose(0, 1).unsqueeze(0).unsqueeze(0).unsqueeze(0) # [1, 1, 1, d_k, W]
scores_qk = torch.matmul(query, key) # [B, H, T, 1, W]
scores_qrk = torch.matmul(query, rpr_key) # [B, H, T, 1, W]
scores = (scores_qk + scores_qrk) / math.sqrt(d_k)
if mask is not None:
mask = self._unfold_mask(mask, B, rpr_k).unsqueeze(-2) # [B, 1, T, 1, W]
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
def _update(self, a: torch.Tensor, value: torch.Tensor, rpr_value: torch.Tensor) -> torch.Tensor:
# a has dim [B, H, T, 1, W]
window_sz = a.shape[-1]
value = unfold_tensor(value, dim=2, window_sz=window_sz).transpose(-1, -2) # [B, H, T, W, d_value]
updated_values = torch.matmul(a, value) # [B, H, T, 1, d_value]
if rpr_value is not None:
rpr_value = rpr_value.unsqueeze(0).unsqueeze(0).unsqueeze(0) # [1, 1, 1, W, d_value]
update_rpr_values = torch.matmul(a, rpr_value) # [B, H, T, 1, d_value]
return (updated_values + update_rpr_values).squeeze(3) # [B, H, T, d_value]
else:
return updated_values.squeeze(3)
class SeqBahdanauAttention(SequenceSequenceAttention):
def __init__(self, hsz: int, pdrop: float = 0.1, **kwargs):
super().__init__(hsz, pdrop=pdrop, **kwargs)
self.V = pytorch_linear(self.hsz, 1, bias=False)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
# [B, H, T, 1, D] + [B, H, 1, T, D] = [B, H, T, T, D]
additive = query.unsqueeze(-2) + key.unsqueeze(-3)
non_linear = torch.tanh(additive)
# [B, H, T, T, D] @ [D, 1] = [B, H, T, T, 1]
scores = self.V(non_linear)
# [B, H, T, T]
scores = scores.squeeze(-1)
return F.softmax(scores, dim=-1)
class MultiHeadedAttention(nn.Module):
"""
Multi-headed attention from https://arxiv.org/abs/1706.03762 via http://nlp.seas.harvard.edu/2018/04/03/attention.html
Multi-headed attention provides multiple looks of low-order projections K, Q and V using an attention function
(specifically `scaled_dot_product_attention` in the paper). This allows multiple relationships to be illuminated
via attention on different positional and representational information from each head.
The number of heads `h` times the low-order projection dim `d_k` is equal to `d_model` (which is asserted upfront).
This means that each weight matrix can be simply represented as a linear transformation from `d_model` to `d_model`,
and partitioned into heads after the fact.
Finally, an output projection is applied which brings the output space back to `d_model`, in preparation for the
sub-sequent `FFN` sub-layer.
There are 3 uses of multi-head attention in the Transformer.
For encoder-decoder layers, the queries come from the previous decoder layer, and the memory keys come from
the encoder. For encoder layers, the K, Q and V all come from the output of the previous layer of the encoder.
And for self-attention in the decoder, K, Q and V all come from the decoder, but here it is masked to prevent using
future values
"""
def __init__(
self, num_heads: int, d_model: int, dropout: float = 0.1, scale: bool = False, d_k: Optional[int] = None, ra_type: Optional[str] = None,
):
"""Constructor for multi-headed attention
:param num_heads: The number of heads
:param d_model: The model hidden size
:param dropout (``float``): The amount of dropout to use
:param scale: Should we scale the dot product attention
:param d_k: The low-order projection per head. This is normally `d_model // num_heads` unless set explicitly
:param ra_type: If there is an attention bias term, that will be encapsulated in the attention computation
"""
super().__init__()
if d_k is None:
self.d_k = d_model // num_heads
if d_model % num_heads != 0:
raise Exception(f"d_model ({d_model}) must be evenly divisible by num_heads ({num_heads})")
else:
self.d_k = d_k
self.h = num_heads
# for multi-headed attention, w_V projects to h heads, each head has dim d_k; for single headed attention, w_V
# project to 1 head with dim d_model
if self.h > 1:
self.d_value = self.d_k
else:
self.d_value = d_model
self.w_Q = Dense(d_model, self.d_k * self.h)
self.w_K = Dense(d_model, self.d_k * self.h)
self.w_V = Dense(d_model, self.d_value * self.h)
if self.h > 1: # w_O is not needed for single headed attention
self.w_O = Dense(self.d_k * self.h, d_model)
if scale:
if ra_type == 'alibi':
self.attn_fn = SeqScaledDotProductAttentionALiBi(dropout, num_heads=num_heads)
elif ra_type == 't5':
# TODO: pass through options
self.attn_fn = SeqScaledDotProductAttentionT5(dropout, num_heads=num_heads)
else:
self.attn_fn = SeqScaledDotProductAttention(dropout)
else:
if ra_type == 'alibi':
self.attn_fn = SeqDotProductAttentionALiBi(dropout, num_heads=num_heads)
elif ra_type == 't5':
# TODO: pass through options
self.attn_fn = SeqDotProductAttentionT5(dropout, num_heads=num_heads)
else:
self.attn_fn = SeqDotProductAttention(dropout)
self.attn = None
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Low-order projections of query, key and value into multiple heads, then attention application and dropout
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: Multi-head attention output, result of attention application to sequence (B, T, d_model)
"""
query, key, value, mask = qkvm
batchsz = query.size(0)
# (B, H, T, D)
query = self.w_Q(query).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
key = self.w_K(key).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
value = self.w_V(value).view(batchsz, -1, self.h, self.d_value).transpose(1, 2)
x = self.attn_fn((query, key, value, mask))
self.attn = self.attn_fn.attn
x = x.transpose(1, 2).contiguous().view(batchsz, -1, self.h * self.d_value)
if self.h > 1:
return self.w_O(x)
else:
return x
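# Illustrative sketch: self-attention with MultiHeadedAttention on a random batch. The mask is a
# [B, 1, 1, T] sequence mask; sizes are arbitrary.
def _example_multi_headed_attention():
    B, T, D = 2, 8, 64
    mha = MultiHeadedAttention(num_heads=4, d_model=D, dropout=0.0, scale=True)
    x = torch.randn(B, T, D)
    mask = sequence_mask(torch.tensor([8, 5]), T).view(B, 1, 1, T)
    out = mha((x, x, x, mask))
    assert out.shape == (B, T, D)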
class MultiHeadedRelativeAttention(nn.Module):
"""
Multi-headed relative attention from Shaw et al 2018 (https://www.aclweb.org/anthology/N18-2074.pdf)
This method follows the same approach of MultiHeadedAttention, but it computes Relative Position Representations (RPR)
which are used as part of the attention computations. To facilitate this, the model has its own internal
embeddings lookup table, and both the attention-weight computation and the application of those weights
are updated to take the relative position representations into account.
"""
def __init__(
self,
num_heads: int,
d_model: int,
rpr_k: int,
dropout: float = 0.1,
scale: bool = False,
d_k: Optional[int] = None,
windowed_ra: bool = False,
rpr_value_on: bool = True
):
"""Constructor for multi-headed attention
:param num_heads: The number of heads
:param d_model: The model hidden size
:param rpr_k: distance within which relative positional embedding will be considered
:param windowed_ra: whether prevent attention beyond rpr_k
:param dropout (``float``): The amount of dropout to use
:param scale: Should we scale the dot product attention
:param d_k: The low-order project per head. This is normally `d_model // num_heads` unless set explicitly
"""
super().__init__()
if d_k is None:
self.d_k = d_model // num_heads
if d_model % num_heads != 0:
raise Exception(f"d_model ({d_model}) must be evenly divisible by num_heads ({num_heads})")
else:
self.d_k = d_k
self.h = num_heads
        # for multi-headed attention, w_V projects to h heads, each head with dim d_k; for single-headed attention, w_V
        # projects to 1 head with dim d_model
if self.h > 1:
self.d_value = self.d_k
else:
self.d_value = d_model
self.rpr_k = rpr_k
self.rpr_value_on = rpr_value_on
self.rpr_key = nn.Embedding(2 * rpr_k + 1, self.d_k)
if self.rpr_value_on:
self.rpr_value = nn.Embedding(2 * rpr_k + 1, self.d_value)
self.windowed_ra = windowed_ra
self.w_Q = Dense(d_model, self.d_k * self.h)
self.w_K = Dense(d_model, self.d_k * self.h)
self.w_V = Dense(d_model, self.d_value * self.h)
        if self.h > 1:  # w_O is not needed for single-headed attention
self.w_O = Dense(self.d_k * self.h, d_model)
if scale:
if windowed_ra:
self.attn_fn = SeqScaledWindowedRelativeAttention(dropout)
else:
self.attn_fn = SeqScaledDotProductRelativeAttention(dropout)
else:
self.attn_fn = SeqDotProductRelativeAttention(dropout)
self.attn = None
def make_rpr(self, q_len, k_len, device) -> Tuple[torch.Tensor, torch.Tensor]:
"""Create a matrix shifted by self.rpr_k and bounded between 0 and 2*self.rpr_k to provide 0-based indexing for embedding
"""
q_seq = torch.arange(q_len).to(device)
k_seq = torch.arange(k_len).to(device)
window_len = 2 * self.rpr_k
edges = k_seq.view(1, -1) - q_seq.view(-1, 1) + self.rpr_k # [q_len, k_len]
edges = torch.clamp(edges, 0, window_len)
if self.rpr_value_on:
return self.rpr_key(edges), self.rpr_value(edges) # [q_len, k_len, d_k]
else:
return self.rpr_key(edges), None
def make_windowed_rpr(self, device):
window_len = 2 * self.rpr_k + 1
window = torch.arange(window_len).to(device)
if self.rpr_value_on:
return self.rpr_key(window), self.rpr_value(window)
else:
return self.rpr_key(window), None
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Low-order projections of query, key and value into multiple heads, then attention application and dropout
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldnt
:return: Multi-head attention output, result of attention application to sequence (B, T, d_model)
"""
query, key, value, mask = qkvm
batchsz = query.size(0)
query_len = query.size(1)
key_len = key.size(1) # key and value have the same length, but query can have a different length
# (B, H, T, D)
query = self.w_Q(query).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
key = self.w_K(key).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
value = self.w_V(value).view(batchsz, -1, self.h, self.d_value).transpose(1, 2)
if self.windowed_ra:
rpr_key, rpr_value = self.make_windowed_rpr(query.device)
else:
rpr_key, rpr_value = self.make_rpr(query_len, key_len, query.device)
x = self.attn_fn((query, key, value, rpr_key, rpr_value, mask))
self.attn = self.attn_fn.attn
x = x.transpose(1, 2).contiguous().view(batchsz, -1, self.h * self.d_value)
if self.h > 1:
return self.w_O(x)
else:
return x
class TransformerEncoderBase(nn.Module):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
activation_type: str = "gelu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
ra_type: Optional[str] = None,
**kwargs,
):
super().__init__()
self.d_model = d_model
self.d_ff = d_ff if d_ff is not None else 4 * d_model
if rpr_k is not None and rpr_k != 0:
self.self_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k,
windowed_ra=windowed_ra, rpr_value_on=rpr_value_on)
else:
self.self_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale=scale, d_k=d_k, ra_type=ra_type)
self.ffn = nn.Sequential(
Dense(self.d_model, self.d_ff),
get_activation(activation_type),
nn.Dropout(ffn_pdrop),
Dense(self.d_ff, self.d_model),
)
self.ln1 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.ln2 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(pdrop)
class PreLNTransformerEncoder(TransformerEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: `(x, mask)`
:return: The output tensor
"""
x, mask = inputs
h = self.ln1(x)
x = x + self.dropout(self.self_attn((h, h, h, mask)))
x = x + self.dropout(self.ffn(self.ln2(x)))
return x
class PreLNBeforeResConnTransformerEncoder(TransformerEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: `(x, mask)`
:return: The output tensor
"""
x, mask = inputs
x = self.ln1(x)
h = self.self_attn((x, x, x, mask))
x = x + self.dropout(h)
x = self.ln2(x)
x = x + self.dropout(self.ffn(x))
return x
class PostLNTransformerEncoder(TransformerEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: `(x, mask)`
:return: The output tensor
"""
x, mask = inputs
h = self.self_attn((x, x, x, mask))
x = x + self.dropout(h)
x = self.ln2(x)
x = x + self.dropout(self.ffn(x))
x = self.ln1(x)
return x
class SpatialGatingUnit(nn.Module):
"""Spatial gating unit
There are 2 ways we can look at this unit, as an MLP or a Conv with kernel length 1
l = nn.Linear(T, T)
c = nn.Conv1d(T, T, 1)
l(x.transpose(1, 2)).transpose(1, 2)
c(x)
"""
def __init__(self,
d_ffn: int,
nctx: int,
layer_norm_eps: float = 1.0e-6):
super().__init__()
self.norm = nn.LayerNorm(d_ffn // 2, eps=layer_norm_eps)
self.proj = pytorch_conv1d(nctx, nctx, 1)
nn.init.constant_(self.proj.bias, 1.0)
def split(self, x):
u, v = x.chunk(2, dim=-1)
return u, v
def forward(self, x):
u, v = self.split(x)
v = self.norm(v)
v = self.proj(v)
return u * v
class GatedMLPEncoder(nn.Module):
"""Following https://arxiv.org/pdf/2105.08050.pdf
"""
def __init__(
self,
d_model: int,
pdrop: float,
nctx: int = 256,
activation_type: str = "gelu",
d_ff: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6
):
super().__init__()
self.d_model = d_model
self.d_ff = d_ff if d_ff is not None else 4 * d_model
self.to_ffn = Dense(self.d_model, self.d_ff)
self.activation = get_activation(activation_type)
self.ffn_drop = nn.Dropout(ffn_pdrop)
self.from_sgu = Dense(self.d_ff//2, self.d_model)
self.norm = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(pdrop)
self.spatial_gating_unit = SpatialGatingUnit(self.d_ff, nctx, layer_norm_eps)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Do gMLP forward
        TODO: we aren't using the mask at the moment
:param inputs: `(x, mask)`
:return: The output tensor
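        Example (an illustrative sketch, not from the original source; ``nctx`` must match the time dimension)::
            >>> enc = GatedMLPEncoder(d_model=256, pdrop=0.1, nctx=64)
            >>> x = torch.randn(2, 64, 256)
            >>> mask = torch.ones(2, 1, 1, 64, dtype=torch.bool)
            >>> enc((x, mask)).shape
            torch.Size([2, 64, 256])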
"""
# The shortcut here happens pretty early
shortcut, mask = inputs
# A "channel" norm
x = self.norm(shortcut)
# A "channel" FFN
x = self.dropout(self.to_ffn(x))
# gelu according to https://arxiv.org/pdf/2105.08050.pdf
x = self.activation(x)
# "spatial" projection (over T)
x = self.spatial_gating_unit(x)
# "channel" projection
x = self.from_sgu(x)
x = self.dropout(x)
return x + shortcut
class TransformerDecoderBase(nn.Module):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
activation_type: str = "gelu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6,
rpr_value_on: bool = True,
ra_type: Optional[str] = None,
):
super().__init__()
self.d_model = d_model
self.d_ff = d_ff if d_ff is not None else 4 * d_model
if rpr_k is not None:
self.self_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, rpr_value_on=rpr_value_on)
self.src_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, rpr_value_on=rpr_value_on)
else:
self.self_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, ra_type=ra_type)
self.src_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, ra_type=ra_type)
self.ffn = nn.Sequential(
Dense(self.d_model, self.d_ff),
nn.Dropout(ffn_pdrop),
get_activation(activation_type),
Dense(self.d_ff, self.d_model),
)
self.ln1 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.ln2 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.ln3 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(pdrop)
class PreLNTransformerDecoder(TransformerDecoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, memory, src_mask, tgt_mask = inputs
h = self.ln1(x)
x = x + self.dropout(self.self_attn((h, h, h, tgt_mask)))
h = self.ln2(x)
x = x + self.dropout(self.src_attn((h, memory, memory, src_mask)))
h = self.ln3(x)
x = x + self.dropout(self.ffn(h))
return x
class PreLNBeforeResConnTransformerDecoder(TransformerDecoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, memory, src_mask, tgt_mask = inputs
x = self.ln1(x)
x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)))
x = self.ln2(x)
x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)))
x = self.ln3(x)
x = x + self.dropout(self.ffn(x))
return x
class PostLNTransformerDecoder(nn.Module):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, memory, src_mask, tgt_mask = inputs
x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)))
x = self.ln2(x)
x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)))
x = self.ln3(x)
x = x + self.dropout(self.ffn(x))
x = self.ln1(x)
return x
class TransformerEncoderStack(nn.Module):
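    """A stack of Transformer encoder layers with a configurable layer-norm placement.

    Example (an illustrative sketch, not from the original source; sizes are assumptions)::
        >>> enc = TransformerEncoderStack(num_heads=8, d_model=512, pdrop=0.1, layers=2)
        >>> x = torch.randn(4, 20, 512)
        >>> mask = torch.ones(4, 1, 1, 20, dtype=torch.bool)
        >>> enc((x, mask)).shape
        torch.Size([4, 20, 512])
    """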
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
ra_type: Optional[str] = None,
        transformer_type: Optional[str] = None,
**kwargs,
):
super().__init__()
self.encoders = nn.ModuleList()
if layer_norms_after or transformer_type == "post-layer-norm":
logger.info("Using post-layer-norm transformer (encoder)")
TransformerEncoder = PostLNTransformerEncoder
self.ln = nn.Identity()
elif transformer_type == "pre-layer-norm":
TransformerEncoder = PreLNTransformerEncoder
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
else: # transformer_type == "pre-layer-norm-before-resconn"
logger.info("Using layer norm before residual connections (encoder)")
if layer_norms_after:
raise Exception(f"Mutually exclusive options ({transformer_type}) and layer_norms_after=True)",)
TransformerEncoder = PreLNBeforeResConnTransformerEncoder
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.output_dim = d_model
self.layer_drop = layer_drop
if not is_sequence(rpr_k):
rpr_k = [rpr_k] * layers
elif len(rpr_k) == 1:
rpr_k = [rpr_k[0]] * layers
for i in range(layers):
self.encoders.append(
TransformerEncoder(
num_heads, d_model, pdrop, scale, activation, d_ff, d_k,
rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
layer_norm_eps=layer_norm_eps, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on, ra_type=ra_type
)
)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, mask = inputs
for layer in self.encoders:
pdrop = np.random.random()
if not self.training or (pdrop >= self.layer_drop):
x = layer((x, mask))
return self.ln(x)
class GatedMLPEncoderStack(nn.Module):
"""Following https://arxiv.org/pdf/2105.08050.pdf
"""
def __init__(
self,
d_model: int,
pdrop: float,
layers: int = 1,
nctx: int = 256,
activation: str = "gelu",
d_ff: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6,
layer_drop: float = 0.0,
**kwargs,
):
super().__init__()
self.encoders = nn.ModuleList()
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.output_dim = d_model
self.layer_drop = layer_drop
for i in range(layers):
self.encoders.append(
GatedMLPEncoder(
d_model, pdrop, nctx, activation, d_ff,
ffn_pdrop=ffn_pdrop,
layer_norm_eps=layer_norm_eps,
)
)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, mask = inputs
for layer in self.encoders:
pdrop = np.random.random()
if not self.training or (pdrop >= self.layer_drop):
x = layer((x, mask))
return self.ln(x)
class TransformerEncoderStackWithLengths(TransformerEncoderStack):
def __init__(
self,
num_heads: int,
d_model: int,
        pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
input_sz: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
ra_type: Optional[str] = None,
transformer_type: Optional[str] = None,
**kwargs,
):
super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on, layer_drop, ra_type, transformer_type, **kwargs)
self.proj = WithDropout(pytorch_linear(input_sz, d_model), pdrop)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, lengths = inputs
x = self.proj(x)
max_seqlen = x.shape[1]
mask = sequence_mask(lengths, max_seqlen).to(x.device)
return super().forward((x, mask.unsqueeze(1).unsqueeze(1)))
class TransformerEncoderStackWithTimeMask(TransformerEncoderStack):
def __init__(
self,
num_heads: int,
d_model: int,
        pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
input_sz: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
ra_type: Optional[str] = None,
transformer_type: Optional[str] = None,
**kwargs,
):
super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on, layer_drop, ra_type, transformer_type, **kwargs)
self.proj = WithDropout(pytorch_linear(input_sz, d_model), pdrop)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, lengths = inputs
x = self.proj(x)
max_seqlen = x.shape[1]
mask = subsequent_mask(max_seqlen).to(x.device)
return super().forward((x, mask.unsqueeze(1).unsqueeze(1)))
class TransformerDecoderStack(nn.Module):
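    """A stack of Transformer decoder layers with a configurable layer-norm placement.

    Example (an illustrative sketch, not from the original source; sizes are assumptions)::
        >>> dec = TransformerDecoderStack(num_heads=8, d_model=512, pdrop=0.1, layers=2)
        >>> tgt = torch.randn(2, 7, 512)
        >>> memory = torch.randn(2, 11, 512)
        >>> src_mask = torch.ones(2, 1, 1, 11, dtype=torch.bool)
        >>> tgt_mask = subsequent_mask(7)
        >>> dec((tgt, memory, src_mask, tgt_mask)).shape
        torch.Size([2, 7, 512])
    """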
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation_type: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
layer_drop: float = 0.0,
rpr_value_on: bool = True,
ra_type: Optional[str] = None,
transformer_type: Optional[str] = None,
**kwargs,
):
super().__init__()
self.decoders = nn.ModuleList()
self.layer_drop = layer_drop
if layer_norms_after or transformer_type == "post-layer-norm":
logger.info("Using post-layer-norm transformer (decoder)")
TransformerDecoder = PostLNTransformerDecoder
self.ln = nn.Identity()
elif transformer_type == "pre-layer-norm":
TransformerDecoder = PreLNTransformerDecoder
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
else: # transformer_type == "pre-layer-norm-before-resconn"
logger.info("Using layer norm before residual connections (decoder)")
if layer_norms_after:
raise Exception(f"Mutually exclusive options ({transformer_type}) and layer_norms_after=True)",)
TransformerDecoder = PreLNBeforeResConnTransformerDecoder
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
if not is_sequence(rpr_k):
rpr_k = [rpr_k] * layers
elif len(rpr_k) == 1:
rpr_k = [rpr_k[0]] * layers
for i in range(layers):
self.decoders.append(
TransformerDecoder(num_heads, d_model, pdrop, scale, activation_type, d_ff,
d_k=d_k, rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
layer_norm_eps=layer_norm_eps,
rpr_value_on=rpr_value_on, ra_type=ra_type)
)
def forward(self, inputs):
x, memory, src_mask, tgt_mask = inputs
for layer in self.decoders:
pdrop = np.random.random()
if not self.training or (pdrop >= self.layer_drop):
x = layer((x, memory, src_mask, tgt_mask))
return self.ln(x)
def update_lengths(lengths, eoses, idx):
"""Update the length of a generated tensor based on the first EOS found.
This is useful for a decoding situation where tokens after an EOS
can be something other than EOS. This also makes sure that a second
generated EOS doesn't affect the lengths.
:param lengths: `torch.LongTensor`: The lengths where zero means an
unfinished sequence.
:param eoses: `torch.ByteTensor`: A mask that has 1 for sequences that
generated an EOS.
:param idx: `int`: What value to fill the finished lengths with (normally
the current decoding timestep).
:returns: `torch.Tensor`: The updated lengths tensor (same shape and type).
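    Example (an illustrative sketch, not from the original source)::
        >>> lengths = torch.tensor([0, 2, 0])
        >>> eoses = torch.tensor([True, True, False])
        >>> update_lengths(lengths, eoses, 5)
        tensor([5, 2, 0])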
"""
# If a length is 0 it has never had a length set so it is eligible to have
# this EOS be the length.
updatable_lengths = lengths == 0
# If this length can be updated AND this token is an eos
lengths_mask = updatable_lengths & eoses
return lengths.masked_fill(lengths_mask, idx)
def gnmt_length_penalty(lengths, alpha=0.8):
"""Calculate a length penalty from https://arxiv.org/pdf/1609.08144.pdf
The paper states the penalty as (5 + |Y|)^a / (5 + 1)^a. This is implemented
as ((5 + |Y|) / 6)^a for a (very) tiny performance boost
:param lengths: `torch.LongTensor`: [B, K] The lengths of the beams.
:param alpha: `float`: A hyperparameter. See Table 2 for a search on this
parameter.
:returns:
`torch.FloatTensor`: [B, K, 1] The penalties.
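    Example (an illustrative sketch, not from the original source)::
        >>> gnmt_length_penalty(torch.tensor([[3, 5]])).shape
        torch.Size([1, 2, 1])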
"""
lengths = lengths.to(torch.float)
penalty = torch.pow(((5 + lengths) / 6), alpha)
return penalty.unsqueeze(-1)
def no_length_penalty(lengths):
"""A dummy function that returns a no penalty (1)."""
return torch.ones_like(lengths).to(torch.float).unsqueeze(-1)
def repeat_batch(t, K, dim=0):
"""Repeat a tensor while keeping the concept of a batch.
:param t: `torch.Tensor`: The tensor to repeat.
:param K: `int`: The number of times to repeat the tensor.
:param dim: `int`: The dimension to repeat in. This should be the
batch dimension.
:returns: `torch.Tensor`: The repeated tensor. The new shape will be
batch size * K at dim, the rest of the shapes will be the same.
Example::
>>> a = torch.arange(10).view(2, -1)
>>> a
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> a.repeat(2, 1)
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> repeat_batch(a, 2)
tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[5, 6, 7, 8, 9]])
"""
shape = t.shape
tiling = [1] * (len(shape) + 1)
tiling[dim + 1] = K
tiled = t.unsqueeze(dim + 1).repeat(tiling)
old_bsz = shape[dim]
new_bsz = old_bsz * K
new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1 :])
return tiled.view(new_shape)
class BeamSearchBase:
def __init__(self, beam=1, length_penalty=None, **kwargs):
self.length_penalty = length_penalty if length_penalty else no_length_penalty
self.K = beam
def init(self, encoder_outputs):
pass
def step(self, paths, extra):
pass
def update(self, beams, extra):
pass
def __call__(self, encoder_outputs, **kwargs):
"""Perform batched Beam Search.
Note:
The paths and lengths generated do not include the <GO> token.
:param encoder_outputs: `namedtuple` The outputs of the encoder class.
        :param init: `Callable(encoder_outputs: encoder_outputs, K: int)` -> Any: A
callable that is called once at the start of the search to initialize
things. This returns a blob that is passed to other callables.
:param step: `Callable(paths: torch.LongTensor, extra) -> (probs: torch.FloatTensor, extra):
            A callable that does a single decoding step. It returns the log
probabilities over the vocabulary in the last dimension. It also returns
any state the decoding process needs.
:param update: `Callable(beams: torch.LongTensor, extra) -> extra:
A callable that is called to edit the decoding state based on the selected
best beams.
        :param length_penalty: `Callable(lengths: torch.LongTensor) -> torch.FloatTensor`
A callable that generates a penalty based on the lengths. Lengths is
[B, K] and the returned penalty should be [B, K, 1] (or [B, K, V] to
have token based penalties?)
:Keyword Arguments:
* *beam* -- `int`: The number of beams to use.
* *mxlen* -- `int`: The max number of steps to run the search for.
:returns:
tuple(preds: torch.LongTensor, lengths: torch.LongTensor, scores: torch.FloatTensor)
preds: The predicted values: [B, K, max(lengths)]
lengths: The length of each prediction [B, K]
scores: The score of each path [B, K]
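        Example (a hedged sketch, not from the original source; ``Outputs`` and ``ToyBeam`` are made-up
        stand-ins with a uniform next-token distribution over a vocabulary of 8)::
            >>> from collections import namedtuple
            >>> Outputs = namedtuple("Outputs", "output")
            >>> class ToyBeam(BeamSearchBase):
            ...     def init(self, encoder_outputs): return None
            ...     def step(self, paths, extra):
            ...         n = paths.shape[0] * paths.shape[1]
            ...         return torch.full((n, 8), 1.0 / 8).log(), extra
            ...     def update(self, beams, extra): return extra
            >>> preds, lengths, scores = ToyBeam(beam=2)(Outputs(output=torch.zeros(3, 4)), mxlen=5)
            >>> preds.shape[0], preds.shape[1]
            (3, 2)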
"""
mxlen = kwargs.get("mxlen", 100)
bsz = encoder_outputs.output.shape[0]
device = encoder_outputs.output.device
with torch.no_grad():
extra = self.init(encoder_outputs)
paths = torch.full((bsz, self.K, 1), Offsets.GO, dtype=torch.long, device=device)
# This tracks the log prob of each beam. This is distinct from score which
# is based on the log prob and penalties.
log_probs = torch.zeros((bsz, self.K), dtype=torch.float, device=device)
# Tracks the lengths of the beams, unfinished beams have lengths of zero.
lengths = torch.zeros((bsz, self.K), dtype=torch.long, device=device)
for i in range(mxlen - 1):
probs, extra = self.step(paths, extra)
V = probs.shape[-1]
probs = probs.view((bsz, self.K, V)) # [B, K, V]
if i > 0:
# This mask is for all beams that are done.
done_mask = (lengths != 0).unsqueeze(-1) # [B, K, 1]
# Can creating this mask be moved out of the loop? It never changes but we don't have V
# This mask selects the EOS token
eos_mask = torch.zeros((1, 1, V), dtype=done_mask.dtype, device=device)
eos_mask[:, :, Offsets.EOS] = 1
# This mask selects the EOS token of only the beams that are done.
mask = done_mask & eos_mask
# Put all probability mass on the EOS token for finished beams.
# Otherwise as the other beams get longer they will all give
# up and eventually select this beam and all outputs become
# the same.
probs = probs.masked_fill(done_mask, -np.inf)
probs = probs.masked_fill(mask, 0)
probs = log_probs.unsqueeze(-1) + probs # [B, K, V]
# Calculate the score of the beam based on the current length.
path_scores = probs / self.length_penalty(lengths.masked_fill(lengths == 0, i + 1))
else:
# On the first step we only look at probabilities for the first beam.
# If we don't then the probs will be the same for each beam
# This means the same token will be selected for each beam
# And we won't get any diversity.
# Using only the first beam ensures K different starting points.
path_scores = probs[:, 0, :]
flat_scores = path_scores.view(bsz, -1) # [B, K * V]
best_scores, best_idx = flat_scores.topk(self.K, 1)
# Get the log_probs of the best scoring beams
log_probs = probs.view(bsz, -1).gather(1, best_idx).view(bsz, self.K)
best_beams = best_idx // V # Get which beam it came from
best_idx = best_idx % V # Get the index of the word regardless of which beam it is.
# Best Beam index is relative within the batch (only [0, K)).
# This makes the index global (e.g. best beams for the second
# batch example is in [K, 2*K)).
offsets = torch.arange(bsz, dtype=torch.long, device=device) * self.K
offset_beams = best_beams + offsets.unsqueeze(-1)
flat_beams = offset_beams.view(bsz * self.K)
# Select the paths to extend based on the best beams
flat_paths = paths.view(bsz * self.K, -1)
new_paths = flat_paths[flat_beams, :].view(bsz, self.K, -1)
# Add the selected outputs to the paths
paths = torch.cat([new_paths, best_idx.unsqueeze(-1)], dim=2)
# Select the lengths to keep tracking based on the valid beams left.
lengths = lengths.view(-1)[flat_beams].view((bsz, self.K))
extra = self.update(flat_beams, extra)
# Updated lengths based on if we hit EOS
last = paths[:, :, -1]
eoses = last == Offsets.EOS
lengths = update_lengths(lengths, eoses, i + 1)
if (lengths != 0).all():
break
else:
# This runs if the loop didn't break meaning one beam hit the max len
# Add an EOS to anything that hasn't hit the end. This makes the scores real.
probs, extra = self.step(paths, extra)
V = probs.size(-1)
probs = probs.view((bsz, self.K, V))
probs = probs[:, :, Offsets.EOS] # Select the score of EOS
# If any of the beams are done mask out the score of this EOS (they already had an EOS)
probs = probs.masked_fill((lengths != 0), 0)
log_probs = log_probs + probs
end_tokens = torch.full((bsz, self.K, 1), Offsets.EOS, device=device, dtype=paths.dtype)
paths = torch.cat([paths, end_tokens], dim=2)
lengths = update_lengths(lengths, torch.ones_like(lengths) == 1, mxlen)
best_scores = log_probs / self.length_penalty(lengths).squeeze(-1)
# Slice off the Offsets.GO token
paths = paths[:, :, 1:]
return paths, lengths, best_scores
def checkpoint_for(model_base, epoch, tick_type='epoch'):
return '{}-{}-{}'.format(model_base, tick_type, epoch+1)
def rm_old_checkpoints(base_path, current_epoch, last_n=10):
for i in range(0, current_epoch-last_n):
checkpoint_i = checkpoint_for(base_path, i)
for extension in ('.pth', '.npz'):
checkpoint_name = checkpoint_i + extension
if os.path.exists(checkpoint_name):
os.remove(checkpoint_name)
def find_latest_checkpoint(checkpoint_dir: str, wildcard="checkpoint") -> Tuple[str, int]:
    checkpoint = None
    step_num = 0
for f in glob.glob(os.path.join(checkpoint_dir, f"{wildcard}*")):
base = os.path.basename(f)
if "-" not in base:
continue
last = base.split("-")[-1]
for x in ('.pth', '.npz'):
last = last.replace(x, '', -1)
this_step_num = int(last)
if this_step_num > step_num:
checkpoint = f
step_num = this_step_num
return checkpoint, step_num
def save_checkpoint(model: torch.nn.Module, model_base: str, count: int, tick_type: str = 'epoch', save_npz: bool = False):
from eight_mile.pytorch.serialize import save_tlm_npz, save_tlm_output_npz, save_transformer_seq2seq_npz, save_transformer_de_npz
checkpoint_name = checkpoint_for(model_base, count, tick_type=tick_type)
    # It's possible, due to how this is called, that we might save the same checkpoint twice if we don't check first
if os.path.exists(checkpoint_name):
logger.info("Checkpoint already exists: %s", checkpoint_name)
return
logger.info("Creating checkpoint: %s", checkpoint_name)
model_ = model.module if hasattr(model, 'module') else model
torch.save(model_.state_dict(), checkpoint_name+'.pth')
if save_npz:
if hasattr(model_, 'decoder'):
save_transformer_seq2seq_npz(model_, checkpoint_name+'.npz')
elif hasattr(model_, 'reduction_layer'):
save_transformer_de_npz(model_, checkpoint_name+'.npz')
elif hasattr(model_, 'output_layer'):
save_tlm_output_npz(model_, checkpoint_name+'.npz')
else:
save_tlm_npz(model_, checkpoint_name+'.npz')
if tick_type == 'epoch':
rm_old_checkpoints(model_base, count)
def init_distributed(local_rank):
if local_rank == -1:
# https://github.com/kubeflow/pytorch-operator/issues/128
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
logger.info("Setting local rank to RANK env variable")
local_rank = int(os.environ['RANK'])
logger.warning("Local rank (%d)", local_rank)
# In an env like k8s with kubeflow each worker will only see a single gpu
# with an id of 0. If the gpu count is 1 then we are probably in an env like
    # that, so we should just use the first (and only) gpu available
if torch.cuda.device_count() == 1:
torch.cuda.set_device(0)
device = torch.device("cuda", 0)
# This program assumes multiprocess/multi-device on a single node. Each
# process gets a rank (via cli or ENV variable) and uses that rank to select
# which gpu to use. This only makes sense on a single node, if you had 4
# processes on 2 nodes where each node has 2 GPUs then the ranks would be
# 0, 1, 2, 3 but the gpus numbers would be node 0: 0, 1 and node 1: 0, 1
# and this assignment to gpu 3 would fail. On a single node with 4 processes
# and 4 gpus the rank and gpu ids will align and this will work
else:
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
return device, local_rank
class AttentionReduction(nn.Module):
"""
This is a reduction that is given Q, K, V and a mask vector. Different from base reductions, which get an embedding stack
"""
def __init__(self):
super().__init__()
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Inputs are the same as for a normal attention function, but the output here is a single tensor, ``[B, H]``
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldn't
:return: sentence-level encoding with dim [B, d_model]
"""
class SingleHeadReduction(AttentionReduction):
"""
Implementation of the "self_attention_head" layer from the conveRT paper (https://arxiv.org/pdf/1911.03688.pdf)
"""
def __init__(
self, d_model: int, dropout: float = 0.0, scale: bool = False, d_k: Optional[int] = None, pooling: str = 'sqrt_length',
):
"""
:param d_model: The model hidden size
        :param dropout: The amount of dropout to use
        :param scale: should we scale the dot product attention
        :param d_k: The low-order projection size. This defaults to `d_model` unless set explicitly
"""
super().__init__()
self.output_dim = d_model
if d_k is None:
self.d_k = d_model
else:
self.d_k = d_k
self.w_Q = Dense(d_model, self.d_k)
self.w_K = Dense(d_model, self.d_k)
if scale:
self.attn_fn = SeqScaledDotProductAttention(dropout)
else:
self.attn_fn = SeqDotProductAttention(dropout)
self.attn = None
pooling = pooling.lower()
self.fill = 0
if pooling == 'max':
self.pool = self._max_pool
self.fill = -1e9
elif pooling == 'mean':
self.pool = self._mean_pool
else:
self.pool = self._sqrt_length_pool
def _sqrt_length_pool(self, x, seq_lengths):
x = x.sum(dim=1) # [B, D]
x = x * seq_lengths.float().sqrt().unsqueeze(-1)
return x
def _mean_pool(self, x, seq_lengths):
return torch.sum(x, 1, keepdim=False) / torch.unsqueeze(seq_lengths, -1).to(x.dtype).to(
x.device
)
def _max_pool(self, x, _):
x, _ = torch.max(x, 1, keepdim=False)
return x
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""According to conveRT model's graph, they project token encodings to lower-dimensional query and key in single
head, use them to calculate the attention score matrix that has dim [B, T, T], then sum over the query dim to
get a tensor with [B, 1, T] (meaning the amount of attentions each token gets from all other tokens), scale it
by sqrt of sequence lengths, then use it as the weight to weighted sum the token encoding to get the sentence
        encoding. We implement it in an equivalent way that makes the best use of the eight_mile code: do the matrix
        multiply with the value first, then sum over the query dimension.
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldn't
:return: sentence-level encoding with dim [B, d_model]
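        Example (an illustrative sketch, not from the original source; sizes are assumptions)::
            >>> shr = SingleHeadReduction(d_model=512, dropout=0.0, d_k=64)
            >>> x = torch.randn(2, 9, 512)
            >>> mask = torch.ones(2, 1, 1, 9, dtype=torch.bool)
            >>> shr((x, x, x, mask)).shape
            torch.Size([2, 512])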
"""
query, key, value, mask = qkvm
batchsz = query.size(0)
seq_mask = mask.squeeze(1).squeeze(1) # [B, T]
seq_lengths = seq_mask.sum(dim=1)
# (B, H, T, D), still have num_heads = 1 to use the attention function defined in eight_miles
query = self.w_Q(query).view(batchsz, -1, 1, self.d_k).transpose(1, 2)
key = self.w_K(key).view(batchsz, -1, 1, self.d_k).transpose(1, 2)
value = value.view(batchsz, -1, 1, self.output_dim).transpose(1, 2)
x = self.attn_fn((query, key, value, mask)) # [B, 1, T, D]
self.attn = self.attn_fn.attn
x = x.squeeze(1) # [B, T, D]
x = x.masked_fill(seq_mask.unsqueeze(-1) == MASK_FALSE, self.fill)
return self.pool(x, seq_lengths)
class TransformerDiscriminator(nn.Module):
"""A Transformer model that tries to predict if each token is real or fake
This model is based on [ELECTRA: Pre-Training Text Encoders as Discriminators Rather Than Generators,
Clark et al. 2019](https://openreview.net/pdf?id=r1xMH1BtvB).
"""
def __init__(
self,
embeddings,
num_heads: int,
d_model: int,
        dropout: float,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
embeddings_reduction: str = 'sum',
**kwargs,
):
super().__init__()
self.embeddings = EmbeddingsStack(embeddings, dropout, reduction=embeddings_reduction)
self.weight_std = kwargs.get('weight_std', 0.02)
assert self.embeddings.dsz == d_model
self.transformer = TransformerEncoderStack(
num_heads, d_model=d_model, pdrop=dropout, scale=True,
layers=layers, activation=activation, d_ff=d_ff, rpr_k=rpr_k, d_k=d_k,
layer_norms_after=layer_norms_after, layer_norm_eps=layer_norm_eps
)
self.proj_to_output = pytorch_linear(d_model, 1)
self.apply(self.init_layer_weights)
self.lengths_feature = kwargs.get('lengths_feature', list(self.embeddings.keys())[0])
def init_layer_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
module.weight.data.normal_(mean=0.0, std=self.weight_std)
if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
module.bias.data.zero_()
def forward(self, features):
embedded = self.embeddings(features)
x = features[self.lengths_feature]
input_mask = torch.zeros(x.shape, device=x.device, dtype=torch.long).masked_fill(x != Offsets.PAD, 1).unsqueeze(1).unsqueeze(1)
transformer_out = self.transformer((embedded, input_mask))
binary = self.proj_to_output(transformer_out)
return torch.sigmoid(binary)
def create_loss(self):
return nn.BCELoss(reduction="none")
class PooledSequenceCriterion(nn.Module):
def __init__(self, LossFn=nn.BCEWithLogitsLoss, avg='token'):
super().__init__()
if avg == 'token':
self.crit = LossFn()
self._norm = self._no_norm
else:
self.crit = LossFn()
self._norm = self._batch_norm
def _batch_norm(self, loss, inputs):
return loss / inputs.size()[0]
def _no_norm(self, loss, inputs):
return loss
def forward(self, inputs, targets):
"""Evaluate some loss over a sequence.
:param inputs: torch.FloatTensor, [B, C] The scores from the model. Batch First
:param targets: torch.LongTensor, The labels.
:returns: torch.FloatTensor, The loss.
"""
#inputs = inputs.transpose(0, 1)
C = inputs.shape[-1]
flat_targets = torch.nn.functional.one_hot(targets, C)
# Get the offsets of the non-zero targets, the values of these are all on
flat_targets = (torch.sum(flat_targets, axis=1) != 0).float()
flat_targets[:, Offsets.PAD] = 0
flat_targets[:, Offsets.EOS] = 0
flat_targets[:, Offsets.GO] = 0
if len(inputs.shape) > 2:
max_per_vocab = inputs.max(0)[0]
loss = self.crit(max_per_vocab, flat_targets)
else:
loss = self.crit(inputs, flat_targets)
return self._norm(loss, inputs)
class SequenceCriterion(nn.Module):
def __init__(self, LossFn=nn.NLLLoss, avg='token'):
super().__init__()
        if avg == 'token':
            self.crit = LossFn(ignore_index=Offsets.PAD, reduction='mean')
            self._norm = self._no_norm
        else:
            self.crit = LossFn(ignore_index=Offsets.PAD, reduction='sum')
self._norm = self._batch_norm
def _batch_norm(self, loss, inputs):
return loss / inputs.size()[0]
def _no_norm(self, loss, inputs):
return loss
def forward(self, inputs, targets):
"""Evaluate some loss over a sequence.
:param inputs: torch.FloatTensor, [B, .., C] The scores from the model. Batch First
:param targets: torch.LongTensor, The labels.
:returns: torch.FloatTensor, The loss.
"""
total_sz = targets.nelement()
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
return self._norm(loss, inputs)
def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None, stride=1, bias=True, groups=1):
c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding, stride=stride, bias=bias, groups=groups)
if unif > 0:
c.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal_(c.weight)
if bias:
nn.init.constant_(c.bias, 0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform_(c.weight)
if bias:
nn.init.constant_(c.bias, 0)
elif initializer == "normal":
        nn.init.normal_(c.weight, mean=0, std=unif)
if bias:
nn.init.constant_(c.bias, 0)
else:
nn.init.xavier_uniform_(c.weight)
if bias:
nn.init.constant_(c.bias, 0)
return c
def tie_weight(to_layer, from_layer):
"""Assigns a weight object to the layer weights.
This method exists to duplicate baseline functionality across packages.
:param to_layer: the pytorch layer to assign weights to
:param from_layer: pytorch layer to retrieve weights from
"""
to_layer.weight = from_layer.weight
class BilinearAttention(nn.Module):
def __init__(self, in_hsz: int, out_hsz: int = 1, bias_x: bool = True, bias_y: bool = True):
super().__init__()
self.in_hsz = in_hsz
self.out_hsz = out_hsz
self.bias_x = bias_x
self.bias_y = bias_y
a1 = in_hsz
a2 = in_hsz
if self.bias_x:
a1 += 1
if self.bias_y:
a2 += 1
self.weight = nn.Parameter(torch.Tensor(out_hsz, in_hsz + bias_x, in_hsz + bias_y))
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.weight)
#nn.init.orthogonal_(self.weight)
def forward(self, x, y, mask):
r"""
Args:
x: ``[B, T, H]``.
y: ``[B, T, H]``.
Returns:
~torch.Tensor:
                A scoring tensor of shape ``[batch_size, out_hsz, seq_len, seq_len]``.
                If ``out_hsz=1``, the dimension for ``out_hsz`` will be squeezed automatically.
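        Example (an illustrative sketch, not from the original source; sizes are assumptions)::
            >>> bilinear = BilinearAttention(in_hsz=4)
            >>> x = torch.randn(2, 5, 4)
            >>> mask = torch.ones(2, 5)
            >>> bilinear(x, x, mask).shape
            torch.Size([2, 5, 5])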
"""
        if self.bias_x:
            ones = torch.ones(x.shape[:-1] + (1,), device=x.device)
            x = torch.cat([x, ones], -1)
        if self.bias_y:
            # use y's shape here so x and y may have different sequence lengths
            ones = torch.ones(y.shape[:-1] + (1,), device=y.device)
            y = torch.cat([y, ones], -1)
x = x.unsqueeze(1)
y = y.unsqueeze(1)
u = x @ self.weight
s = u @ y.transpose(-2, -1)
if self.out_hsz == 1:
s = s.squeeze(1)
s = s.masked_fill((mask.bool() == MASK_FALSE).unsqueeze(1), -1e9)
return s
class TripletLoss(nn.Module):
"""Provide a Triplet Loss using the reversed batch for negatives"""
def __init__(self, model):
super().__init__()
self.score = nn.CosineSimilarity(dim=1)
self.model = model
def forward(self, inputs, targets):
# reverse the batch and use as a negative example
neg = targets.flip(0)
query = self.model.encode_query(inputs)
response = self.model.encode_response(targets)
neg_response = self.model.encode_response(neg)
pos_score = self.score(query, response)
neg_score = self.score(query, neg_response)
score = neg_score - pos_score
score = score.masked_fill(score < 0.0, 0.0).sum(0)
return score
class ContrastiveLoss(nn.Module):
def __init__(self, model, t=1.0, train_temperature=True):
super().__init__()
self.model = model
if t is None:
t = 1.0
self.t = nn.Parameter(torch.tensor(t).float(), requires_grad=train_temperature)
def forward(self, inputs, targets):
query = self.model.encode_query(inputs) # [B, H]
response = self.model.encode_response(targets) # [B, H]
query = F.normalize(query, p=2, dim=1)
response = F.normalize(response, p=2, dim=1)
labels = torch.arange(query.shape[0], device=query.device)
logits = torch.mm(query, response.T) * self.t.exp()
loss = F.cross_entropy(logits, labels)
return loss
class SymmetricContrastiveLoss(nn.Module):
def __init__(self, model, t=1.0, train_temperature=True):
super().__init__()
self.model = model
if t is None:
t = 1.0
self.t = nn.Parameter(torch.tensor(t).float(), requires_grad=train_temperature)
def forward(self, inputs, targets):
query = self.model.encode_query(inputs) # [B, H]
response = self.model.encode_response(targets) # [B, H]
query = F.normalize(query, p=2, dim=1)
response = F.normalize(response, p=2, dim=1)
labels = torch.arange(query.shape[0], device=query.device)
logits = torch.mm(query, response.T) * self.t.exp()
loss_1 = F.cross_entropy(logits, labels)
loss_2 = F.cross_entropy(logits.T, labels)
loss = (loss_1 + loss_2) * 0.5
return loss
class AllLoss(nn.Module):
def __init__(self, model, warmup_steps=10000, reduction_type='sum'):
r"""Loss from here https://arxiv.org/pdf/1705.00652.pdf see section 4
We want to minimize the negative log prob of y given x
-log P(y|x)
P(y|x) P(x) = P(x, y) Chain Rule of Probability
P(y|x) = P(x, y) / P(x) Algebra
        P(y|x) = P(x, y) / \sum_{\hat{y}} P(x, y=\hat{y})      Marginalize over all possible ys to get the probability of x
        P_approx(y|x) = P(x, y) / \sum_{i=1}^k P(x, y_i)        Approximate the marginalization by just using the ys in the batch
        S(x, y) is the score (cosine similarity between x and y in this case) from our neural network
        P(x, y) = e^S(x, y)
        P(y|x) = e^S(x, y) / \sum_{i=1}^k e^S(x, y_i)
        log P(y|x) = log(e^S(x, y) / \sum_{i=1}^k e^S(x, y_i))
        log P(y|x) = S(x, y) - log \sum_{i=1}^k e^S(x, y_i)
        -log P(y|x) = -(S(x, y) - log \sum_{i=1}^k e^S(x, y_i))
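        Example (an illustrative sketch, not from the original source; ``_Toy`` is a made-up stand-in
        exposing the attributes this loss expects)::
            >>> class _Toy(nn.Module):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.embeddings = nn.Embedding(10, 16)
            ...         self.embeddings.output_dim = 16
            ...     def encode_query(self, x): return x
            ...     def encode_response(self, x): return x
            >>> loss_fn = AllLoss(_Toy())
            >>> loss = loss_fn(torch.randn(4, 16), torch.randn(4, 16))  # scalar tensor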
"""
super().__init__()
self.score = nn.CosineSimilarity(dim=-1)
self.model = model
self.max_scale = math.sqrt(self.model.embeddings.output_dim)
self.steps = 0
self.warmup_steps = warmup_steps
self.reduction = torch.mean if reduction_type == 'mean' else torch.sum
def forward(self, inputs, targets):
# This is the cosine distance annealing referred to in https://arxiv.org/pdf/1911.03688.pdf
fract = min(self.steps / self.warmup_steps, 1)
c = (self.max_scale-1) * fract + 1
self.steps += 1
# These will get broadcast to [B, B, H]
query = self.model.encode_query(inputs).unsqueeze(1) # [B, 1, H]
response = self.model.encode_response(targets).unsqueeze(0) # [1, B, H]
# all_scores is now a batch x batch matrix where index (i, j) is the score between
# the i^th x vector and the j^th y vector
all_score = c * self.score(query, response) # [B, B]
# The diagonal has the scores of correct pair, (i, i)
pos_score = torch.diag(all_score)
# vec_log_sum_exp will calculate the batched log_sum_exp in a numerically stable way
# the result is a [B, 1] vector which we squeeze to make it [B] to match the diag
# Because we are minimizing the negative log we turned the division into a subtraction here
loss = pos_score - vec_log_sum_exp(all_score, -1).squeeze()
# Batch loss
loss = self.reduction(loss)
# minimize the negative loss
return -loss
class CosineSimilarityLoss(nn.Module):
def __init__(self, neg_value=0.3, pos_value=0.8):
super().__init__()
self.pos_value = pos_value
self.neg_value = neg_value
def forward(self, embeddings_reduction, labels):
hsz = int(embeddings_reduction.shape[-1]//2)
label_values = torch.zeros_like(labels, dtype=torch.float)
label_values[labels == 0] = self.neg_value
label_values[labels == 1] = self.pos_value
output = torch.cosine_similarity(embeddings_reduction[:,:hsz], embeddings_reduction[:,hsz:])
loss = F.mse_loss(output, label_values.view(-1), reduction='mean')
return loss
class OnlineContrastiveLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, embeddings_reduction, labels):
hsz = int(embeddings_reduction.shape[-1]//2)
x = embeddings_reduction[:,:hsz]
y = embeddings_reduction[:,hsz:]
distance_matrix = 1-F.cosine_similarity(x, y)
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(0.5 - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
class TwoHeadConcat(AttentionReduction):
"""Use two parallel SingleHeadReduction, and concatenate the outputs. It is used in the conveRT
paper (https://arxiv.org/pdf/1911.03688.pdf)"""
def __init__(self, d_model, dropout, scale=False, d_k=None, pooling='sqrt_length'):
"""Two parallel 1-head self-attention, then concatenate the output
:param d_model: dim of the self-attention
:param dropout: dropout of the self-attention
        :param scale: scale of the self-attention
:param d_k: d_k of the self-attention
:return: concatenation of the two 1-head attention
"""
super().__init__()
self.output_dim = 2*d_model
self.reduction1 = SingleHeadReduction(d_model, dropout, scale=scale, d_k=d_k, pooling=pooling)
self.reduction2 = SingleHeadReduction(d_model, dropout, scale=scale, d_k=d_k, pooling=pooling)
def forward(self, inputs: torch.Tensor):
x = inputs
encoding1 = self.reduction1(x)
encoding2 = self.reduction2(x)
x = torch.cat([encoding1, encoding2], dim=-1)
return x
class ConveRTFFN(nn.Module):
"""Implementation of the FFN layer from the convert paper (https://arxiv.org/pdf/1911.03688.pdf)"""
def __init__(self, insz, hszs, outsz, pdrop):
"""
:param insz: input dim
:param hszs: list of hidden sizes
:param outsz: output dim
:param pdrop: dropout of each hidden layer
"""
super().__init__()
self.dense_stack = DenseStack(insz,
hszs,
activation='gelu',
pdrop_value=pdrop,
skip_connect=True,
layer_norm=True)
self.final = Dense(hszs[-1], outsz)
self.proj = Dense(insz, outsz) if insz != outsz else nn.Identity()
self.ln1 = nn.LayerNorm(insz, eps=1e-6)
self.ln2 = nn.LayerNorm(outsz, eps=1e-6)
def forward(self, inputs):
x = self.ln1(inputs)
x = self.dense_stack(x)
x = self.final(x)
x = x + self.proj(inputs)
return self.ln2(x)
class DualEncoderModel(nn.Module):
"""Abstract base for dual encoders
We can assume that our dual encoder needs to end up in the same output plane between the encoders, and we can define
the set of losses here that we are likely to need for most.
"""
def __init__(self, in_sz: int, stacking_layers: Union[int, List[int]] = None, d_out: int = 512,
ffn_pdrop=0.1, in_sz_2=None, output_layer=False, output_activation='tanh', output_shared=False):
super().__init__()
if not in_sz_2:
in_sz_2 = in_sz
if stacking_layers:
stacking_layers = listify(stacking_layers)
if stacking_layers:
self.ff1 = ConveRTFFN(in_sz, stacking_layers, d_out, ffn_pdrop)
self.ff2 = ConveRTFFN(in_sz_2, stacking_layers, d_out, ffn_pdrop)
elif output_layer or in_sz != d_out or in_sz != in_sz_2:
activation = output_activation if output_layer else None
self.ff1 = Dense(in_sz, d_out, activation=activation)
if in_sz == in_sz_2 and output_shared:
self.ff2 = self.ff1
else:
self.ff2 = Dense(in_sz_2, d_out, activation=activation)
else:
self.ff1 = nn.Identity()
self.ff2 = nn.Identity()
self.output_dim = d_out
def encode_query_base(self, query: torch.Tensor) -> torch.Tensor:
pass
def encode_response_base(self, response: torch.Tensor) -> torch.Tensor:
pass
def encode_query(self, query: torch.Tensor) -> torch.Tensor:
tensor = self.encode_query_base(query)
return self.ff1(tensor)
def encode_response(self, response: torch.Tensor) -> torch.Tensor:
tensor = self.encode_response_base(response)
return self.ff2(tensor)
def forward(self, query, response):
encoded_query = self.encode_query(query)
encoded_response = self.encode_response(response)
return encoded_query, encoded_response
def create_loss(self, loss_type='symmetric', init_temp=None, learn_temp=False):
if loss_type == 'all':
return AllLoss(self)
elif loss_type == 'all_mean':
return AllLoss(self, reduction_type='mean')
elif loss_type == 'contrastive':
return ContrastiveLoss(self, init_temp, learn_temp)
elif loss_type == 'symmetric':
return SymmetricContrastiveLoss(self, init_temp, learn_temp)
return TripletLoss(self)
class BasicDualEncoderModel(DualEncoderModel):
"""A simple encoder where the encoders are injected and supply the `encode_query_base` and `encode_response_base`
"""
def __init__(self, encoder_1: nn.Module, encoder_2: nn.Module, stacking_layers: Union[int, List[int]] = None, d_out: int = 512, ffn_pdrop=0.1):
super().__init__(encoder_1.output_dim, stacking_layers, d_out, ffn_pdrop, in_sz_2=encoder_2.output_dim)
self.encoder_1 = encoder_1
self.encoder_2 = encoder_2
def encode_query_base(self, query: torch.Tensor) -> torch.Tensor:
return self.encoder_1(query)
def encode_response_base(self, response: torch.Tensor) -> torch.Tensor:
return self.encoder_2(response)
class PairedModel(DualEncoderModel):
"""Legacy model for transformer-based dual encoder
This is a dual-encoder transformer model which shares the lower layer encoder transformer sub-graph
The reduction layer is attention based and takes the same input as the transformer layers. It pools the reprs
Finally, the feed-forward stacks are applied via subclassing.
Note that this model predates the more abstract `AbstractDualEncoder` which could accomplish the same thing
by injecting the same `nn.Module` for encoder_1 and encoder_2 consisting of the transformer and reduction
"""
def __init__(self, embeddings,
d_model: int,
d_ff: int,
dropout: float,
num_heads: int,
num_layers: int,
stacking_layers: Optional[nn.Module] = None,
d_out: Optional[int] = None,
d_k: Optional[int] = None,
weight_std: float = 0.02,
rpr_k: Optional[int] = None,
reduction_d_k: int = 64,
ffn_pdrop: float = 0.1,
windowed_ra: bool = False,
rpr_value_on: bool = False,
reduction_type: str = "2ha",
freeze_encoders: bool = False,
layer_norms_after: bool = False,
embeddings_reduction: str = 'sum',
layer_norm_eps: float=1e-6,
output_layer: bool = False,
output_activation: str = 'tanh',
output_shared: bool = False,
transformer_type: Optional[str]=None,
**kwargs):
super().__init__(2*d_model if reduction_type.startswith("2") else d_model, stacking_layers,
d_out if d_out is not None else d_model, ffn_pdrop, None, output_layer,
output_activation, output_shared)
reduction_type = reduction_type.lower()
self.reduce_fn = self._reduce_3
if reduction_type == "2ha":
self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k)
elif reduction_type == "2ha_mean":
self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
elif reduction_type == "2ha_max":
self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
elif reduction_type == "sha":
self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k)
elif reduction_type == "sha_mean":
self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
elif reduction_type == "sha_max":
self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
elif reduction_type == 'max':
self.reduce_fn = self._reduce_1
self.reduction_layer = MaxPool1D(self.output_dim)
elif reduction_type == 'mean':
self.reduce_fn = self._reduce_1
self.reduction_layer = MeanPool1D(self.output_dim)
elif reduction_type == 'cls' or reduction_type == 'zero':
self.reduce_fn = self._reduce_0
else:
raise Exception("Unknown exception type")
self.weight_std = weight_std
ra_type = kwargs.get('ra_type')
self.transformer = TransformerEncoderStack(num_heads=num_heads, d_model=d_model,
pdrop=dropout, layers=num_layers, activation='gelu', d_ff=d_ff,
ffn_pdrop=ffn_pdrop,
d_k=d_k, rpr_k=rpr_k, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on,
layer_norms_after=layer_norms_after, layer_norm_eps=layer_norm_eps,
ra_type=ra_type, transformer_type=transformer_type)
self.embeddings = EmbeddingsStack({'x': embeddings}, 0.0, False, embeddings_reduction)
self.freeze = freeze_encoders
self.apply(self.init_layer_weights)
def init_layer_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
module.weight.data.normal_(mean=0.0, std=self.weight_std)
if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
module.bias.data.zero_()
def _reduce_3(self, encoded, att_mask):
"""The attention modules originally created for DE have 3 (redundant) inputs, so use all 3 here
"""
return self.reduction_layer((encoded, encoded, encoded, att_mask))
def _reduce_1(self, encoded, att_mask):
"""The standard reduction modules use an input and a length
"""
lengths = att_mask.squeeze(1).squeeze(1).sum(-1)
return self.reduction_layer((encoded, lengths))
def _reduce_0(self, encoded, _):
"""The [CLS] or <s> reduction on the first token just needs the first timestep
"""
return encoded[:, 0]
def encode_query_base(self, query):
query_mask = (query != Offsets.PAD)
att_mask = query_mask.unsqueeze(1).unsqueeze(1)
with torch.no_grad() if self.freeze else contextlib.ExitStack():
embedded = self.embeddings({'x': query})
encoded_query = self.transformer((embedded, att_mask))
encoded_query = self.reduce_fn(encoded_query, att_mask)
return encoded_query
def encode_response_base(self, response):
response_mask = (response != Offsets.PAD)
att_mask = response_mask.unsqueeze(1).unsqueeze(1)
with torch.no_grad() if self.freeze else contextlib.ExitStack():
embedded = self.embeddings({'x': response})
encoded_response = self.transformer((embedded, att_mask))
encoded_response = self.reduce_fn(encoded_response, att_mask)
return encoded_response
class TransformerBoWPairedModel(DualEncoderModel):
"""2 Encoders (E1, E2). E1 is a Transformer followed by attention reduction. E2 is just a pooling of embeddings
"""
def __init__(self, embeddings,
d_model,
d_ff,
dropout,
num_heads,
num_layers,
stacking_layers=None,
d_out=512,
d_k=None,
weight_std=0.02,
rpr_k=None,
reduction_d_k=64,
ffn_pdrop=0.1,
windowed_ra=False,
rpr_value_on=False,
reduction_type_1="2ha",
freeze_encoders=False,
layer_norms_after=False,
transformer_type: Optional[str]=None,
**kwargs):
super().__init__(d_model, stacking_layers, d_out, ffn_pdrop)
reduction_type_1 = reduction_type_1.lower()
if reduction_type_1 == "2ha":
self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k),
nn.Linear(2*d_model, d_model))
elif reduction_type_1 == "2ha_mean":
self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean"),
nn.Linear(2 * d_model, d_model))
elif reduction_type_1 == "2ha_max":
self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max"),
nn.Linear(2 * d_model, d_model))
elif reduction_type_1 == "sha":
self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k)
elif reduction_type_1 == "sha_mean":
self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
elif reduction_type_1 == "sha_max":
self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
else:
raise Exception("Unknown exception type")
self.weight_std = weight_std
ra_type = kwargs.get('ra_type')
self.transformer = TransformerEncoderStack(num_heads=num_heads, d_model=d_model,
pdrop=dropout, layers=num_layers, activation='gelu', d_ff=d_ff,
ffn_pdrop=ffn_pdrop,
d_k=d_k, rpr_k=rpr_k, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on,
layer_norms_after=layer_norms_after, ra_type=ra_type, transformer_type=transformer_type)
self.embeddings = EmbeddingsStack({'x': embeddings})
self.freeze = freeze_encoders
self.reduction_layer_2 = MaxPool1D(d_out) if reduction_type_1.endswith('max') else MeanPool1D(d_out)
self.apply(self.init_layer_weights)
def init_layer_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
module.weight.data.normal_(mean=0.0, std=self.weight_std)
if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
module.bias.data.zero_()
def encode_query_base(self, query):
query_mask = (query != Offsets.PAD)
att_mask = query_mask.unsqueeze(1).unsqueeze(1)
with torch.no_grad() if self.freeze else contextlib.ExitStack():
embedded = self.embeddings({'x': query})
encoded_query = self.transformer((embedded, att_mask))
encoded_query = self.reduction_layer_1((encoded_query, encoded_query, encoded_query, att_mask))
return encoded_query
def encode_response_base(self, response):
response_lengths = torch.sum(response != Offsets.PAD, dim=1)
with torch.no_grad() if self.freeze else contextlib.ExitStack():
embedded = self.embeddings({'x': response})
encoded_response = self.reduction_layer_2((embedded, response_lengths))
return encoded_response
class CudaTimer:
"""A CUDA timer context manager that can be used to track and record events
    The timer is only enabled if `MEAD_PYTORCH_TIMER` is set to a truthy value. When it is enabled, it
    will cause a large slowdown (similar to `CUDA_LAUNCH_BLOCKING`).
"""
def __init__(self, name, sync_before=True):
"""
        :param name: A label that is printed alongside the elapsed time
        :param sync_before: If `True`, synchronize CUDA before starting the timer
"""
self.enabled = str2bool(os.getenv('MEAD_PYTORCH_TIMER', False))
if self.enabled:
self._name = name
self._start = torch.cuda.Event(enable_timing=True)
self._end = torch.cuda.Event(enable_timing=True)
if sync_before:
torch.cuda.synchronize()
def __enter__(self):
if self.enabled:
self._start.record()
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.enabled:
self._end.record()
torch.cuda.synchronize()
elapsed = self._start.elapsed_time(self._end)
print(f"({os.getpid()}) {self._name} {elapsed}")
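# Illustrative sketch (added for exposition, not part of the original module): CudaTimer is used
# as a context manager around a GPU operation. It is a no-op unless the MEAD_PYTORCH_TIMER
# environment variable is set to a truthy value. The helper name below is hypothetical.
def _cuda_timer_example():
    if not torch.cuda.is_available():
        return
    x = torch.randn(1024, 1024, device="cuda")
    with CudaTimer("matmul"):
        _ = x @ x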
class WeightedNLLLoss(nn.Module):
"""Weight individual training examples
"""
def __init__(self):
super().__init__()
self.loss = nn.NLLLoss(reduction='none')
def forward(self, pred, y, weight):
loss = self.loss(pred, y)
weight = weight.type_as(loss)
return torch.dot(loss, weight)/len(weight)
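# Illustrative sketch (added for exposition, not part of the original module): WeightedNLLLoss
# expects log-probabilities [B, C], integer targets [B] and one weight per example; the weighted
# per-example losses are averaged over the batch. The helper name below is hypothetical.
def _weighted_nll_example():
    crit = WeightedNLLLoss()
    log_probs = F.log_softmax(torch.randn(4, 3), dim=-1)  # [B, C]
    targets = torch.randint(0, 3, (4,))                  # [B]
    weights = torch.tensor([1.0, 1.0, 0.5, 2.0])         # one weight per example
    return crit(log_probs, targets, weights)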
class WeightedMultiHeadNLLLoss(nn.Module):
"""Weight individual training examples with multiple heads
"""
def __init__(self):
super().__init__()
self.loss = nn.NLLLoss(reduction='none')
def forward(self, preds, targets, weights):
loss = sum([self.loss(pred, targets[:, i]) for i, pred in enumerate(preds)])
weights = weights.type_as(loss)
return torch.dot(loss, weights)/len(weights)
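# Illustrative sketch (added for exposition, not part of the original module): with H prediction
# heads, `preds` is a list of H log-probability tensors, each [B, C], `targets` is [B, H] and
# `weights` is [B]; the head losses are summed before the per-example weights are applied. The
# helper name below is hypothetical.
def _weighted_multihead_nll_example():
    crit = WeightedMultiHeadNLLLoss()
    preds = [F.log_softmax(torch.randn(4, 3), dim=-1) for _ in range(2)]  # H = 2 heads
    targets = torch.randint(0, 3, (4, 2))                                 # [B, H]
    weights = torch.ones(4)                                               # [B]
    return crit(preds, targets, weights)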
class WeightedSequenceLoss(nn.Module):
"""Weight individual training examples
"""
def __init__(self, LossFn: nn.Module = nn.NLLLoss, avg: str = "token"):
super().__init__()
self.avg = avg
self.crit = LossFn(ignore_index=Offsets.PAD, reduction="none")
if avg == 'token':
self._reduce = self._mean
else:
self._reduce = self._sum
def _mean(self, loss):
return loss.mean(axis=1)
def _sum(self, loss):
return loss.sum(axis=1)
def forward(self, inputs: torch.Tensor, targets: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""Evaluate some loss over a sequence.
:param inputs: torch.FloatTensor, [B, T, C] The scores from the model. Batch First
:param targets: torch.LongTensor, [B, T] The labels.
:param weight: sample weights [B, ]
:returns: torch.FloatTensor, The loss.
"""
total_sz = targets.nelement()
batchsz = weight.shape[0]
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz)).view(batchsz, -1) # [B, T]
loss = torch.dot(self._reduce(loss), weight.type_as(loss)) / batchsz
return loss
def extra_repr(self):
return f"reduction={self.avg}"
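# Illustrative sketch (added for exposition, not part of the original module): WeightedSequenceLoss
# scores a whole sequence per example. With the default nn.NLLLoss criterion, `inputs` holds
# log-probabilities [B, T, C], `targets` is [B, T] (PAD positions are ignored) and `weight` is one
# scalar per example. The helper name below is hypothetical.
def _weighted_sequence_loss_example():
    crit = WeightedSequenceLoss(nn.NLLLoss, avg="token")
    scores = F.log_softmax(torch.randn(2, 5, 7), dim=-1)  # [B, T, C]
    targets = torch.randint(1, 7, (2, 5))                   # [B, T]; values avoid the PAD index (Offsets.PAD assumed 0)
    weights = torch.tensor([1.0, 0.25])                     # [B]
    return crit(scores, targets, weights)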
flother/pdf-search | setup.py | fa4c519a673bf5a5d25e1ab44e971690ab3cf781 | from setuptools import setup
setup(
name='espdf',
version='0.1.0-dev',
url='https://github.com/flother/pdf-search',
py_modules=(
'espdf',
),
install_requires=(
'certifi',
'elasticsearch-dsl',
),
entry_points={
'console_scripts': (
'espdf=espdf:cli',
),
},
)
| [((31, 260), 'setuptools.setup', 'setup', ([], {'name': '"""espdf"""', 'version': '"""0.1.0-dev"""', 'url': '"""https://github.com/flother/pdf-search"""', 'py_modules': "('espdf',)", 'install_requires': "('certifi', 'elasticsearch-dsl')", 'entry_points': "{'console_scripts': ('espdf=espdf:cli',)}"}), "(name='espdf', version='0.1.0-dev', url=\n 'https://github.com/flother/pdf-search', py_modules=('espdf',),\n install_requires=('certifi', 'elasticsearch-dsl'), entry_points={\n 'console_scripts': ('espdf=espdf:cli',)})\n", (36, 260), False, 'from setuptools import setup\n')] |
martexcoin/pywallet | pywallet/network.py | dca53f124452869890b0247c40afba821b650c6b | class BitcoinGoldMainNet(object):
"""Bitcoin Gold MainNet version bytes. """
NAME = "Bitcoin Gold Main Net"
COIN = "BTG"
SCRIPT_ADDRESS = 0x17 # int(0x17) = 23
PUBKEY_ADDRESS = 0x26 # int(0x26) = 38 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488b21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/0'/0'/"
class BitcoinCashMainNet(object):
"""Bitcoin Cash MainNet version bytes."""
NAME = "Bitcoin Cash Main Net"
COIN = "BCH"
SCRIPT_ADDRESS = 0x28 # int(0x28) = 40
    PUBKEY_ADDRESS = 0x1C # int(0x1C) = 28 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488b21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/145'/0'/"
class DashMainNet(object):
"""Dash MainNet version bytes."""
NAME = "Dash Main Net"
COIN = "DASH"
SCRIPT_ADDRESS = 0x10 # int(0x10) = 16
PUBKEY_ADDRESS = 0x4C # int(0x4C) = 76 # Used to create payment addresses
SECRET_KEY = 0xCC # int(0xCC) = 204 # Used for WIF format
EXT_PUBLIC_KEY = 0X0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0X0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/5'/0'/"
class DashTestNet(object):
"""Dash TestNet version bytes."""
NAME = "Dash Test Net"
COIN = "DASH"
SCRIPT_ADDRESS = 0x13 # int(0x13) = 19
PUBKEY_ADDRESS = 0x8C # int(0x8C) = 140 # Used to create payment addresses
SECRET_KEY = 0xEF # int(0xEF) = 239 # Used for WIF format
EXT_PUBLIC_KEY = 0x043587CF # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x04358394 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/1'/0'/"
class MarteXMainNet(object):
"""MarteX MainNet version bytes."""
NAME = "MarteX Main Net"
COIN = "MXT"
SCRIPT_ADDRESS = 0x05 # int(0x05) = 05
PUBKEY_ADDRESS = 0x32 # int(0x32) = 50 # Used to create payment addresses
SECRET_KEY = 0xB2 # int(0xB2) = 178 # Used for WIF format
EXT_PUBLIC_KEY = 0X0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0X0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/180'/0'/"
class MarteXTestNet(object):
"""MarteX TestNet version bytes."""
NAME = "MarteX Test Net"
COIN = "MXT"
SCRIPT_ADDRESS = 0xC4 # int(0xC4) = 196
    PUBKEY_ADDRESS = 0x6C # int(0x6C) = 108 # Used to create payment addresses
SECRET_KEY = 0x144 # int(0x144) = 324 # Used for WIF format
EXT_PUBLIC_KEY = 0x043587CF # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x04358394 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/1'/0'/"
class OmniMainNet(object):
"""Bitcoin MainNet version bytes.
From https://github.com/OmniLayer/omnicore/blob/develop/src/chainparams.cpp
"""
NAME = "Omni Main Net"
COIN = "USDT"
SCRIPT_ADDRESS = 0x00 # int(0x00) = 0
PUBKEY_ADDRESS = 0x05 # int(0x05) = 5 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/0'/0'/"
class OmniTestNet(object):
"""Bitcoin MainNet version bytes.
From https://github.com/OmniLayer/omnicore/blob/develop/src/chainparams.cpp
"""
NAME = "Omni Test Net"
COIN = "USDT"
SCRIPT_ADDRESS = 0x6f # int(0x6f) = 111
PUBKEY_ADDRESS = 0xc4 # int(0xc4) = 196 # Used to create payment addresses
SECRET_KEY = 0xef # int(0xef) = 239 # Used for WIF format
EXT_PUBLIC_KEY = 0x043587CF # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x04358394 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/0'/0'/"
class BitcoinMainNet(object):
"""Bitcoin MainNet version bytes.
From https://github.com/bitcoin/bitcoin/blob/v0.9.0rc1/src/chainparams.cpp
"""
NAME = "Bitcoin Main Net"
COIN = "BTC"
SCRIPT_ADDRESS = 0x05 # int(0x05) = 5
PUBKEY_ADDRESS = 0x00 # int(0x00) = 0 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/0'/0'/"
class FeathercoinMainNet(object):
"""Feathercoin MainNet version bytes.
From https://github.com/FeatherCoin/Feathercoin/blob/master-0.13/src/chainparams.cpp
"""
NAME = "Feathercoin Main Net"
COIN = "FTC"
SCRIPT_ADDRESS = 0x05 # int(0x05) = 5
PUBKEY_ADDRESS = 0x0E # int(0x0E) = 14 # Used to create payment addresses
SECRET_KEY = 0x8E # int(0x8E) = 142 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488BC26 # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488DAEE # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/4'/0'/"
class BitcoinTestNet(object):
"""Bitcoin TestNet version bytes.
From https://github.com/bitcoin/bitcoin/blob/v0.9.0rc1/src/chainparams.cpp
"""
NAME = "Bitcoin Test Net"
COIN = "BTC"
SCRIPT_ADDRESS = 0xc4 # int(0xc4) = 196
PUBKEY_ADDRESS = 0x6f # int(0x6f) = 111
SECRET_KEY = 0xEF # int(0xef) = 239
EXT_PUBLIC_KEY = 0x043587CF
EXT_SECRET_KEY = 0x04358394
BIP32_PATH = "m/44'/1'/0'/"
class LitecoinMainNet(object):
"""Litecoin MainNet version bytes
Primary version bytes from:
https://github.com/litecoin-project/litecoin/blob/master-0.8/src/base58.h
Unofficial extended version bytes from
https://bitcointalk.org/index.php?topic=453395.0
"""
NAME = "Litecoin Main Net"
COIN = "LTC"
SCRIPT_ADDRESS = 0x05 # int(0x05) = 5
PUBKEY_ADDRESS = 0x30 # int(0x30) = 48
SECRET_KEY = PUBKEY_ADDRESS + 128 # = int(0xb0) = 176
# Unofficial extended version bytes taken from
# https://bitcointalk.org/index.php?topic=453395.0
# EXT_PUBLIC_KEY = 0x019da462
# EXT_SECRET_KEY = 0x019d9cfe
# same as Bitcoin's
# https://github.com/ranaroussi/pywallet/issues/6
EXT_PUBLIC_KEY = 0x0488B21E
EXT_SECRET_KEY = 0x0488ADE4
BIP32_PATH = "m/44'/2'/0'/"
class LitecoinTestNet(object):
"""Litecoin TestNet version bytes
Primary version bytes from:
https://github.com/litecoin-project/litecoin/blob/master-0.8/src/base58.h
Unofficial extended version bytes from
https://bitcointalk.org/index.php?topic=453395.0
"""
NAME = "Litecoin Test Net"
COIN = "LTC"
SCRIPT_ADDRESS = 0xc4 # int(0xc4) = 196
PUBKEY_ADDRESS = 0x6f # int(0x6f) = 111
SECRET_KEY = PUBKEY_ADDRESS + 128 # = int(0xef) = 239
# Unofficial extended version bytes taken from
# https://bitcointalk.org/index.php?topic=453395.0
# EXT_PUBLIC_KEY = 0x0436f6e1
# EXT_SECRET_KEY = 0x0436ef7d
# same as Bitcoin's
# https://github.com/ranaroussi/pywallet/issues/6
EXT_PUBLIC_KEY = 0x043587CF
EXT_SECRET_KEY = 0x04358394
BIP32_PATH = "m/44'/1'/0'/"
class DogecoinMainNet(object):
"""Dogecoin MainNet version bytes
Primary version bytes from:
https://github.com/dogecoin/dogecoin/blob/1.5.2/src/base58.h
Unofficial extended version bytes from
https://bitcointalk.org/index.php?topic=409731
"""
NAME = "Dogecoin Main Net"
COIN = "DOGE"
SCRIPT_ADDRESS = 0x16 # int(0x16) = 22
PUBKEY_ADDRESS = 0x1e # int(0x1e) = 30
SECRET_KEY = PUBKEY_ADDRESS + 128 # int(0x9e) = 158
# Unofficial extended version bytes taken from
# https://bitcointalk.org/index.php?topic=409731
EXT_PUBLIC_KEY = 0x02facafd
EXT_SECRET_KEY = 0x02fac398
BIP32_PATH = "m/44'/3'/0'/"
class DogecoinTestNet(object):
"""Dogecoin TestNet version bytes
Primary version bytes from:
https://github.com/dogecoin/dogecoin/blob/1.5.2/src/base58.h
Unofficial extended version bytes from
https://bitcointalk.org/index.php?topic=409731
"""
NAME = "Dogecoin Test Net"
COIN = "DOGE"
SCRIPT_ADDRESS = 0xc4 # int(0xc4) = 196
PUBKEY_ADDRESS = 0x71 # int(0x71) = 113
SECRET_KEY = PUBKEY_ADDRESS + 128 # int(0xf1) = 241
# Unofficial extended version bytes taken from
# https://bitcointalk.org/index.php?topic=409731
EXT_PUBLIC_KEY = 0x0432a9a8
EXT_SECRET_KEY = 0x0432a243
BIP32_PATH = "m/44'/1'/0'/"
class BlockCypherTestNet(object):
"""BlockCypher TestNet version bytes.
From http://dev.blockcypher.com/#testing
"""
NAME = "BlockCypher Test Net"
COIN = "BlockCypher"
SCRIPT_ADDRESS = 0x1f # int(0x1f) = 31
PUBKEY_ADDRESS = 0x1b # int(0x1b) = 27 # Used to create payment addresses
SECRET_KEY = 0x49 # int(0x49) = 73 # Used for WIF format
EXT_PUBLIC_KEY = 0x2d413ff # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x2d40fc3 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/1'/0'/"
class QtumMainNet(object):
"""Qtum MainNet version bytes
Primary version bytes from:
https://github.com/qtumproject/qtum/blob/master/src/chainparams.cpp
"""
NAME = "Qtum Main Net"
COIN = "QTUM"
SCRIPT_ADDRESS = 0x32 # int(0x32) = 50
PUBKEY_ADDRESS = 0x3A # int(0x3A) = 58 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/88'/0'/"
class QtumTestNet(object):
"""Qtum TestNet version bytes
Primary version bytes from:
https://github.com/qtumproject/qtum/blob/master/src/chainparams.cpp
"""
NAME = "Qtum Test Net"
COIN = "QTUM"
SCRIPT_ADDRESS = 0x6E # int(0x6e) = 110
PUBKEY_ADDRESS = 0x78 # int(0x78) = 120
SECRET_KEY = 0xEF # int(0xef) = 239
EXT_PUBLIC_KEY = 0x043587CF
EXT_SECRET_KEY = 0x04358394
BIP32_PATH = "m/44'/88'/0'/"
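# A minimal usage sketch: callers typically pick one of the classes above and
# read its prefix constants. describe_network is a hypothetical helper shown
# only for illustration; it is not part of the original module's API.
def describe_network(network=BitcoinMainNet):
    """Summarize a network's version bytes in a human-readable string."""
    return ("{} ({}): pubkey=0x{:02x}, script=0x{:02x}, "
            "wif=0x{:02x}, bip32_path={}".format(
                network.NAME, network.COIN, network.PUBKEY_ADDRESS,
                network.SCRIPT_ADDRESS, network.SECRET_KEY,
                network.BIP32_PATH))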
| [] |
sintef-ocean/conan-clapack | conanfile.py | 9c472130eaadee71253ced9b5fe25ee1b868bcb3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import shutil
class ClapackConan(ConanFile):
name = "clapack"
version = "3.2.1"
license = "BSD 3-Clause"
# BSD-3-Clause-Clear
url = "https://github.com/sintef-ocean/conan-clapack"
author = "SINTEF Ocean"
homepage = "http://www.netlib.org/clapack/"
description = \
"CLAPACK's goal is to provide LAPACK for someone who does " \
"not have access to a Fortran compiler"
topics = ("clapack", "LAPACK", "Port to C", "Numerical linear algebra")
settings = "os", "compiler", "build_type", "arch"
options = {
"fPIC": [True, False],
}
default_options = {
"fPIC": True,
}
generators = ("cmake_paths", "cmake_find_package")
exports = ["patch/*"]
source_file = "clapack-{}-CMAKE.tgz".format(version)
source_subfolder = source_file[:-4]
build_subfolder = "build_subfolder"
def source(self):
link = "http://www.netlib.org/clapack/" + self.source_file
tools.get(link, sha1="5ea1bcc4314e392bca8b9e5f61d44355cf9f4cc1")
tools.patch(patch_file="patch/MainCMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/SRC_CMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/F2C_CMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/BLAS_CMakeLists.patch",
base_path=self.source_subfolder)
shutil.move(self.source_subfolder + "/COPYING",
self.source_subfolder + "/LICENSE")
def build(self):
cmake = CMake(self)
if self.settings.os != "Windows":
cmake.definitions['CMAKE_POSITION_INDEPENDENT_CODE'] = self.options.fPIC
cmake.configure(source_folder=self.source_subfolder,
build_folder=self.build_subfolder)
cmake.build()
cmake.install()
    def package(self):
        # source() renames COPYING to LICENSE, so package the renamed file.
        self.copy("LICENSE", dst="licenses", src=self.source_subfolder,
                  ignore_case=True, keep_path=False)
def package_info(self):
self.cpp_info.name = 'CLAPACK'
if self.settings.compiler == "Visual Studio":
self.cpp_info.libs = ["libf2c", "blas", "lapack"]
if self.settings.build_type == "Debug":
for i in range(len(self.cpp_info.libs)):
self.cpp_info.libs[i] += 'd'
else:
self.cpp_info.libs = ["lapack", "blas", "f2c"]
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
del self.settings.compiler.libcxx
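    # Hedged usage note: with Conan 1.x this recipe can typically be built and
    # packaged locally with a command such as the following, where
    # user/channel is a placeholder:
    #   conan create . user/channel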
| [((1058, 1122), 'conans.tools.get', 'tools.get', (['link'], {'sha1': '"""5ea1bcc4314e392bca8b9e5f61d44355cf9f4cc1"""'}), "(link, sha1='5ea1bcc4314e392bca8b9e5f61d44355cf9f4cc1')\n", (1067, 1122), False, 'from conans import ConanFile, CMake, tools\n'), ((1132, 1222), 'conans.tools.patch', 'tools.patch', ([], {'patch_file': '"""patch/MainCMakeLists.patch"""', 'base_path': 'self.source_subfolder'}), "(patch_file='patch/MainCMakeLists.patch', base_path=self.\n source_subfolder)\n", (1143, 1222), False, 'from conans import ConanFile, CMake, tools\n'), ((1246, 1336), 'conans.tools.patch', 'tools.patch', ([], {'patch_file': '"""patch/SRC_CMakeLists.patch"""', 'base_path': 'self.source_subfolder'}), "(patch_file='patch/SRC_CMakeLists.patch', base_path=self.\n source_subfolder)\n", (1257, 1336), False, 'from conans import ConanFile, CMake, tools\n'), ((1360, 1450), 'conans.tools.patch', 'tools.patch', ([], {'patch_file': '"""patch/F2C_CMakeLists.patch"""', 'base_path': 'self.source_subfolder'}), "(patch_file='patch/F2C_CMakeLists.patch', base_path=self.\n source_subfolder)\n", (1371, 1450), False, 'from conans import ConanFile, CMake, tools\n'), ((1474, 1565), 'conans.tools.patch', 'tools.patch', ([], {'patch_file': '"""patch/BLAS_CMakeLists.patch"""', 'base_path': 'self.source_subfolder'}), "(patch_file='patch/BLAS_CMakeLists.patch', base_path=self.\n source_subfolder)\n", (1485, 1565), False, 'from conans import ConanFile, CMake, tools\n'), ((1589, 1676), 'shutil.move', 'shutil.move', (["(self.source_subfolder + '/COPYING')", "(self.source_subfolder + '/LICENSE')"], {}), "(self.source_subfolder + '/COPYING', self.source_subfolder +\n '/LICENSE')\n", (1600, 1676), False, 'import shutil\n'), ((1731, 1742), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (1736, 1742), False, 'from conans import ConanFile, CMake, tools\n')] |
ComputerSystemsLaboratory/YaCoS | yacos/algorithm/metaheuristics.py | abd5d3c6e227e5c7a563493f7855ebf58ba3de05 | """
Copyright 2021 Anderson Faustino da Silva.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from dataclasses import dataclass
import pygmo as pg
from yacos.essential import Sequence
from yacos.essential import IO
from yacos.essential import Engine
class Pygmo:
"""A Pygmo's strategy."""
__version__ = '1.0.0'
__flags = None
# {key: {'goal': float,
# 'seq': list}}
__results = None
# SGA
# {gen = {'fevals': int,
# 'best': float,
# 'improvement': float}}
#
# PSO
# {gen: {'fevals': int,
# 'gbest': float,
# 'meanvel': float,
# 'meanlbest': float,
# 'avgdist': float}
__log = None
class Problem:
"""Pygmo's problem."""
def __init__(self,
first_key,
last_key,
passes_dict,
dimension,
goal,
compiler,
benchmark_directory,
working_set,
times,
tool,
verify_output):
"""Construct a Pygmo problem.
Parameters
----------
first_key : int
The index of the first pass.
last_key : int
The index of the last pass.
passes_dict : dict
The dictionary with the available passes.
dimension : int
The length of a sequence.
goal : str
compiler : str
benchmark_directory : str
working_set : int
times: int
tool: str
Execution tool
verify_output: bool
The goal is valid only if the execution status is OK.
"""
self.first_key = first_key
self.last_key = last_key
self.passes_dict = passes_dict
self.dimension = dimension
self.goal = goal
self.compiler = compiler
self.benchmark_directory = benchmark_directory
self.working_set = working_set
self.times = times
self.tool = tool
self.verify_output = verify_output
def __deepcopy__(self,
*args,
**kwargs):
"""Deeep copy."""
return self
def fitness(self,
sequence):
"""Calculate and return the fitness."""
sequence = Sequence.fix_index(list(sequence))
sequence = Sequence.sanitize(sequence)
sequence = Sequence.index_pass_to_list(sequence,
self.passes_dict)
goal_value = Engine.evaluate(self.goal,
Sequence.name_pass_to_string(
sequence
),
self.compiler,
self.benchmark_directory,
self.working_set,
self.times,
self.tool,
self.verify_output)
return [goal_value]
def get_nix(self):
"""Integer dimension of the problem."""
return self.dimension
def get_bounds(self):
"""Box-bounds."""
return ([self.first_key] * self.dimension,
[self.last_key] * self.dimension)
def get_name(self):
"""Problem name."""
return 'Optimization Selection'
def get_extra_info(self):
"""Info."""
return '\tDimensions: ' + str(self.dimension)
@dataclass
class PygmoFlags:
"""Pygmo flags.
Parameters
----------
first_key : int
The index of the first pass.
last_key : int
The index of the last pass.
passes_dict : dict
The dictionary with the available passes.
dimension : int
The length of a sequence.
population : int
goals : dict
compiler : str
benchmarks_directory : str
working_set : int
The dataset to execute the benchmark.
times: int
Execution times
tool : str
Execution tool
verify_output: bool
The goal is valid only if the execution status is OK.
"""
first_key: int
last_key: int
passes_dict: dict
dimension: int
population: int
goals: dict
compiler: str
benchmarks_directory: str
working_set: int
times: int
tool: str
verify_output: bool
def __init__(self,
dimension,
population,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output):
"""Initialize the arguments.
Parameters
----------
dimension : int
The length of a sequence.
population : int
passes_filename : str
The file that describes the passes to use.
goals : dict
compiler : str
benchmarks_directory : str
working_set : int
The dataset to execute the benchmark.
times: int
Execution times
tool: str
Execution tool
verify_output: bool
The goal is valid only if the execution status is OK.
"""
first_key, last_key, passes_dict = IO.load_passes(passes_filename)
        # When the goal is measured at compile time and the compilation
        # itself does not depend on a dataset, the working set is not needed.
self.__flags = self.PygmoFlags(first_key,
last_key,
passes_dict,
dimension,
population,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output)
@property
def results(self):
"""Getter."""
return self.__results
@property
def log(self):
"""Getter."""
return self.__log
def exec(self, algorithm, benchmark):
"""Execute the algorithm.
        Parameters
        ----------
algorithm : Pygmo algorithm
benchmark : str
"""
# Step 1: Algorithm
algorithm = pg.algorithm(algorithm)
# algorithm.set_verbosity(1)
# Step 2: Instantiate a pygmo problem
index = benchmark.find('.')
        # Benchmark directory
bench_dir = os.path.join(self.__flags.benchmarks_directory,
benchmark[:index],
benchmark[index+1:])
problem = self.Problem(self.__flags.first_key,
self.__flags.last_key,
self.__flags.passes_dict,
self.__flags.dimension,
self.__flags.goals,
self.__flags.compiler,
bench_dir,
self.__flags.working_set,
self.__flags.times,
self.__flags.tool,
self.__flags.verify_output)
problem = pg.problem(problem)
# Step 3: The initial population
population = pg.population(problem,
self.__flags.population)
# Step 4: Evolve the population
population = algorithm.evolve(population)
# Step 5: Get the results
sga_sequence = population.get_x().tolist()
sga_fitness = population.get_f().tolist()
self.__results = {}
for index in range(self.__flags.population):
sequence = Sequence.index_pass_to_list(sga_sequence[index],
self.__flags.passes_dict)
goal_value = sga_fitness[index][0]
if goal_value == float('inf'):
continue
self.__results[index] = {'seq': sequence,
'goal': goal_value}
# Step 6: Get the log
self.__log = {}
if algorithm.get_name() == 'SGA: Genetic Algorithm':
uda = algorithm.extract(pg.sga)
log = uda.get_log()
for (gen, fevals, best, improvement) in log:
self.__log[gen] = {'fevals': fevals,
'best': best,
'improvement': improvement}
elif algorithm.get_name() == 'PSO: Particle Swarm Optimization':
uda = algorithm.extract(pg.pso)
log = uda.get_log()
for (gen, fevals, gbest, meanvel, meanlbest, avgdist) in log:
self.__log[gen] = {'fevals': fevals,
'gbest': gbest,
'meanvel': meanvel,
'meanlbest': meanlbest,
'avgdist': avgdist}
class SGA(Pygmo):
"""Simple Genetic Algorithm."""
__version__ = '1.0.0'
__flags = None
@dataclass
class Flags:
"""Pygmo flags.
Parameters
----------
generations : int
cr : float
Crossover probability
m : float
Mutation probability
param_m : float
Distribution index (polynomial mutation),
gaussian width (gaussian mutation) or
inactive (uniform mutation)
param_s : float
The number of best individuals to use in “truncated”
selection or the size of the tournament in
tournament selection.
crossover : str
exponential, binomial or single
mutation : str
gaussian, polynomial or uniform
selection : str
tournament or truncated
seed : int
"""
generations: int
cr: float
m: float
param_m: float
param_s: float
crossover: str
mutation: str
selection: str
seed: int
def __init__(self,
generations,
population,
cr,
m,
param_m,
param_s,
crossover,
mutation,
selection,
seed,
dimension,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output):
"""Initialize a SGA object.
Parameters
----------
generations : int
population : int
cr : float
Crossover probability
m : float
Mutation probability
param_m : float
Distribution index (polynomial mutation),
gaussian width (gaussian mutation) or
inactive (uniform mutation)
param_s : float
The number of best individuals to use in “truncated”
selection or the size of the tournament in
tournament selection.
crossover : str
exponential, binomial or single
mutation : str
gaussian, polynomial or uniform
selection : str
tournament or truncated
seed : int
dimension : int
The length of a sequence.
passes_filename : str
The file that describes the passes to use.
goals : dict
compiler : str
benchmarks_directory : str
working_set : int
The dataset to execute the benchmark.
times : int
Execution times
tool : str
Execution tool
verify_output: bool
The goal is valid only if the execution status is OK.
"""
self.__flags = self.Flags(generations,
cr,
m,
param_m,
param_s,
crossover,
mutation,
selection,
seed)
super().__init__(dimension,
population,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output)
def run(self, benchmark):
"""Execute the algorithm.
        Parameters
        ----------
        benchmark : str
"""
if self.__flags.seed is None:
algorithm = pg.sga(gen=self.__flags.generations,
cr=self.__flags.cr,
m=self.__flags.m,
param_m=self.__flags.param_m,
param_s=self.__flags.param_s,
crossover=self.__flags.crossover,
mutation=self.__flags.mutation,
selection=self.__flags.selection)
else:
algorithm = pg.sga(gen=self.__flags.generations,
cr=self.__flags.cr,
m=self.__flags.m,
param_m=self.__flags.param_m,
param_s=self.__flags.param_s,
crossover=self.__flags.crossover,
mutation=self.__flags.mutation,
selection=self.__flags.selection,
seed=self.__flags.seed)
# Execute
super().exec(algorithm, benchmark)
class PSO(Pygmo):
"""Particle Swarm Optimization."""
__version__ = '1.0.0'
__flags = None
@dataclass
class Flags:
"""PSO flags.
Parameters
----------
generations : int
omega : float
Inertia weight (or constriction factor)
eta1 : float
Social component
eta2 : float
Cognitive component
max_vel : float
Maximum allowed particle velocities
(normalized with respect to the bounds width)
variant : int
Algorithmic variant
neighb_type : int
Swarm topology (defining each particle’s neighbours)
neighb_param : int
Topology parameter (defines how many neighbours to consider)
memory : bool
When true the velocities are not reset between successive
calls to the evolve method
seed : int
Seed used by the internal random number generator.
"""
generations: int
omega: float
eta1: float
eta2: float
max_vel: float
variant: int
neighb_type: int
neighb_param: int
memory: bool
seed: int
def __init__(self,
generations,
population,
omega,
eta1,
eta2,
max_vel,
variant,
neighb_type,
neighb_param,
memory,
seed,
dimension,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output):
"""Initialize a PSO object.
Parameters
----------
generations : int
population : int
omega : float
Inertia weight (or constriction factor)
eta1 : float
Social component
eta2 : float
Cognitive component
max_vel : float
Maximum allowed particle velocities
(normalized with respect to the bounds width)
variant : int
Algorithmic variant
neighb_type : int
Swarm topology (defining each particle’s neighbours)
neighb_param : int
Topology parameter (defines how many neighbours to consider)
memory : bool
When true the velocities are not reset between successive
calls to the evolve method
seed : int
Seed used by the internal random number generator.
"""
self.__flags = self.Flags(generations,
omega,
eta1,
eta2,
max_vel,
variant,
neighb_type,
neighb_param,
memory,
seed)
super().__init__(dimension,
population,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output)
def run(self, benchmark):
"""Execute the algorithm.
        Parameters
        ----------
benchmark : str
"""
if self.__flags.seed:
algorithm = pg.pso(self.__flags.generations,
self.__flags.omega,
self.__flags.eta1,
self.__flags.eta2,
self.__flags.max_vel,
self.__flags.variant,
self.__flags.neighb_type,
self.__flags.neighb_param,
self.__flags.memory,
self.__flags.seed)
else:
algorithm = pg.pso(self.__flags.generations,
self.__flags.omega,
self.__flags.eta1,
self.__flags.eta2,
self.__flags.max_vel,
self.__flags.variant,
self.__flags.neighb_type,
self.__flags.neighb_param,
self.__flags.memory)
# Execute
super().exec(algorithm, benchmark)
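# A minimal driver sketch for the SGA strategy. Every value below (pass file,
# goal dictionary, compiler, directories, tool and benchmark name) is a
# placeholder assumption, not something shipped with YaCoS; Engine and IO must
# be able to resolve the equivalent artifacts on the target machine.
if __name__ == '__main__':
    sga = SGA(generations=10,
              population=20,
              cr=0.9,
              m=0.02,
              param_m=1.0,
              param_s=2,
              crossover='single',
              mutation='uniform',
              selection='tournament',
              seed=None,
              dimension=16,
              passes_filename='passes.yaml',
              goals={'binary_size': 1.0},
              compiler='clang',
              benchmarks_directory='./benchmarks',
              working_set=0,
              times=3,
              tool='perf',
              verify_output=False)
    sga.run('suite.benchmark')
    print(sga.results)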
| [((6407, 6438), 'yacos.essential.IO.load_passes', 'IO.load_passes', (['passes_filename'], {}), '(passes_filename)\n', (6421, 6438), False, 'from yacos.essential import IO\n'), ((7615, 7638), 'pygmo.algorithm', 'pg.algorithm', (['algorithm'], {}), '(algorithm)\n', (7627, 7638), True, 'import pygmo as pg\n'), ((7811, 7904), 'os.path.join', 'os.path.join', (['self.__flags.benchmarks_directory', 'benchmark[:index]', 'benchmark[index + 1:]'], {}), '(self.__flags.benchmarks_directory, benchmark[:index],\n benchmark[index + 1:])\n', (7823, 7904), False, 'import os\n'), ((8569, 8588), 'pygmo.problem', 'pg.problem', (['problem'], {}), '(problem)\n', (8579, 8588), True, 'import pygmo as pg\n'), ((8652, 8699), 'pygmo.population', 'pg.population', (['problem', 'self.__flags.population'], {}), '(problem, self.__flags.population)\n', (8665, 8699), True, 'import pygmo as pg\n'), ((3127, 3154), 'yacos.essential.Sequence.sanitize', 'Sequence.sanitize', (['sequence'], {}), '(sequence)\n', (3144, 3154), False, 'from yacos.essential import Sequence\n'), ((3178, 3233), 'yacos.essential.Sequence.index_pass_to_list', 'Sequence.index_pass_to_list', (['sequence', 'self.passes_dict'], {}), '(sequence, self.passes_dict)\n', (3205, 3233), False, 'from yacos.essential import Sequence\n'), ((9067, 9141), 'yacos.essential.Sequence.index_pass_to_list', 'Sequence.index_pass_to_list', (['sga_sequence[index]', 'self.__flags.passes_dict'], {}), '(sga_sequence[index], self.__flags.passes_dict)\n', (9094, 9141), False, 'from yacos.essential import Sequence\n'), ((14240, 14488), 'pygmo.sga', 'pg.sga', ([], {'gen': 'self.__flags.generations', 'cr': 'self.__flags.cr', 'm': 'self.__flags.m', 'param_m': 'self.__flags.param_m', 'param_s': 'self.__flags.param_s', 'crossover': 'self.__flags.crossover', 'mutation': 'self.__flags.mutation', 'selection': 'self.__flags.selection'}), '(gen=self.__flags.generations, cr=self.__flags.cr, m=self.__flags.m,\n param_m=self.__flags.param_m, param_s=self.__flags.param_s, crossover=\n self.__flags.crossover, mutation=self.__flags.mutation, selection=self.\n __flags.selection)\n', (14246, 14488), True, 'import pygmo as pg\n'), ((14730, 15002), 'pygmo.sga', 'pg.sga', ([], {'gen': 'self.__flags.generations', 'cr': 'self.__flags.cr', 'm': 'self.__flags.m', 'param_m': 'self.__flags.param_m', 'param_s': 'self.__flags.param_s', 'crossover': 'self.__flags.crossover', 'mutation': 'self.__flags.mutation', 'selection': 'self.__flags.selection', 'seed': 'self.__flags.seed'}), '(gen=self.__flags.generations, cr=self.__flags.cr, m=self.__flags.m,\n param_m=self.__flags.param_m, param_s=self.__flags.param_s, crossover=\n self.__flags.crossover, mutation=self.__flags.mutation, selection=self.\n __flags.selection, seed=self.__flags.seed)\n', (14736, 15002), True, 'import pygmo as pg\n'), ((19014, 19254), 'pygmo.pso', 'pg.pso', (['self.__flags.generations', 'self.__flags.omega', 'self.__flags.eta1', 'self.__flags.eta2', 'self.__flags.max_vel', 'self.__flags.variant', 'self.__flags.neighb_type', 'self.__flags.neighb_param', 'self.__flags.memory', 'self.__flags.seed'], {}), '(self.__flags.generations, self.__flags.omega, self.__flags.eta1,\n self.__flags.eta2, self.__flags.max_vel, self.__flags.variant, self.\n __flags.neighb_type, self.__flags.neighb_param, self.__flags.memory,\n self.__flags.seed)\n', (19020, 19254), True, 'import pygmo as pg\n'), ((19559, 19776), 'pygmo.pso', 'pg.pso', (['self.__flags.generations', 'self.__flags.omega', 'self.__flags.eta1', 'self.__flags.eta2', 'self.__flags.max_vel', 
'self.__flags.variant', 'self.__flags.neighb_type', 'self.__flags.neighb_param', 'self.__flags.memory'], {}), '(self.__flags.generations, self.__flags.omega, self.__flags.eta1,\n self.__flags.eta2, self.__flags.max_vel, self.__flags.variant, self.\n __flags.neighb_type, self.__flags.neighb_param, self.__flags.memory)\n', (19565, 19776), True, 'import pygmo as pg\n'), ((3378, 3416), 'yacos.essential.Sequence.name_pass_to_string', 'Sequence.name_pass_to_string', (['sequence'], {}), '(sequence)\n', (3406, 3416), False, 'from yacos.essential import Sequence\n')] |
solnishko-pvs/Modeling_BMSTU | lab_03/main.py | 0ecb82aea23b6726912f72d3230097d7b679eaf9 | import tkinter as tk
from scipy.stats import chi2, chisquare
COLOR = '#dddddd'
COLUMNS_COLOR = '#ffffff'
MAX_SIZE = 10
WIDGET_WIDTH = 25
class LinearCongruent:
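    """32-bit linear congruential generator; a and c match the widely used Numerical Recipes parameters."""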
m = 2**32
a = 1664525
c = 1013904223
_cur = 1
def next(self):
self._cur = (self.a * self._cur + self.c) % self.m
return self._cur
def khi_krit(arr):
min_ = min(arr)
cnt = [0 for _ in range(max(arr) - min_ + 1)]
for elem in arr:
cnt[elem-min_] += 1
n = sum(cnt)
k = len(cnt)
p = 1 / k
chisq = 0
for j in range(k):
chisq += cnt[j]**2 / p
chisq = chisq / n - n
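    # The loop above uses the identity sum((O_j - n*p)**2 / (n*p)) =
    # sum(O_j**2) / (n*p) - n with p = 1/k, so chisq is Pearson's statistic.
    # Note: chi2.cdf below is evaluated with k degrees of freedom, whereas the
    # classical uniformity test uses k - 1.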
#print(chisquare(cnt))
return (1 - chi2.cdf(chisq, k)) * 100
def get_10_nums(arr, num):
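    """Return the first 10 values in arr strictly greater than num (assumes at least 10 such values exist)."""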
cnt = 0
res = []
i = 0
while cnt != 10:
if arr[i] > num:
res.append(arr[i])
cnt += 1
i += 1
return res
class file_nums:
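    """Table-based generator: loads whitespace-separated numbers from nums.txt into a transposed table and serves entries one by one, wrapping around."""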
def __init__(self):
self.nums = None
with open('nums.txt', 'r') as f:
nums = [list(i.split()) for i in list(f.read().split('\n'))]
self.columns = len(nums)
self.rows = len(nums[0])
self.nums = [[] for _ in range(self.rows)]
for i in range(self.columns):
for j in range(self.rows):
self.nums[j].append(nums[i][j])
self.cur_x = 0
self.cur_y = 0
def next(self):
self.cur_x += 1
if self.cur_x == self.columns:
self.cur_x = 0
self.cur_y += 1
if self.cur_y == self.rows:
self.cur_y = 0
return self.nums[self.cur_y][self.cur_x]
class Block:
def __init__(self, master):
        self.frame = tk.LabelFrame(master, bg=COLOR, text='Input data', width=480, height=110)
self.frame.columnconfigure(0, weight=1)
self.frame.rowconfigure(0, weight=1)
self.frame.grid_propagate(False)
        self.label_input = tk.Label(self.frame, text='Your numbers: ', bg=COLOR)
self.entry_numbers = tk.Entry(self.frame, width=WIDGET_WIDTH+10)
        self.calculate_custom_result_btn = tk.Button(self.frame, text="Chi-square statistic of your numbers: ", width=WIDGET_WIDTH+6,
bg=COLOR,
command=self.user_solve)
self.label_result = tk.Label(self.frame, text='', bg=COLOR)
        self.calculate_result_btn = tk.Button(self.frame, text="Compute for 1000 numbers", width=WIDGET_WIDTH, bg=COLOR, command=self.solve)
        self.listbox_frame = tk.LabelFrame(master, text='Matrix', bg=COLOR, width=530, height=200)
self.listbox_frame.grid_propagate(False)
        self.result_frame = tk.LabelFrame(master, bg=COLOR, text='Result', width=510, height=270)
self.result_frame.grid_propagate(False)
        self.table_label = tk.Label(self.result_frame, text='Table method', bg=COLOR, bd=3)
        self.algorithm_label = tk.Label(self.result_frame, text='Algorithmic method', bg=COLOR, bd=3)
self.one_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.two_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.three_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.one_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.two_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.three_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
        self.one_digit_table.insert(tk.END, '1 digit')
        self.two_digit_table.insert(tk.END, '2 digits')
        self.three_digit_table.insert(tk.END, '3 digits')
        self.one_digit_algorithm.insert(tk.END, '1 digit')
        self.two_digit_algorithm.insert(tk.END, '2 digits')
        self.three_digit_algorithm.insert(tk.END, '3 digits')
        self.label_khi = tk.Label(self.result_frame, text='Chi-square statistic, %', bg=COLOR, bd=3)
self.one_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.two_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.three_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.one_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.two_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.three_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.table_label.grid(row=0, column=0, columnspan=3)
self.algorithm_label.grid(row=0, column=3, columnspan=3)
self.one_digit_table.grid(row=1, column=0, padx=1)
self.two_digit_table.grid(row=1, column=1, padx=1)
self.three_digit_table.grid(row=1, column=2, padx=1)
self.one_digit_algorithm.grid(row=1, column=3, padx=1)
self.two_digit_algorithm.grid(row=1, column=4, padx=1)
self.three_digit_algorithm.grid(row=1, column=5, padx=1)
self.one_digit_table_khi.grid(row=3, column=0, padx=1)
self.two_digit_table_khi.grid(row=3, column=1, padx=1)
self.three_digit_table_khi.grid(row=3, column=2, padx=1)
self.one_digit_algorithm_khi.grid(row=3, column=3, padx=1)
self.two_digit_algorithm_khi.grid(row=3, column=4, padx=1)
self.three_digit_algorithm_khi.grid(row=3, column=5, padx=1)
self.label_khi.grid(row=2, column=0, columnspan=6)
self.label_input.grid(row=0, column=0)
self.entry_numbers.grid(row=0, column=1, padx=10)
self.calculate_custom_result_btn.grid(row=1, column=0, pady=4)
self.label_result.grid(row=1, column=1)
self.calculate_result_btn.grid(row=2, column=0, columnspan=2, pady=2)
self.data = None
self.size = None
self.table_gen = file_nums()
self.listbox_list = [tk.Listbox(self.listbox_frame, selectmode=tk.SINGLE, width=8, bg=COLOR) for _ in range(MAX_SIZE)]
def defocus(self, event):
event.widget.master.focus_set()
def make_view(self):
self.frame.pack()
#self.listbox_frame.pack()
self.result_frame.pack()
def fill_data(self, size):
for i in range(size):
for j in range(size):
self.listbox_list[i].insert(tk.END, self.data[j, i])
def user_solve(self):
inp = self.entry_numbers.get()
try:
x = list(map(int, inp.split()))
self.label_result['text'] = str(round(khi_krit(x), 4)) + '%'
except:
            self.label_result['text'] = 'Input error!!!'
def solve(self):
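        # Draw 1000 numbers per digit width (1-3 digits) from both the
        # algorithmic (LCG) and the table-based generator, list ten sample
        # values per column and report each chi-square statistic percentage.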
alg_arrs = [[int(generator.next()) % j for _ in range(1000)] for j in [10, 100, 1000]]
table_arrs = [[int(self.table_gen.next()[:j]) for _ in range(1000)] for j in [1, 2, 3]]
self.one_digit_algorithm.delete(1, tk.END)
self.two_digit_algorithm.delete(1, tk.END)
self.three_digit_algorithm.delete(1, tk.END)
self.one_digit_algorithm['height'] = 11
self.two_digit_algorithm['height'] = 11
self.three_digit_algorithm['height'] = 11
self.one_digit_table.delete(1, tk.END)
self.two_digit_table.delete(1, tk.END)
self.three_digit_table.delete(1, tk.END)
self.one_digit_table['height'] = 11
self.two_digit_table['height'] = 11
self.three_digit_table['height'] = 11
[self.one_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[0], -1)]
[self.two_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[1], 9)]
[self.three_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[2], 99)]
[self.one_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[0], -1)]
[self.two_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[1], 9)]
[self.three_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[2], 99)]
self.one_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[0]), 4)) + '%'
self.two_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[1]), 4)) + '%'
self.three_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[2]), 4)) + '%'
self.one_digit_table_khi['text'] = str(round(khi_krit(table_arrs[0]), 4)) + '%'
self.two_digit_table_khi['text'] = str(round(khi_krit(table_arrs[1]), 4)) + '%'
self.three_digit_table_khi['text'] = str(round(khi_krit(table_arrs[2]), 4)) + '%'
generator = LinearCongruent()
root = tk.Tk()
root['bg'] = COLOR
root.geometry('540x390')
first_block = Block(root)
first_block.make_view()
root.mainloop()
| [((8804, 8811), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (8809, 8811), True, 'import tkinter as tk\n'), ((1698, 1772), 'tkinter.LabelFrame', 'tk.LabelFrame', (['master'], {'bg': 'COLOR', 'text': '"""Ввод данных"""', 'width': '(480)', 'height': '(110)'}), "(master, bg=COLOR, text='Ввод данных', width=480, height=110)\n", (1711, 1772), True, 'import tkinter as tk\n'), ((1935, 1986), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Ваши числа: """', 'bg': 'COLOR'}), "(self.frame, text='Ваши числа: ', bg=COLOR)\n", (1943, 1986), True, 'import tkinter as tk\n'), ((2016, 2061), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'width': '(WIDGET_WIDTH + 10)'}), '(self.frame, width=WIDGET_WIDTH + 10)\n', (2024, 2061), True, 'import tkinter as tk\n'), ((2103, 2232), 'tkinter.Button', 'tk.Button', (['self.frame'], {'text': '"""Статистика хи-квадрат ваших чисел: """', 'width': '(WIDGET_WIDTH + 6)', 'bg': 'COLOR', 'command': 'self.user_solve'}), "(self.frame, text='Статистика хи-квадрат ваших чисел: ', width=\n WIDGET_WIDTH + 6, bg=COLOR, command=self.user_solve)\n", (2112, 2232), True, 'import tkinter as tk\n'), ((2360, 2399), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '""""""', 'bg': 'COLOR'}), "(self.frame, text='', bg=COLOR)\n", (2368, 2399), True, 'import tkinter as tk\n'), ((2436, 2544), 'tkinter.Button', 'tk.Button', (['self.frame'], {'text': '"""Вычислить для 1000 чисел"""', 'width': 'WIDGET_WIDTH', 'bg': 'COLOR', 'command': 'self.solve'}), "(self.frame, text='Вычислить для 1000 чисел', width=WIDGET_WIDTH,\n bg=COLOR, command=self.solve)\n", (2445, 2544), True, 'import tkinter as tk\n'), ((2571, 2641), 'tkinter.LabelFrame', 'tk.LabelFrame', (['master'], {'text': '"""Матрица"""', 'bg': 'COLOR', 'width': '(530)', 'height': '(200)'}), "(master, text='Матрица', bg=COLOR, width=530, height=200)\n", (2584, 2641), True, 'import tkinter as tk\n'), ((2722, 2794), 'tkinter.LabelFrame', 'tk.LabelFrame', (['master'], {'bg': 'COLOR', 'text': '"""Результат"""', 'width': '(510)', 'height': '(270)'}), "(master, bg=COLOR, text='Результат', width=510, height=270)\n", (2735, 2794), True, 'import tkinter as tk\n'), ((2871, 2939), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '"""Табличный способ"""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='Табличный способ', bg=COLOR, bd=3)\n", (2879, 2939), True, 'import tkinter as tk\n'), ((2971, 3045), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '"""Алгоритмический способ"""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='Алгоритмический способ', bg=COLOR, bd=3)\n", (2979, 3045), True, 'import tkinter as tk\n'), ((3078, 3172), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3088, 3172), True, 'import tkinter as tk\n'), ((3199, 3293), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3209, 3293), True, 'import tkinter as tk\n'), ((3322, 3416), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3332, 3416), True, 'import tkinter as tk\n'), ((3447, 3541), 
'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3457, 3541), True, 'import tkinter as tk\n'), ((3572, 3666), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3582, 3666), True, 'import tkinter as tk\n'), ((3699, 3793), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3709, 3793), True, 'import tkinter as tk\n'), ((4171, 4246), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '"""% статистики хи-квадрат"""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='% статистики хи-квадрат', bg=COLOR, bd=3)\n", (4179, 4246), True, 'import tkinter as tk\n'), ((4283, 4335), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4291, 4335), True, 'import tkinter as tk\n'), ((4371, 4423), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4379, 4423), True, 'import tkinter as tk\n'), ((4461, 4513), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4469, 4513), True, 'import tkinter as tk\n'), ((4553, 4605), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4561, 4605), True, 'import tkinter as tk\n'), ((4645, 4697), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4653, 4697), True, 'import tkinter as tk\n'), ((4739, 4791), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4747, 4791), True, 'import tkinter as tk\n'), ((656, 674), 'scipy.stats.chi2.cdf', 'chi2.cdf', (['chisq', 'k'], {}), '(chisq, k)\n', (664, 674), False, 'from scipy.stats import chi2, chisquare\n'), ((6163, 6234), 'tkinter.Listbox', 'tk.Listbox', (['self.listbox_frame'], {'selectmode': 'tk.SINGLE', 'width': '(8)', 'bg': 'COLOR'}), '(self.listbox_frame, selectmode=tk.SINGLE, width=8, bg=COLOR)\n', (6173, 6234), True, 'import tkinter as tk\n')] |
tylermneher/python-api-challenge | VacationPy/api_keys.py | 28c88b4fff13c8b752096b0776a3d4645ad5fddb | # OpenWeatherMap API Key
weather_api_key = "MyOpenWeatherMapAPIKey"
# Google API Key
g_key = "MyGoogleKey" | [] |
panickervinod/aries-cloudagent-python | aries_cloudagent/protocols/actionmenu/v1_0/messages/menu_request.py | bb4627fe62ee42ffeeb435cf3d8bfbd66c10d02f | """Represents a request for an action menu."""
from .....messaging.agent_message import AgentMessage, AgentMessageSchema
from ..message_types import MENU_REQUEST, PROTOCOL_PACKAGE
HANDLER_CLASS = f"{PROTOCOL_PACKAGE}.handlers.menu_request_handler.MenuRequestHandler"
class MenuRequest(AgentMessage):
"""Class representing a request for an action menu."""
class Meta:
"""Metadata for action menu request."""
handler_class = HANDLER_CLASS
message_type = MENU_REQUEST
schema_class = "MenuRequestSchema"
def __init__(self, **kwargs):
"""Initialize a menu request object."""
super().__init__(**kwargs)
class MenuRequestSchema(AgentMessageSchema):
"""MenuRequest schema class."""
class Meta:
"""MenuRequest schema metadata."""
model_class = MenuRequest
| [] |