content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
import numpy as np
def _daily_prevalence(data):
"""
    Returns a series where each value is the true fraction of the population that is currently infected.
Args:
(dict): tracker data loaded from pkl file.
Returns:
(np.array): 1D array where each value is the above described fraction
"""
n_infected_per_day = data['ei_per_day']
n_people = data['n_humans']
prevalence = np.array(n_infected_per_day) / n_people
return prevalence | 2a5b83c09d9f06a8021c27dec45be9a872f3a9bb | 1,700 |
from operator import or_
from flask import render_template, request
def users_view(page):
"""
The user view page
Returns:
a rendered user view template
"""
user_search = request.args.get("search")
user_role = request.args.get("user_role")
users_query = model.User.query
if user_search:
term = "%" + user_search + "%"
users_query = users_query.filter(
or_(model.User.name.ilike(term), model.User.username.ilike(term))
)
if user_role and user_role != "all":
users_query = users_query.join(model.User.user_roles).filter(
model.UserRole.name == user_role
)
users_pagination = util.paginate(users_query, page, 30)
users = users_pagination.items
metrics = {}
for user in users:
user_metrics = {}
run_query = model.Run.query.filter_by(user_id=user.id)
user_metrics["num_runs"] = run_query.count()
user_metrics["last_run"] = run_query.order_by(
model.Run.submit_time.desc()
).limit(
1
).first()
metrics[user.id] = user_metrics
return render_template(
"users/view.html",
users_pagination=users_pagination,
users=users,
metrics=metrics,
user_role=user_role,
search=user_search,
) | 9646b76721ec26f9177bff99cca02d0c4b5a7954 | 1,701 |
def serial_christie_power_state(connection):
"""Ask a Christie projector for its power state and parse the response"""
connection.reset_input_buffer()
response = serial_send_command(connection, "(PWR?)", char_to_read=21)
result = None
if len(response) > 0:
if "PWR!001" in response:
result = "on"
if "PWR!000" in response:
result = "off"
if "PWR!010" in response:
result = "powering_off"
if "PWR!011" in response:
result = "powering_on"
return result | 4ce78b773ecf2f4ed9a515e4653512a461a82f24 | 1,702 |
import pycuda.cumath
def cuda_tanh(a):
""" Hyperbolic tangent of GPUArray elements.
Parameters:
a (gpu): GPUArray with elements to be operated on.
Returns:
gpu: tanh(GPUArray)
Examples:
    >>> a = cuda_tanh(cuda_give([0, pi / 4]))
    >>> a
    array([ 0., 0.6557942])
>>> type(a)
<class 'pycuda.gpuarray.GPUArray'>
"""
return pycuda.cumath.tanh(a) | 6d7608c943ded7eeeb37194a4d938aa4e47c16ef | 1,703 |
import pandas as pd
def normalize_multi_header(df):
    """Normalize the column strings of a DataFrame with a MultiIndex, e.g. strip surrounding whitespace and lowercase."""
df_copy = df.copy()
df_copy_columns = [ tuple(y.strip().lower() for y in x) for x in df_copy.columns ]
    df_copy.columns = pd.MultiIndex.from_tuples(df_copy_columns)
return df_copy | acd75a73919f1fd9b8f8c57e2f70cbb607634c82 | 1,704 |
def scapy_packet_Packet_hasflag(self, field_name, value):
"""Is the specified flag value set in the named field"""
field, val = self.getfield_and_val(field_name)
if isinstance(field, EnumField):
if val not in field.i2s:
return False
return field.i2s[val] == value
else:
return (1 << field.names.index([value])) & self.__getattr__(field_name) != 0 | 77b4a1d772e61c7bfaaf7e9d9d3debe95f799f62 | 1,705 |
def grid_points_2d(length, width, div, width_div=None):
"""Returns a regularly spaced grid of points occupying a rectangular
region of length x width partitioned into div intervals. If different
spacing is desired in width, then width_div can be specified, otherwise
it will default to div. If div < 2 in either x or y, then the corresponding
coordinate will be set to length or width respectively."""
if div > 1:
px = [-length / 2.0 + (x / (div - 1)) * length for x in range(div)]
else:
px = [length]
if width_div is not None:
wd = width_div
else:
wd = div
if wd > 1:
py = [-width / 2.0 + (y / (wd - 1)) * width for y in range(wd)]
else:
py = [width]
pts = []
for x in px:
for y in py:
pts.append((x, y))
return pts | d041f563bb8d3cd84e1829f49e6786b0331e1ef0 | 1,706 |
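A small worked call of the function above, to make the ordering of the returned points concrete:

grid_points_2d(4.0, 2.0, 3, width_div=2)
# -> [(-2.0, -1.0), (-2.0, 1.0), (0.0, -1.0), (0.0, 1.0), (2.0, -1.0), (2.0, 1.0)]
# x varies in the outer loop (3 divisions over length 4.0), y in the inner loop (2 over width 2.0)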
import itertools
from qiskit import QuantumCircuit
def analytic_gradient(circuit, parameter=None):
"""Return the analytic gradient of the input circuit."""
if parameter is not None:
if parameter not in circuit.parameters:
raise ValueError('Parameter not in this circuit.')
if len(circuit._parameter_table[parameter]) > 1:
raise NotImplementedError('No product rule support yet, params must be unique.')
summands, op_context = [], []
for i, op in enumerate(circuit.data):
gate = op[0]
op_context += [op[1:]]
if (parameter is None and len(gate.params) > 0) or parameter in gate.params:
summands += [gradient_lookup(gate)]
else:
summands += [[[1, gate]]]
gradient = []
for product_rule_term in itertools.product(*summands):
summand_circuit = QuantumCircuit(*circuit.qregs)
coeff = 1
for i, a in enumerate(product_rule_term):
coeff *= a[0]
summand_circuit.data.append([a[1], *op_context[i]])
gradient += [[coeff, summand_circuit.copy()]]
return gradient | 126357a3aa25a1a38e5226657f92e626cb8b2339 | 1,707 |
def _get_shipping_voucher_discount_for_cart(voucher, cart):
"""Calculate discount value for a voucher of shipping type."""
if not cart.is_shipping_required():
msg = pgettext(
'Voucher not applicable',
'Your order does not require shipping.')
raise NotApplicable(msg)
shipping_method = cart.shipping_method
if not shipping_method:
msg = pgettext(
'Voucher not applicable',
'Please select a shipping method first.')
raise NotApplicable(msg)
# check if voucher is limited to specified countries
shipping_country = cart.shipping_address.country
if voucher.countries and shipping_country.code not in voucher.countries:
msg = pgettext(
'Voucher not applicable',
'This offer is not valid in your country.')
raise NotApplicable(msg)
return get_shipping_voucher_discount(
voucher, cart.get_subtotal(), shipping_method.get_total()) | cba3299f02d3cc2169e9ad8367b8c5e67a08a459 | 1,708 |
def ottawa(location, **kwargs):
"""Ottawa Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='ottawa', **kwargs) | b1e32842ce887b72f317c5b88cbcb390524b59af | 1,709 |
import copy
import random
def entries():
""" Basic data for a test case """
return copy.deepcopy(
{"arb_key": "text", "randn": random.randint(0, 10),
"nested": {"ntop": 0, "nmid": {"list": ["a", "b"]},
"lowest": {"x": {"a": -1, "b": 1}}},
"collection": {1, 2, 3}}) | 5d6cde325b69e43598f9d0158ae5989a4d70b54c | 1,710 |
def count_ref_alleles(variant, *traits):
    Count reference alleles for a variant
Parameters
----------
variant : a Variant as from funcgenom
the variant for which alleles should be counted
*traits : str
the traits for which alleles should be counted
Returns
-------
int
the reference allele count
"""
return (
''.join(variant.traits[trait]['alleles'] for trait in traits)
.replace(',', '.')
.count('.')
) | 10ea3468f5de8f2b77bb97b27b888af808c541b7 | 1,711 |
import numpy as np
from skimage import color, exposure, transform
def preprocess_image(image, image_sz=48):
"""
Preprocess an image. Most of this is stuff that needs to be done for the Keras CNN model to work,
as recommended by: https://chsasank.github.io/keras-tutorial.html
"""
    # we need to convert to hue, saturation, and value (HSV) coordinates
hsv_image = color.rgb2hsv(image)
hsv_image[:, :, 2] = exposure.equalize_hist(hsv_image[:, :, 2])
image = color.hsv2rgb(hsv_image)
# we have to crop to central square
min_side = min(image.shape[:-1])
centre = image.shape[0] // 2, image.shape[1] // 2
image = image[centre[0] - min_side // 2:centre[0] + min_side // 2, centre[1] - min_side // 2:centre[1] + min_side // 2, :]
# our model _needs_ images that are all the same size
image = transform.resize(image, (image_sz, image_sz))
# change colour axis
image = np.rollaxis(image, -1)
return image | ade5d63ad8e2a25622795d15c90c1e062a76006d | 1,712 |
def pspace_independent(a, b):
"""
Tests for independence between a and b by checking if their PSpaces have
overlapping symbols. This is a sufficient but not necessary condition for
independence and is intended to be used internally.
Notes
=====
pspace_independent(a, b) implies independent(a, b)
independent(a, b) does not imply pspace_independent(a, b)
"""
a_symbols = set(pspace(b).symbols)
b_symbols = set(pspace(a).symbols)
if len(set(random_symbols(a)).intersection(random_symbols(b))) != 0:
return False
if len(a_symbols.intersection(b_symbols)) == 0:
return True
return None | 5c6a253e266af1673c6e05c4b7f81b04a1201803 | 1,713 |
def img_aspect_ratio(width, height):
"""
Returns an image's aspect ratio.
If the image has a common aspect ratio, returns the aspect ratio in the format x:y,
otherwise, just returns width/height.
"""
ratio = round(width/height, 2)
for ar, val in COMMON_ASPECT_RATIOS.items():
if ratio <= val + 0.01 and ratio >= val - 0.01:
ratio = ar
break
return ratio | 82104f3d4105fd4c22bc4cfed51e4c8261d32079 | 1,714 |
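A usage sketch for the function above. The module-level COMMON_ASPECT_RATIOS mapping is not shown in the snippet, so the one below is a hypothetical stand-in:

COMMON_ASPECT_RATIOS = {"4:3": 1.33, "16:9": 1.78, "21:9": 2.33}   # assumed mapping

img_aspect_ratio(1920, 1080)   # round(1.777..., 2) == 1.78, within 0.01 of 1.78 -> "16:9"
img_aspect_ratio(1000, 700)    # 1.43 matches no common ratio -> returns 1.43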
def _get_active_sculpting_mesh_for_deformer(deformer):
"""
If sculpting is enabled on the deformer, return the output mesh. Otherwise,
return None.
"""
# If sculpting is enabled, .tweak[0] will be connected to the .tweakLocation of
# a mesh.
connections = cmds.listConnections('%s.tweak[0]' % deformer, d=True, s=False) or []
if len(connections) == 0:
return None
if len(connections) > 1:
# This isn't expected.
raise RuntimeError('More than one mesh points to %s.tweak[0]' % deformer)
return connections[0] | a0864999d6aec487a23bac4dfb602f01b0b45d8f | 1,715 |
def get_client_versions():
"""Gets the client versions (or client equivalent for server).
Returns:
A list of client versions (or client equivalent for server).
E.g. '10' for Windows 10 and Windows Server 2016.
"""
    version_number = get_os_version_number()
    if version_number in _WIN32_CLIENT_NAMES:
        return [_WIN32_CLIENT_NAMES[version_number]]
return [] | 0954754b608b745d2dbc9f6c83bd85bc0c2549e2 | 1,716 |
from CA import caget
from CA import caput
def PV_property(name,default_value=nan):
"""EPICS Channel Access Process Variable as class property"""
def prefix(self):
prefix = ""
if hasattr(self,"prefix"): prefix = self.prefix
if hasattr(self,"__prefix__"): prefix = self.__prefix__
if prefix and not prefix.endswith("."): prefix += "."
return prefix
def get(self):
value = caget(prefix(self)+name.upper())
if value is None: value = default_value
if type(value) != type(default_value):
if type(default_value) == list: value = [value]
else:
try: value = type(default_value)(value)
except: value = default_value
return value
def set(self,value):
value = caput(prefix(self)+name.upper(),value)
return property(get,set) | 745fe42d760f42dbc4e729d45256dd291be7e5b3 | 1,717 |
import six
def _stream_files(curr_header, fn, mesos_files):
"""Apply `fn` in parallel to each file in `mesos_files`. `fn` must
return a list of strings, and these strings are then printed
serially as separate lines.
`curr_header` is the most recently printed header. It's used to
group lines. Each line has an associated header (e.g. a string
representation of the MesosFile it was read from), and we only
print the header before printing a line with a different header
than the previous line. This effectively groups lines together
    when they have the same header.
:param curr_header: Most recently printed header
:type curr_header: str
:param fn: function that reads a sequence of lines from a MesosFile
:type fn: MesosFile -> [str]
:param mesos_files: files to read
:type mesos_files: [MesosFile]
:returns: Returns the most recently printed header, and a list of
files that are still reachable. Once we detect a file is
unreachable, we stop trying to read from it.
:rtype: (str, [MesosFile])
"""
reachable_files = list(mesos_files)
# TODO switch to map
for job, mesos_file in util.stream(fn, mesos_files):
try:
lines = job.result()
except DCOSException as e:
# The read function might throw an exception if read.json
# is unavailable, or if the file doesn't exist in the
# sandbox. In any case, we silently remove the file and
# continue.
logger.exception("Error reading file: {}".format(e))
reachable_files.remove(mesos_file)
continue
if lines:
curr_header = _output(curr_header,
len(reachable_files) > 1,
six.text_type(mesos_file),
lines)
return curr_header, reachable_files | 9d6cc9a238c831017b0426d9cd21d42d26812365 | 1,718 |
import sys
def supported_platform(logger):
"""Checks if this script is running on supported platform.
Args:
logger: A valid logger instance to log debug/error messages.
Returns:
True if this platform is supported.
"""
# TODO(billy): Look into supporting Windows in the near future.
logger.debug("Current platform: {}".format(sys.platform))
if not (sys.platform == "linux" or sys.platform == "darwin"):
logger.error("Sorry, your OS is currently unsupported for this script.")
return False
if not (sys.version_info.major == 3 and sys.version_info.minor >= 5):
logger.error("This script requires Python 3.5 or higher!")
logger.error("You are using Python {}.{}.".format(sys.version_info.major,
sys.version_info.minor))
return False
return True | f7f9566fe06bb96f3d213d94bdfb195e4f22b33f | 1,719 |
import random
def random_in_range(a: int, b: int) -> int:
""" Return a random number r with a <= r <= b. """
return random.randint(a, b) | 611c2754ace92eac4951f42e1e31af2f441ed0c2 | 1,720 |
import sqlite3
from datetime import datetime
def count_items():
"""
:returns: a dictionary with counts in fields 'total', 'done'.
"""
con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
cur = con.cursor()
# do not count root
cur.execute("SELECT COUNT(*) FROM item WHERE pk<>0")
total = cur.fetchone()[0]
cur.execute("SELECT COUNT(*) FROM item WHERE is_done='TRUE' AND pk<>0")
done = cur.fetchone()[0]
done_items = load_items(is_done=True)
done_today = 0
done_yesterday = 0
for i in done_items:
date_item = datetime.strptime(i.done_at, DATE_FORMAT)
date_now = datetime.now()
if date_now.date() == date_item.date():
done_today += 1
if (date_now.date() - date_item.date()).days == 1:
done_yesterday += 1
return {
'done': done,
'total': total,
'done_today': done_today,
'done_yesterday': done_yesterday,
} | 904c72b74603fd9a9c2a1c249641ba61a7b85b04 | 1,721 |
import numpy as np
import pandas as pd
import streamlit as st
import yfinance as yf
# Stock, AssetClasses and scrape_largest_companies are project-level helpers imported elsewhere
def app() -> None:
"""This app renders the Data Analyzer page"""
# TEXT:
st.write(
"""
# Data Analysis Dashboard
Please provide an asset name to display historical data.
"""
)
# INPUTs:
st.sidebar.title("Parameters")
col1, col2, col3 = st.columns(3)
with col1:
asset_class = st.selectbox(
label="Choose an asset class", options=AssetClasses.list()
)
# asset_ticker = st.text_input(label="Enter an asset ticker", value="AAPL")
# PROCESSING:
if asset_class == AssetClasses.STOCKS.value:
@st.cache(persist=True)
def get_global_stocks(hundred_results: int = 10) -> pd.DataFrame:
"""Get company name, ticker and country of top companies based on market cap.
By default returns 1000 biggest companies, max is 5800.
"""
return scrape_largest_companies(num_pages=hundred_results)
number_of_stocks = st.sidebar.number_input(
label="Number of stocks", min_value=100, max_value=5800, value=1000
)
period = st.sidebar.selectbox(
label="Period",
options=[
"max",
"ytd",
"10y",
"5y",
"2y",
"1y",
"1d",
"5d",
"1mo",
"3mo",
"6mo",
],
)
interval = st.sidebar.selectbox(
label="Interval",
options=[
"1d",
"1h",
"5d",
"1wk",
"1mo",
"3mo",
],
)
with st.spinner("Getting companies..."):
companies_df = get_global_stocks(
hundred_results=int(np.ceil(number_of_stocks / 100))
)
with col2:
country = st.selectbox(
label="Choose a country", options=companies_df["country"].unique()
)
with col3:
stock_name = st.selectbox(
label="Choose a stock",
options=companies_df.loc[companies_df["country"] == str(country)][
"name"
],
)
stock = Stock(
name=str(stock_name),
ticker=companies_df.loc[companies_df["name"] == str(stock_name)][
"ticker"
].iloc[0],
country=companies_df.loc[companies_df["name"] == str(stock_name)][
"country"
].iloc[0],
)
@st.cache(persist=True, allow_output_mutation=True)
def get_prices(
stock: Stock, period: str = "max", interval: str = "1d"
) -> pd.DataFrame:
"""Get prices from Yahoo Finance"""
return yf.Ticker(ticker=stock.ticker).history(
period=period, interval=interval
)
with st.spinner("Getting prices..."):
stock.prices = get_prices(stock=stock, period=period, interval=interval)
@st.cache(persist=True, allow_output_mutation=True)
def get_info(stock: Stock) -> dict:
return yf.Ticker(ticker=stock.ticker).info
@st.cache(persist=True, allow_output_mutation=True)
def get_news(stock: Stock) -> dict:
return yf.Ticker(ticker=stock.ticker).news
else:
raise NotImplementedError("Not implemented yet.")
# OUTPUT:
with st.spinner("Getting company info..."):
info = get_info(stock=stock)
st.write(
"""
## Business Summary
"""
)
with st.expander("See business description"):
st.write(info["longBusinessSummary"])
col_1, col_2, col_3 = st.columns(3)
with col_1:
st.write("**Ticker**: ", stock.ticker)
st.write("**Website**: ", info["website"])
with col_2:
st.write("**Sector**: ", info["sector"])
st.write("**Industry**: ", info["industry"])
with col_3:
st.write(
"**Number of shares**: ",
str(round(info["sharesOutstanding"] / 1e6, 2)),
"milions",
)
st.write("**Market beta**: ", str(round(info["beta"], 2)))
st.write("## Prices")
st.line_chart(stock.prices[["Open", "High", "Low", "Close"]])
st.line_chart(stock.prices["Volume"])
if interval != "1h":
try:
stock.prices.index = stock.prices.index.date
except AttributeError:
pass
st.dataframe(stock.prices)
# st.write(get_info(stock=stock))
news: list[dict] = get_news(stock=stock)
st.write("## Related news:")
for n in news:
st.markdown(f"[{n.get('title')}]({n.get('link')})") | 6890ed875368eec3271f8ee0625d4182aaeb769d | 1,722 |
import re
def valida_cnpj(cnpj):
"""
    Validate CNPJs (Brazilian company registry numbers), returning only the validated digit string.
    # Invalid CNPJs
    >>> valida_cnpj('abcdefghijklmn')
    False
    >>> valida_cnpj('123')
    False
    >>> valida_cnpj('')
    False
    >>> valida_cnpj(None)
    False
    >>> valida_cnpj('12345678901234')
    False
    >>> valida_cnpj('11222333000100')
    False
    # Valid CNPJs
    >>> valida_cnpj('11222333000181')
    '11222333000181'
    >>> valida_cnpj('11.222.333/0001-81')
    '11222333000181'
    >>> valida_cnpj(' 11 222 333 0001 81 ')
    '11222333000181'
"""
    cnpj = ''.join(re.findall(r'\d', str(cnpj)))
if (not cnpj) or (len(cnpj) < 14):
return False
    # Take only the first 12 digits of the CNPJ and
    # generate the 2 check digits that are missing
inteiros = list(map(int, cnpj))
novo = inteiros[:12]
prod = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
while len(novo) < 14:
r = sum([x*y for (x, y) in zip(novo, prod)]) % 11
if r > 1:
f = 11 - r
else:
f = 0
novo.append(f)
prod.insert(0, 6)
    # If the generated number matches the original number, it is valid
if novo == inteiros:
return cnpj
return False | 4b3d2591e6f196cccdd8d68089e36f22ba1d1a98 | 1,723 |
def km_miles(kilometers):
"""Usage: Convert kilometers to miles"""
return kilometers/1.609 | 5480c065f904dfc1959691e158653fd0e6bb67e6 | 1,724 |
def is_enterprise_learner(user):
"""
Check if the given user belongs to an enterprise. Cache the value if an enterprise learner is found.
Arguments:
user (User): Django User object.
Returns:
(bool): True if given user is an enterprise learner.
"""
cached_is_enterprise_key = get_is_enterprise_cache_key(user.id)
if cache.get(cached_is_enterprise_key):
return True
if EnterpriseCustomerUser.objects.filter(user_id=user.id).exists():
# Cache the enterprise user for one hour.
cache.set(cached_is_enterprise_key, True, 3600)
return True
return False | 76bbf24dafec3ec26ec23504b8d064fbe5c21c52 | 1,725 |
import cv2
import numpy as np
# camera intrinsics (cx, cy, fx, fy), disparity, K_left, D_left and write_ply are module-level globals defined elsewhere
def point_cloud(depth, colors):
"""Transform a depth image into a point cloud with one point for each
pixel in the image, using the camera transform for a camera
centred at cx, cy with field of view fx, fy.
depth is a 2-D ndarray with shape (rows, cols) containing
depths from 1 to 254 inclusive. The result is a 3-D array with
shape (rows, cols, 3). Pixels with invalid depth in the input have
NaN for the z-coordinate in the result.
"""
rows, cols = depth.shape
c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)
valid = (depth > 0) & (depth < 255)
z = np.where(valid, depth / 256.0, np.nan)
x = np.where(valid, z * (c - cx) / fx, 0)
y = np.where(valid, z * (r - cy) / fy, 0)
points = np.dstack((x, y, z))
print('points:{}, colors:{}'.format(np.shape(points), np.shape(colors)))
reflect_matrix = np.identity(3) # reflect on x axis
reflect_matrix[0] *= -1
points = np.matmul(points, reflect_matrix)
out_fn = 'point_cloud.ply'
# filter by min disparity
mask = disparity > disparity.min()
out_points = points[mask]
out_colors = colors[mask]
idx = np.fabs(out_points[:, -1]) < 50 # 10.5 # filter by dimension
print('out_points:{}'.format(np.shape(out_points)))
out_points = out_points[idx]
out_colors = out_colors.reshape(-1, 3)
out_colors = out_colors[idx]
write_ply(out_fn, out_points, out_colors)
# reproject on the image -----------------------------------
reflected_pts = np.matmul(out_points, reflect_matrix)
projected_img, _ = cv2.projectPoints(reflected_pts, np.identity(3), np.array([0., 0., 0.]), K_left, D_left)
projected_img = projected_img.reshape(-1, 2)
blank_img = np.zeros(colors.shape, 'uint8')
img_colors = colors[mask][idx].reshape(-1, 3)
for i, pt in enumerate(projected_img):
pt_x = int(pt[0])
pt_y = int(pt[1])
if pt_x > 0 and pt_y > 0:
# use the BGR format to match the original image type
col = (int(img_colors[i, 2]), int(img_colors[i, 1]), int(img_colors[i, 0]))
cv2.circle(blank_img, (pt_x, pt_y), 1, col)
return blank_img, out_points | 75aa681fa817b29e23ed76beb8504ef1bbaa5d67 | 1,726 |
import pandas as pd
from tqdm import tqdm
def structural_email(data, pos_parser=True, bytedata_parser_threshold=50, reference_parser_match_type=2):
"""
This is a parser pipeline, parser order matters.
1. string => structure email to separate => header, body, others
2. body => remove typo and some irrelevant words => body
3. body => parse and remove email from body => body_no_email
4. body_no_email => parse and remove binary data like BMP or picture from body => body_no_binary_no_email
5. body_no_binary_no_email => separate email reference and reply => reply, previous_one, previous_two
@param data: data text series including all the training set or test set
@return: structural information
"""
print("Preprocessing for unstructure email...")
header_info = []
body_info = []
others_info = []
tag_info = []
for string in tqdm(data):
# structure parsers
header, body, others = structure_parser(string)
body = typo_parser(body)
body_no_email, emails = email_address_parser(body)
body_no_binary_no_email, bytedata = bytedata_parser(body_no_email, threshold=bytedata_parser_threshold)
# main parser
reply, previous_one, previous_two = reference_parser(body_no_binary_no_email, match_type=reference_parser_match_type)
if pos_parser:
target_tag = set(['NN', 'NNS', 'NNPS'])
tag_reply = pos_tag_parser(reply, target_tag)
tag_previous_one = pos_tag_parser(previous_one, target_tag)
tag_previous_two = pos_tag_parser(previous_two, target_tag)
tag_info.append([tag_reply, tag_previous_one, tag_previous_two])
# append data in loops
header_info.append(header)
body_info.append([reply, previous_one, previous_two])
others_info.append(others + [emails] + [bytedata])
a1 = pd.DataFrame.from_dict(header_info)
a2 = pd.DataFrame(body_info, columns=["reply", "reference_one", "reference_two"])
a3 = pd.DataFrame(others_info, columns=["date", "delivered_to", "to_domains", "error_message", "contained_emails", "long_string"])
if pos_parser:
a4 = pd.DataFrame(tag_info, columns=["tag_reply", "tag_reference_one", "tag_reference_two"])
structure_email = pd.concat([a1, a2, a3, a4], axis=1)
else:
structure_email = pd.concat([a1, a2, a3], axis=1)
return structure_email | b68227f10ae6e78f6e12ab174e2360c0828e2038 | 1,727 |
import six
def build_batches(data, conf, turn_cut_type='tail', term_cut_type='tail'):
"""
Build batches
"""
_turns_batches = []
_tt_turns_len_batches = []
_every_turn_len_batches = []
_response_batches = []
_response_len_batches = []
_label_batches = []
batch_len = len(data[six.b('y')]) // conf['batch_size']
for batch_index in six.moves.range(batch_len):
_turns, _tt_turns_len, _every_turn_len, _response, _response_len, _label = build_one_batch(
data, batch_index, conf, turn_cut_type='tail', term_cut_type='tail')
_turns_batches.append(_turns)
_tt_turns_len_batches.append(_tt_turns_len)
_every_turn_len_batches.append(_every_turn_len)
_response_batches.append(_response)
_response_len_batches.append(_response_len)
_label_batches.append(_label)
ans = {
"turns": _turns_batches,
"tt_turns_len": _tt_turns_len_batches,
"every_turn_len": _every_turn_len_batches,
"response": _response_batches,
"response_len": _response_len_batches,
"label": _label_batches
}
return ans | e82411d5b51171c9590bd5f150dfeca666b3a3a6 | 1,728 |
def is_notebook():
"""Check if pyaedt is running in Jupyter or not.
Returns
-------
bool
"""
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
else:
return False
except NameError:
return False | 51c0806ba17cbaef5732379a5e9c68d8eb171d31 | 1,729 |
def strategy(history, memory):
"""
Tit-for-tat, except we punish them N times in a row if this is the Nth time they've
initiated a defection.
memory: (initiatedDefections, remainingPunitiveDefections)
"""
if memory is not None and memory[1] > 0:
choice = 0
memory = (memory[0], memory[1] - 1)
return choice, memory
num_rounds = history.shape[1]
opponents_last_move = history[1, -1] if num_rounds >= 1 else 1
our_last_move = history[0, -1] if num_rounds >= 1 else 1
our_second_last_move = history[0, -2] if num_rounds >= 2 else 1
opponent_initiated_defection = (
opponents_last_move == 0 and our_last_move == 1 and our_second_last_move == 1
)
choice = 0 if opponent_initiated_defection else 1
if choice == 0:
memory = (1, 0) if memory is None else (memory[0] + 1, memory[0])
return choice, memory | bf8d09417c246f9f88a721dfcc4408f49195fd1a | 1,730 |
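A worked example of the escalating punishment, assuming the conventions used above (row 0 is our history, row 1 the opponent's, 1 = cooperate, 0 = defect):

import numpy as np

history = np.array([
    [1, 1, 0, 1, 1, 1],   # our moves: we punished once after the opponent's first defection
    [1, 0, 1, 1, 1, 0],   # opponent initiated defections in rounds 2 and 6
])
choice, memory = strategy(history, (1, 0))   # memory: 1 prior defection, no punishment pending
print(choice, memory)   # -> 0 (2, 1): defect now, with one more punitive defection queued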
def get_primitives(name=None, primitive_type=None, primitive_subtype=None):
"""Get a list of the available primitives.
Optionally filter by primitive type: ``transformation`` or ``aggregation``.
Args:
primitive_type (str):
Filter by primitive type. ``transformation`` or ``aggregation``.
Returns:
list:
List of the names of the available primitives.
"""
filters = {}
if primitive_type:
if primitive_type not in ('transformation', 'aggregation'):
raise ValueError('primitive_type must be `transformation` or `aggregation`.')
filters['classifiers.type'] = primitive_type
if primitive_subtype:
if primitive_subtype not in ('amplitude', 'frequency', 'frequency_time'):
raise ValueError(
'primitive_subtype must be `amplitude`, `frequency` or `frequency_time`.')
filters['classifiers.subtype'] = primitive_subtype
return discovery.find_primitives(name or 'sigpro', filters) | c833a2b1d52dc135a4518aa6fa7147ae58b73b9a | 1,731 |
def _unpack_batch_channel(data, old_shape):
"""Unpack the data channel dimension.
"""
data = nnvm.sym.transpose(data, axes=(0, 4, 1, 5, 2, 3))
data = nnvm.sym.reshape(data, shape=old_shape)
return data | 1b59f6fbceabef3a28b4180a5bc808621e11c6b7 | 1,732 |
def get_branch_user(branch):
"""Get user name for given branch."""
with Command('git', 'log', '--pretty=tformat:%an', '-1', branch) as cmd:
for line in cmd:
return line | 0845dc69cbd949c1f739ca877c0b182740fa7bdb | 1,733 |
import sys
from typing import Mapping
from electrum_cintamani import qrscanner
def find_system_cameras() -> Mapping[str, str]:
    """Returns a camera_description -> camera_path map."""
    if sys.platform == 'darwin' or sys.platform in ('windows', 'win32'):
        try:
            from .qtmultimedia import find_system_cameras
        except ImportError as e:
            return {}
        else:
            return find_system_cameras()
    else:  # desktop Linux and similar
        return qrscanner.find_system_cameras()
import tensorflow as tf
def _shape_list(x):
"""Return list of dims, statically where possible."""
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i, static_dim in enumerate(static):
dim = static_dim or shape[i]
ret.append(dim)
return ret | 0add2ba771dd99817654ce48c745db5c5f09d3aa | 1,735 |
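A sketch of the static/dynamic mix the helper above returns, assuming TensorFlow 2.x where unknown dimensions only arise inside a traced tf.function:

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=[None, 128], dtype=tf.float32)])
def describe(x):
    dims = _shape_list(x)
    # dims[0] is a scalar Tensor (batch size only known at run time),
    # dims[1] is the plain Python int 128 (known statically)
    return dims

describe(tf.zeros([4, 128]))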
import packaging
def upgrade_common(ctx, config, deploy_style):
"""
Common code for upgrading
"""
remotes = upgrade_remote_to_config(ctx, config)
project = config.get('project', 'ceph')
extra_pkgs = config.get('extra_packages', [])
log.info('extra packages: {packages}'.format(packages=extra_pkgs))
for remote, node in remotes.items():
system_type = teuthology.get_system_type(remote)
assert system_type in ('deb', 'rpm')
pkgs = get_package_list(ctx, config)[system_type]
log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
if isinstance(extra_pkgs, dict):
pkgs += extra_pkgs.get(system_type, [])
else:
pkgs += extra_pkgs
installed_version = packaging.get_package_version(remote, 'ceph-common')
upgrade_version = get_upgrade_version(ctx, node, remote)
log.info("Ceph {s} upgrade from {i} to {u}".format(
s=system_type,
i=installed_version,
u=upgrade_version
))
if _upgrade_is_downgrade(installed_version, upgrade_version):
raise RuntimeError(
"An attempt to upgrade from a higher version to a lower one "
"will always fail. Hint: check tags in the target git branch."
)
deploy_style(ctx, node, remote, pkgs, system_type)
verify_package_version(ctx, node, remote)
return len(remotes) | 13840203bceb6b6ae069d47fa03a278dac3b0bc6 | 1,736 |
def convert2define(name):
"""
returns the name of the define used according to 'name' which is the name of the file
"""
header = toupper(toalphanum(name))
return "__" + header + "__" | 9f48181310db2732a26b846cb8270eb44bd06004 | 1,737 |
import http.client as httplib
from urllib.parse import urlparse
def url_exists(url):
"""
Checks if a url exists
:param url:
:return:
"""
p = urlparse(url)
conn = httplib.HTTPConnection(p.netloc)
conn.request('HEAD', p.path)
resp = conn.getresponse()
return resp.status == 301 or resp.status == 200 | 3ef7d71fed0c85d4e75e910e5354b817c656c0d7 | 1,738 |
def add_header(cmd):
"""
:param cmd: the command with its values
    :return: adds a header and returns it, ready to be sent
"""
# get the length of the length of the cmd (for how many spaces needed)
header = str(len(cmd))
for i in range(get_digits(len(cmd)), HEADERSIZE):
header = header + " "
return header + cmd | 91a23eaee6ddd01ce5b5d62b3d43221b25bcd541 | 1,739 |
def stat_selector(player, stat, in_path, year):
"""
Selects stat for player in game year selected
Parameters
----------
player
The player being assessed (str)
stat
The stat being assessed (str)
in_path
The path to the folder containing player data (str)
year
The year of game to look at (int)
Returns
-------
stat_selected
A number indicating the selected stat value (int)
"""
df = fifa_file_opener(in_path, year)
player_row = df.loc[df["short_name"] == player]
stat_selected = int(player_row[stat])
return stat_selected | 7f04086e4e3baee273baa1b90e1e0735856091d5 | 1,740 |
import torch
def get_cali_samples(train_data_loader, num_samples, no_label=True):
"""Generate sub-dataset for calibration.
Args:
train_data_loader (torch.utils.data.DataLoader):
num_samples (int):
no_label (bool, optional): If the dataloader has no labels. Defaults to True.
Returns:
torch.Tensor: Concatenated data matrix.
"""
cali_data_list = []
if no_label:
for batch_data in train_data_loader:
cali_data_list.append(batch_data["image"])
if len(cali_data_list) >= num_samples:
break
else:
for batch_data, _ in train_data_loader:
cali_data_list.append(batch_data)
if len(cali_data_list) >= num_samples:
break
return torch.cat(cali_data_list, dim=0)[:num_samples].cpu() | 297ea0384b1e7f0a6ea51fc37325e57eb1cb8afa | 1,741 |
from typing import List
from typing import Tuple
import requests
import json
def fetch_available_litteraturbanken_books() -> List[Tuple[str, str]]:
"""Fetch available books from Litteraturbanken."""
url = "https://litteraturbanken.se/api/list_all/etext?exclude=text,parts,sourcedesc,pages,errata&filter_and=%7B%22sort_date_imprint.date:range%22:%221248,2020%22,%22export%3Etype%22:%5B%22xml%22,%22txt%22,%22workdb%22%5D%7D&filter_or=%7B%7D&filter_string=&from=0&include=lbworkid,titlepath,title,titleid,work_titleid,shorttitle,mediatype,searchable,imported,sortfield,sort_date_imprint.plain,main_author.authorid,main_author.surname,main_author.type,work_authors.authorid,work_authors.surname,startpagename,has_epub,sort_date.plain,export&partial_string=true&sort_field=popularity%7Cdesc&suggest=true&to=1000"
response = requests.get(url)
response.raise_for_status()
response = json.loads(response.text)
books = []
for book in response["data"]:
has_text = False
for export in book["export"]:
if export["type"] == "txt":
has_text = True
break
if not has_text:
continue
filename = "LB_{}_{}_{}_etext.txt".format(book["main_author"]["authorid"], book["titleid"], book["sort_date_imprint"]["plain"])
if filename in blacklist:
continue
books.append((filename, book["lbworkid"]))
return books | ff14af499335c6229d1f8d995c343c62fff7db74 | 1,742 |
def soup_from_psf(psf):
"""
Returns a Soup from a .psf file
"""
soup = pdbatoms.Soup()
curr_res_num = None
is_header = True
for line in open(psf):
if is_header:
if "NATOM" in line:
n_atom = int(line.split()[0])
is_header = False
continue
words = line.split()
atom_num = int(words[0])
chain_id = words[1]
res_num = int(words[2])
res_type = words[3]
atom_type = words[4]
charge = float(words[6])
mass = float(words[7])
if chain_id.startswith('WT') or chain_id.startswith('ION'):
is_hetatm = True
chain_id = " "
else:
is_hetatm = False
chain_id = chain_id[0]
if curr_res_num != res_num:
res = pdbatoms.Residue(res_type, chain_id, res_num)
soup.append_residue(res)
curr_res_num = res_num
atom = pdbatoms.Atom()
atom.vel = v3.vector()
atom.chain_id = chain_id
atom.is_hetatm = is_hetatm
atom.num = atom_num
atom.res_num = res_num
atom.res_type = res_type
atom.type = atom_type
atom.mass = mass
atom.charge = charge
atom.element = data.guess_element(res_type, atom_type)
soup.insert_atom(-1, atom)
if len(soup.atoms()) == n_atom:
break
convert_to_pdb_atom_names(soup)
return soup | 6b84e9428bec66e65b0d06dd81b238370f1602a8 | 1,743 |
def check_api():
"""
    Double-check goods received into stock (stock-in verification).
post req: withlock
{
erp_order_code,
lines: [{
barcode, location, lpn, qty
},]
w_user_code,
w_user_name
}
"""
w_user_code = request.json.pop('w_user_code', None)
w_user_name = request.json.pop('w_user_name', None)
order = Stockin.query.t_query.filter_by(erp_order_code=request.json.pop('erp_order_code')) \
.with_for_update().first()
if order.state == 'create' or order.state == 'part':
lines = request.json['lines']
action = StockinAction(order)
for line in lines:
line['qty'] = int(line.get('qty', 0) or 0)
if line.get('qty', 0) <= 0:
continue
action.check(order=order, w_user_code=w_user_code, w_user_name=w_user_name, **line)
order.state = 'part'
        # When over-receiving is not allowed, check after each receipt whether the order is fully stocked in; when over-receiving is allowed, the order can only be closed manually
if not g.owner.is_overcharge:
finish = True
for line in order.lines:
if not (line.qty_real >= line.qty):
finish = False
order.state = 'all' if finish else 'part'
if order.state == 'all':
order.finish()
db.session.commit()
return json_response({'status': 'success', 'msg': u'ok', 'data': order.as_dict})
db.session.rollback()
    return json_response({'status': 'fail', 'msg': u'Order is in (%s) state and cannot receive any more goods' % (order.state), 'data': order.as_dict})
def quadratic_form(u, Q, v, workers=1, **kwargs):
"""
Compute the quadratic form uQv, with broadcasting
Parameters
----------
u : (..., M) array
The u vectors of the quadratic form uQv
Q : (..., M, N) array
The Q matrices of the quadratic form uQv
v : (..., N) array
The v vectors of the quadratic form uQv
workers : int, optional
The number of parallel threads to use along gufunc loop dimension(s).
If set to -1, the maximum number of threads (as returned by
``multiprocessing.cpu_count()``) are used.
Returns
-------
qf : (...) array
The result of the quadratic forms
Notes
-----
Numpy broadcasting rules apply.
Implemented for types single, double, csingle and cdouble. Numpy
conversion rules apply.
This is similar to PDL inner2
Examples
--------
The result in absence of broadcasting is just as np.dot(np.dot(u,Q),v)
or np.dot(u, np.dot(Q,v))
>>> u = np.array([2., 3.])
>>> Q = np.array([[1.,1.], [0.,1.]])
>>> v = np.array([1.,2.])
>>> quadratic_form(u,Q,v)
12.0
>>> np.dot(np.dot(u,Q),v)
12.0
>>> np.dot(u, np.dot(Q,v))
12.0
"""
with _setup_gulinalg_threads(workers):
out = _impl.quadratic_form(u, Q, v, **kwargs)
return out | 6cd0abdf3d49ce38ba61ba6da9ee107663b1a8b9 | 1,745 |
import numpy as np
def reorg(dat):
"""This function grabs the data from the dictionary of data types
(organized by ID), and combines them into the
:class:`dolfyn.ADPdata` object.
"""
outdat = apb.ADPdata()
cfg = outdat['config'] = db.config(_type='Nortek AD2CP')
cfh = cfg['filehead config'] = dat['filehead config']
cfg['model'] = (cfh['ID'].split(',')[0][5:-1])
outdat['props'] = {}
outdat['props']['inst_make'] = 'Nortek'
outdat['props']['inst_model'] = cfg['model']
outdat['props']['inst_type'] = 'ADP'
for id, tag in [(21, ''), (24, '_b5'), (26, '_ar')]:
if id == 26:
collapse_exclude = [0]
else:
collapse_exclude = []
if id not in dat:
continue
dnow = dat[id]
cfg['burst_config' + tag] = lib.headconfig_int2dict(
lib.collapse(dnow['config'], exclude=collapse_exclude,
name='config'))
outdat['mpltime' + tag] = lib.calc_time(
dnow['year'] + 1900,
dnow['month'],
dnow['day'],
dnow['hour'],
dnow['minute'],
dnow['second'],
dnow['usec100'].astype('uint32') * 100)
tmp = lib.beams_cy_int2dict(
lib.collapse(dnow['beam_config'], exclude=collapse_exclude,
name='beam_config'), 21)
cfg['ncells' + tag] = tmp['ncells']
cfg['coord_sys' + tag] = tmp['cy']
cfg['nbeams' + tag] = tmp['nbeams']
for ky in ['SerialNum', 'cell_size', 'blanking',
'nom_corr', 'data_desc',
'vel_scale', 'power_level']:
# These ones should 'collapse'
# (i.e., all values should be the same)
# So we only need that one value.
cfg[ky + tag] = lib.collapse(dnow[ky], exclude=collapse_exclude,
name=ky)
for ky in ['c_sound', 'temp', 'press',
'heading', 'pitch', 'roll',
'temp_press', 'batt_V',
'temp_mag', 'temp_clock',
'mag', 'accel',
'ambig_vel', 'xmit_energy',
'error', 'status0', 'status',
'_ensemble', 'ensemble']:
# No if statement here
outdat[ky + tag] = dnow[ky]
for ky in [
'vel', 'amp', 'corr',
'alt_dist', 'alt_quality', 'alt_status',
'ast_dist', 'ast_quality', 'ast_offset_time',
'ast_pressure',
'altraw_nsamp', 'altraw_dist', 'altraw_samp',
'echo',
'orientmat', 'angrt',
'percent_good',
'std_pitch', 'std_roll', 'std_heading', 'std_press'
]:
if ky in dnow:
outdat[ky + tag] = dnow[ky]
for grp, keys in defs._burst_group_org.items():
if grp not in outdat and \
len(set(defs._burst_group_org[grp])
.intersection(outdat.keys())):
outdat[grp] = db.TimeData()
for ky in keys:
if ky == grp and ky in outdat and \
not isinstance(outdat[grp], db.TimeData):
tmp = outdat.pop(grp)
outdat[grp] = db.TimeData()
outdat[grp][ky] = tmp
#print(ky, tmp)
if ky + tag in outdat and not \
isinstance(outdat[ky + tag], db.TimeData):
outdat[grp][ky + tag] = outdat.pop(ky + tag)
    # Move 'altimeter raw' data to its own down-sampled structure
if 26 in dat:
ard = outdat['altraw'] = db.MappedTime()
for ky in list(outdat.iter_data(include_hidden=True)):
if ky.endswith('_ar'):
grp = ky.split('.')[0]
if '.' in ky and grp not in ard:
ard[grp] = db.TimeData()
ard[ky.rstrip('_ar')] = outdat.pop(ky)
N = ard['_map_N'] = len(outdat['mpltime'])
parent_map = np.arange(N)
ard['_map'] = parent_map[np.in1d(outdat.sys.ensemble, ard.sys.ensemble)]
outdat['config']['altraw'] = db.config(_type='ALTRAW', **ard.pop('config'))
outdat.props['coord_sys'] = {'XYZ': 'inst',
'ENU': 'earth',
'BEAM': 'beam'}[cfg['coord_sys'].upper()]
tmp = lib.status2data(outdat.sys.status) # returns a dict
outdat.orient['orient_up'] = tmp['orient_up']
# 0: XUP, 1: XDOWN, 4: ZUP, 5: ZDOWN
    # Heading is: 0,1: Z; 4,5: X
return outdat | 2389be25e7052016a6a710803b7b661a7eb1606c | 1,746 |
import os
def human2pickett(name: str, reduction="A", linear=True, nuclei=0):
""" Function for translating a Hamiltonian parameter to a Pickett
identifier.
An alternative way of doing this is to programmatically
generate the Pickett identifiers, and just use format string
to output the identifier.
"""
pickett_parameters = read_yaml(
os.path.expanduser("~") + "/.pyspectools/pickett_terms.yml"
)
if name is "B" and linear is True:
# Haven't thought of a clever way of doing this yet...
identifier = 100
elif name is "B" and linear is False:
identifier = 20000
else:
# Hyperfine terms
if name in ["eQq", "eQq/2"]:
identifier = str(pickett_parameters[name]).format(nuclei)
elif "D_" in name or "del" in name:
identifier = str(pickett_parameters[name][reduction])
else:
try:
identifier = pickett_parameters[name]
except KeyError:
print("Parameter name unknown!")
return identifier | 683713e660e6e65846d32c7d018024674c4732f8 | 1,747 |
def get_oauth2_service_account_keys():
"""A getter that returns the required OAuth2 service account keys.
Returns:
A tuple containing the required keys as strs.
"""
return _OAUTH2_SERVICE_ACCOUNT_KEYS | bcded81a6884dc40b9f2ccb32e8b14df450b6fd6 | 1,748 |
from pathlib import Path
from typing import Any
import sys
import toml
from schema import SchemaError
def read_conf_file(
toml_path: Path,
file_desc: str,
schema_type: str,
) -> Any:
"""Read TOML configuration and verify against schema."""
if not toml_path.exists():
logger.error(f'{file_desc} file "{toml_path}" does not exist')
sys.exit(1)
try:
toml_dict = toml.load(toml_path)
except TypeError:
logger.error(f'Error in {file_desc} filename "{toml_path}"')
sys.exit(1)
except toml.TomlDecodeError as e:
logger.error(f"File {toml_path} is not valid TOML:")
logger.error(e)
sys.exit(1)
if schema_type == "combine":
file_schema = COMBINE_SCHEMA
elif schema_type == "plot":
file_schema = PLOTTING_SCHEMA
else:
logger.error(f"unknown schema type {schema_type}")
sys.exit(1)
try:
validated = file_schema.validate(toml_dict)
except SchemaError as e:
logger.error(e)
sys.exit(1)
return validated | 0004c188d3bce92399a3b3e72cc0ce6225d1e4d6 | 1,749 |
import json
import os
from six import text_type
def mocked_requests_post(*args, **kwargs):
"""Mock to replace requests.post"""
class MockResponse:
"""Mock class for KustoResponse."""
def __init__(self, json_data, status_code):
self.json_data = json_data
self.text = text_type(json_data)
self.status_code = status_code
self.headers = None
def json(self):
"""Get json data from response."""
return self.json_data
if args[0] == "https://somecluster.kusto.windows.net/v2/rest/query":
if "truncationmaxrecords" in kwargs["json"]["csl"]:
if json.loads(kwargs["json"]["properties"])["Options"]["deferpartialqueryfailures"]:
file_name = "query_partial_results_defer_is_true.json"
else:
file_name = "query_partial_results_defer_is_false.json"
elif "Deft" in kwargs["json"]["csl"]:
file_name = "deft.json"
with open(os.path.join(os.path.dirname(__file__), "input", file_name), "r") as response_file:
data = response_file.read()
return MockResponse(json.loads(data), 200)
elif args[0] == "https://somecluster.kusto.windows.net/v1/rest/mgmt":
if kwargs["json"]["csl"] == ".show version":
file_name = "versionshowcommandresult.json"
else:
file_name = "adminthenquery.json"
with open(os.path.join(os.path.dirname(__file__), "input", file_name), "r") as response_file:
data = response_file.read()
return MockResponse(json.loads(data), 200)
return MockResponse(None, 404) | 017cf98285e77b7684cb37b3d65f9dd292f505b1 | 1,750 |
def grammar_info(df, col):
"""return three separate attributes with
    clean abstract, flesch score and sentence count"""
df['clean_abstract'] = clean_text(df[col])
df['flesch_score'] = df[col].apply(flesch_score)
df['sentence_count'] = sentence_count(df[col])
return df | 7606121f68434a760255cca10e75840ca058c50c | 1,751 |
import os
import re
def read_file_list(bld, file):
"""
    Read and process a file list file (.waf_file) and manage duplicate files and possible globbing patterns to prepare
    the list for ingestion by the project
:param bld: The build context
:param file: The .waf_file file list to process
:return: The processed list file
"""
if not os.path.isfile(os.path.join(bld.path.abspath(), file)):
raise Errors.WafError("Invalid waf file list file: {}. File not found.".format(file))
def _invalid_alias_callback(alias_key):
error_message = "Invalid alias '{}' specified in {}".format(alias_key, file)
raise Errors.WafError(error_message)
def _alias_not_enabled_callback(alias_key, roles):
error_message = "3rd Party alias '{}' specified in {} is not enabled. Make sure that at least one of the " \
"following roles is enabled: [{}]".format(alias_key, file, ', '.join(roles))
raise Errors.WafError(error_message)
# Manage duplicate files and glob hits
dup_set = set()
glob_hits = 0
waf_file_node = bld.path.make_node(file)
waf_file_node_abs = waf_file_node.abspath()
base_path_abs = waf_file_node.parent.abspath()
if not os.path.exists(waf_file_node_abs):
raise Errors.WafError('Invalid WAF file list: {}'.format(waf_file_node_abs))
def _determine_vs_filter(input_rel_folder_path, input_filter_name, input_filter_pattern):
"""
        Calculate the vs filter based on the resulting relative path, the input filter name,
and the pattern used to derive the input relative path
"""
vs_filter = input_filter_name
if len(input_rel_folder_path) > 0:
# If the resulting relative path has a subfolder, the base the filter on the following conditions
if input_filter_name.lower()=='root':
# This is the root folder, use the relative folder subpath as the filter
vs_filter = input_rel_folder_path
else:
# This is a named filter, the filter will place all results under this filter
pattern_dirname = os.path.dirname(input_filter_pattern)
if len(pattern_dirname) > 0:
if input_rel_folder_path != pattern_dirname:
# Strip out the base of the filter name
vs_filter = input_filter_name + '/' + input_rel_folder_path.replace(pattern_dirname, '')
else:
vs_filter = input_filter_name
else:
vs_filter = input_filter_name + '/' + input_rel_folder_path
return vs_filter
def _process_glob_entry(glob_content, filter_name, current_uber_dict):
"""
Process a glob content from the input file list
"""
if 'pattern' not in glob_content:
raise Errors.WafError('Missing keyword "pattern" from the glob entry"')
original_pattern = glob_content.pop('pattern').replace('\\', '/')
if original_pattern.startswith('@'):
ALIAS_PATTERN = re.compile('@.*@')
alias_match = ALIAS_PATTERN.search(original_pattern)
if alias_match:
alias = alias_match.group(0)[1:-1]
pattern = original_pattern[len(alias)+2:]
if alias=='ENGINE':
search_node = bld.path
else:
search_node = bld.root.make_node(bld.ThirdPartyPath(alias))
else:
pattern = original_pattern
search_node = waf_file_node.parent
else:
pattern = original_pattern
search_node = waf_file_node.parent
while pattern.startswith('../'):
pattern = pattern[3:]
search_node = search_node.parent
glob_results = search_node.ant_glob(pattern, **glob_content)
for globbed_file in glob_results:
rel_path = globbed_file.path_from(waf_file_node.parent).replace('\\', '/')
abs_path = globbed_file.abspath().replace('\\', '/')
rel_folder_path = os.path.dirname(rel_path)
vs_filter = _determine_vs_filter(rel_folder_path, filter_name, original_pattern)
if vs_filter not in current_uber_dict:
current_uber_dict[vs_filter] = []
if abs_path in dup_set:
Logs.warn("[WARN] File '{}' specified by the pattern '{}' in waf file '{}' is a duplicate. It will be ignored"
.format(abs_path, original_pattern, waf_file_node_abs))
else:
current_uber_dict[vs_filter].append(rel_path)
dup_set.add(abs_path)
def _clear_empty_uber_dict(current_uber_dict):
"""
Perform house clean in case glob pattern overrides move all files out of a 'root' group.
"""
empty_filters = []
for filter_name, filter_contents in current_uber_dict.items():
if len(filter_contents)==0:
empty_filters.append(filter_name)
for empty_filter in empty_filters:
current_uber_dict.pop(empty_filter)
return current_uber_dict
def _process_uber_dict(uber_section, uber_dict):
"""
Process each uber dictionary value
"""
processed_uber_dict = {}
for filter_name, filter_contents in uber_dict.items():
for filter_content in filter_contents:
if isinstance(filter_content, str):
if '*' in filter_content or '?' in filter_content:
# If this is a raw glob pattern, stuff it into the expected glob dictionary
_process_glob_entry(dict(pattern=filter_content), filter_name, processed_uber_dict)
elif filter_content.startswith('@ENGINE@'):
file_path = os.path.normpath(filter_content.replace('@ENGINE@', bld.engine_path))
if not os.path.exists(file_path):
Logs.warn("[WARN] File '{}' specified in '{}' does not exist. It will be ignored"
.format(file_path, waf_file_node_abs))
else:
if filter_name not in processed_uber_dict:
processed_uber_dict[filter_name] = []
processed_uber_dict[filter_name].append(filter_content)
dup_set.add(file_path)
else:
# This is a straight up file reference.
# Do any processing on an aliased reference
if filter_content.startswith('@'):
processed_path = bld.PreprocessFilePath(filter_content, _invalid_alias_callback,
_alias_not_enabled_callback)
else:
processed_path = os.path.normpath(os.path.join(base_path_abs, filter_content))
if not os.path.exists(processed_path):
Logs.warn("[WARN] File '{}' specified in '{}' does not exist. It will be ignored"
.format(processed_path, waf_file_node_abs))
elif not os.path.isfile(processed_path):
Logs.warn("[WARN] Path '{}' specified in '{}' is a folder, only files or glob patterns are "
"allowed. It will be ignored"
.format(processed_path, waf_file_node_abs))
elif processed_path in dup_set:
Logs.warn("[WARN] File '{}' specified in '{}' is a duplicate. It will be ignored"
.format(processed_path, waf_file_node_abs))
else:
if filter_name not in processed_uber_dict:
processed_uber_dict[filter_name] = []
processed_uber_dict[filter_name].append(processed_path)
dup_set.add(processed_path)
elif isinstance(filter_content, dict):
# Dictionaries automatically go through the glob pattern working
_process_glob_entry(filter_content, filter_name, processed_uber_dict)
else:
raise Errors.WafError("Invalid entry '{}' in file '{}', section '{}/{}'"
.format(filter_content, file, uber_section, filter_name))
return _clear_empty_uber_dict(processed_uber_dict)
def _get_cached_file_list():
"""
Calculate the location of the cached waf_files path
"""
bintemp_path = os.path.join(bld.srcnode.abspath(), BINTEMP_FOLDER)
src_relative_path = file_node.path_from(bld.srcnode)
cached_waf_files_abs_path = os.path.join(bintemp_path, src_relative_path)
return cached_waf_files_abs_path
file_node = bld.path.make_node(file)
# Read the source waf_file list
source_file_list = bld.parse_json_file(file_node)
# Prepare a processed waf_file list
processed_file_list = {}
for uber_file_entry, uber_file_dict in source_file_list.items():
processed_file_list[uber_file_entry] = _process_uber_dict(uber_file_entry, uber_file_dict)
pass
return processed_file_list | 28fa2d07085a572b9e3e33ef508c82b8c4f1bf42 | 1,752 |
def landing():
"""Landing page"""
return render_template('public/index.html') | 462b8f4451008832c6883be64dc23712bc76c907 | 1,753 |
import numpy as np
def uniform_decay(distance_array, scale):
"""
Transform a measurement array using a uniform distribution.
The output is 1 below the scale parameter and 0 above it.
Some sample values. Measurements are in multiple of ``scale``; decay value are in fractions of
the maximum value:
+---------------+---------------+
| measurement | decay value |
+===============+===============+
| 0.0 | 1.0 |
+---------------+---------------+
| 0.25 | 1.0 |
+---------------+---------------+
| 0.5 | 1.0 |
+---------------+---------------+
| 0.75 | 1.0 |
+---------------+---------------+
| 1.0 | 1.0 |
+---------------+---------------+
"""
return (distance_array <= scale).astype(np.float64) | e643e7e962d3b6e29c2c23c0aa682e77a539d04b | 1,754 |
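A quick usage sketch of the cutoff behaviour of the function above:

import numpy as np

distances = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
uniform_decay(distances, scale=1.0)   # -> array([1., 1., 1., 0., 0.])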
def pid_to_service(pid):
"""
Check if a PID belongs to a systemd service and return its name.
Return None if the PID does not belong to a service.
Uses DBUS if available.
"""
if dbus:
return _pid_to_service_dbus(pid)
else:
return _pid_to_service_systemctl(pid) | 925f67611d83b3304db673e5e3d0c0a7dafd8211 | 1,755 |
def Frequencies(bands, src):
"""
Count the number of scalars in each band.
:param: bands - the bands.
:param: src - the vtkPolyData source.
:return: The frequencies of the scalars in each band.
"""
freq = dict()
for i in range(len(bands)):
        freq[i] = 0
tuples = src.GetPointData().GetScalars().GetNumberOfTuples()
for i in range(tuples):
x = src.GetPointData().GetScalars().GetTuple1(i)
for j in range(len(bands)):
if x <= bands[j][2]:
freq[j] = freq[j] + 1
break
return freq | 081e37f0d2d9d5a70266b24372d75d94d86fcbb0 | 1,756 |
from typing import Callable, Dict
import torch
import torch.nn.functional as F
def get_loss_fn(loss: str) -> Callable[..., torch.Tensor]:
"""
Get loss function as a PyTorch functional loss based on the name of the loss function.
Choices include 'cross_entropy', 'nll_loss', and 'kl_div'.
Args:
loss: a string indicating the loss function to return.
"""
loss_fn_mapping: Dict[str, Callable[..., torch.Tensor]] = {
'cross_entropy': F.cross_entropy,
'nll_loss': F.nll_loss,
'kl_div': F.kl_div,
}
try:
loss_fn: Callable[..., torch.Tensor] = loss_fn_mapping[loss]
except KeyError:
raise ValueError(f'Loss function {loss} is not supported.')
return loss_fn | ebbb20dba1b7573c615c35d683a59c9a5151b0e9 | 1,757 |
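A short usage sketch of the returned functional loss (shapes are illustrative):

import torch

logits = torch.randn(8, 10)             # batch of 8 samples, 10 classes
targets = torch.randint(0, 10, (8,))
loss_fn = get_loss_fn('cross_entropy')
loss = loss_fn(logits, targets)         # scalar tensor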
def FormatAddress(chainIDAlias: str, hrp: str, addr: bytes) -> str:
"""FormatAddress takes in a chain prefix, HRP, and byte slice to produce a string for an address."""
addr_str = FormatBech32(hrp, addr)
return f"{chainIDAlias}{addressSep}{addr_str}" | 4004e2367e13abb890d22b653b4ac849bf615d1a | 1,758 |
from typing import List
from operator import or_
async def get_journal_scopes(
db_session: Session, user_id: str, user_group_id_list: List[str], journal_id: UUID
) -> List[JournalPermissions]:
"""
    Returns a list of all permissions (for the user and for the groups the user belongs to) for the provided user and journal.
"""
journal_spec = JournalSpec(id=journal_id)
await find_journal(db_session, journal_spec)
if journal_id is None:
raise JournalNotFound(
"In order to get journal permissions, journal_id must be specified"
)
query = db_session.query(JournalPermissions).filter(
JournalPermissions.journal_id == journal_id
)
if user_id is None and user_group_id_list is None:
raise InvalidParameters(
"In order to get journal permissions, at least one of user_id, or user_group_id_list must be specified"
)
query = query.filter(
or_(
JournalPermissions.holder_id == user_id,
JournalPermissions.holder_id.in_(user_group_id_list),
)
)
journal_permissions = query.all()
if not journal_permissions:
raise PermissionsNotFound(f"No permissions for journal_id={journal_id}")
return journal_permissions | 2f3fcc3cbfdc124a10ee04a716c76f7e2144e0de | 1,759 |
import re
def clean_script_title(script_title):
"""Cleans up a TV/movie title to save it as a file name.
"""
clean_title = re.sub(r'\s+', ' ', script_title).strip()
clean_title = clean_title.replace('\\', BACKSLASH)
clean_title = clean_title.replace('/', SLASH)
clean_title = clean_title.replace(':', COLON)
clean_title = clean_title.replace('*', STAR)
clean_title = clean_title.replace('<', LESS_THAN)
clean_title = clean_title.replace('>', GREATER_THAN)
clean_title = clean_title.replace('?', QUESTION_MARK)
clean_title = clean_title.replace('|', PIPE)
return clean_title | 6dcee3b05e9654e65e0f8eb78be9383d349adff2 | 1,760 |
import time
import os
import subprocess
import threading
import signal
def runCmd(cmd, timeout=42, sh=False, env=None, retry=0):
"""
Execute an external command, read the output and return it.
@param cmd (str|list of str): command to be executed
@param timeout (int): timeout in sec, after which the command is forcefully terminated
@param sh (bool): True if the command is to be run in a shell and False if directly
@param env (dict): environment variables for the new process (instead of inheriting from the current process)
@param retry (int): number of retries on command timeout
@return: (stdout, stderr, rc) (str, str, int): the output of the command
"""
trace = ""
logger = get_logger()
if isinstance(cmd, str):
log_cmd = cmd
else:
log_cmd = ' '.join(cmd)
if log_cmd.startswith("/usr/lpp/mmfs/bin/mmccr fget"): # drop temp file name
log_cmd = ' '.join(log_cmd.split()[:-1])
t_start = time.time()
try:
if env is not None:
fullenv = dict(os.environ)
fullenv.update(env)
env = fullenv
# create the subprocess, ensuring a new process group is spawned
# so we can later kill the process and all its child processes
proc = subprocess.Popen(cmd, shell=sh,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=False, env=env)
timer = threading.Timer(timeout, _stop_process, [proc, logger, log_cmd, timeout])
timer.start()
(sout, serr) = proc.communicate()
timer.cancel() # stop the timer when we got data from process
ret = proc.poll()
except OSError as e:
logger.debug(str(e))
sout = ""
serr = str(e)
ret = 127 if "No such file" in serr else 255
finally:
try:
proc.stdout.close()
proc.stderr.close()
except: #pylint: disable=bare-except
pass
t_run = time.time() - t_start
cmd_timeout = ret in (-signal.SIGTERM, -signal.SIGKILL) # 143,137
if ret == -6 and retry >= 0 : # special handling for sigAbrt
logger.warning("retry abrt %s with subprocess %s", cmd, s32)
(sout, serr, ret) = runCmd(cmd, timeout, sh, env, -1)
if cmd_timeout and retry > 0:
retry -= 1
logger.warning("Retry command %s counter: %s", cmd, retry)
(sout, serr, ret) = runCmd(cmd, timeout, sh, env, retry)
elif cmd_timeout:
serr = CMD_TIMEDOUT
logger.warning("runCMD: %s Timeout:%d ret:%s", cmd, timeout, ret)
elif trace:
logger.debug("runCMD: %s :(%d) ret:%s \n%s \n%s", cmd, timeout, ret, serr, sout)
return (sout, serr, ret) | d7a7054589af1f9cf30a2048eecacc4bc9d4044b | 1,761 |
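# A small usage sketch for runCmd, assuming a POSIX system with an `echo` binary on
# PATH and that the module-level helpers referenced above (get_logger, _stop_process,
# CMD_TIMEDOUT) are available. Note that stdout/stderr come back as bytes because the
# Popen call does not enable text mode.
out, err, rc = runCmd(["echo", "hello"], timeout=5)
print(rc, out.strip())  # 0 b'hello'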
def _calc_cumsum_matrix_jit(X, w_list, p_ar, open_begin):
"""Fast implementation by numba.jit."""
len_x, len_y = X.shape
# cumsum matrix
D = np.ones((len_x, len_y), dtype=np.float64) * np.inf
if open_begin:
X = np.vstack((np.zeros((1, X.shape[1])), X))
D = np.vstack((np.zeros((1, D.shape[1])), D))
w_list[:, 0] += 1
# number of patterns
num_pattern = p_ar.shape[0]
# max pattern length
max_pattern_len = p_ar.shape[1]
# pattern cost
pattern_cost = np.zeros(num_pattern, dtype=np.float64)
# step cost
step_cost = np.zeros(max_pattern_len, dtype=np.float64)
# number of cells
num_cells = w_list.shape[0]
for cell_idx in range(num_cells):
i = w_list[cell_idx, 0]
j = w_list[cell_idx, 1]
if i == j == 0:
D[i, j] = X[0, 0]
continue
for pidx in range(num_pattern):
# calculate local cost for each pattern
for sidx in range(1, max_pattern_len):
# calculate step cost of pair-wise cost matrix
pattern_index = p_ar[pidx, sidx, 0:2]
ii = int(i + pattern_index[0])
jj = int(j + pattern_index[1])
if ii < 0 or jj < 0:
step_cost[sidx] = np.inf
continue
else:
step_cost[sidx] = X[ii, jj] \
* p_ar[pidx, sidx, 2]
pattern_index = p_ar[pidx, 0, 0:2]
ii = int(i + pattern_index[0])
jj = int(j + pattern_index[1])
if ii < 0 or jj < 0:
pattern_cost[pidx] = np.inf
continue
pattern_cost[pidx] = D[ii, jj] \
+ step_cost.sum()
min_cost = pattern_cost.min()
if min_cost != np.inf:
D[i, j] = min_cost
return D | a282f68ca5789c97582f9535b5a255066bba44d9 | 1,762 |
def create_field_texture_coordinates(fieldmodule: Fieldmodule, name="texture coordinates", components_count=3,
managed=False) -> FieldFiniteElement:
"""
Create texture coordinates finite element field of supplied name with
number of components 1, 2, or 3 and the components named "u", "v" and "w" if used.
New field is not managed by default.
"""
return create_field_finite_element(fieldmodule, name, components_count,
component_names=("u", "v", "w"), managed=managed, type_coordinate=True) | e19e964e0828006beae3c9e71f30fb0c846de1de | 1,763 |
import uuid
def get_cert_sha1_by_openssl(certraw: str) -> str:
    """Calculate the SHA-1 fingerprint of a certificate and return the raw openssl output string."""
res: str = None
tmpname = None
try:
tmpname = tmppath / f"{uuid.uuid1()}.crt"
while tmpname.exists():
tmpname = tmppath / f"{uuid.uuid1()}.crt"
tmpname.write_text(certraw, encoding="utf-8")
cmd = f"openssl x509 -in {tmpname} -fingerprint -noout -sha1"
res = exec_openssl(cmd)
except Exception as ex:
raise Exception(f"Parse ssl data error, err:{ex}")
finally:
if tmpname is not None:
tmpname.unlink()
return res | 4b92531473e8488a87d14c8ecc8c88d4d0adef0d | 1,764 |
import os
from glob import glob
def get_files(root_path, extension='*.*'):
"""
    - root_path: root path from which the recursive search is performed
    - extension: file extension pattern used to filter the results
    - returns: all files found recursively under the root path
"""
return [y for x in os.walk(root_path) for y in glob(os.path.join(x[0], extension))] | a5ed8a24eb20ad86fe1c4dba2a9028bd639f4f57 | 1,765 |
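# Usage sketch for get_files, assuming a local directory named "data" exists:
# collect every .csv file under it recursively.
csv_files = get_files("data", "*.csv")
print(csv_files)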
from typing import Union
def get_dderivative_skewness(uni_ts: Union[pd.Series, np.ndarray], step_size: int = 1) -> np.float64:
"""
    :param uni_ts: univariate time series.
    :param step_size: spacing used when computing the difference derivative (default 1).
    :return: The skewness of the difference derivative of the univariate time series.
"""
return get_skewness(_difference_derivative(uni_ts, step_size)) | 11688b0cbd5dde2539cc3d5cfa8c5dccb9432f55 | 1,766 |
def extract_query(e: Event, f, woi, data):
"""
    create a query array from the event
    :param e:
    :param f:
    :param woi:
    :param data:
"""
assert woi[0] > 0 and woi[1] > 0
e_start_index = resolve_esi(e, data)
st = int(e_start_index - woi[0] * f)
ed = int(e_start_index + woi[0] * f)
return Event(e.name, e.startT - woi[0], e.endT + woi[1], data[st:ed]), st, ed | 7109e28a265ff49054036e4e4c16ace0fc5eebda | 1,767 |
import ctypes
import win32gui
def getForegroundClassNameUnicode(hwnd=None):
"""
Returns a unicode string containing the class name of the specified
application window.
If hwnd parameter is None, frontmost window will be queried.
"""
if hwnd is None:
hwnd = win32gui.GetForegroundWindow()
# Maximum number of chars we'll accept for the class name; the
# rest will be truncated if it's longer than this.
MAX_LENGTH = 1024
classNameBuf = ctypes.create_unicode_buffer( MAX_LENGTH )
retval = ctypes.windll.User32.GetClassNameW(
hwnd,
classNameBuf,
len( classNameBuf )
)
if retval == 0:
raise ctypes.WinError()
return classNameBuf.value | 82147d3da4c9374078bbeba64ef6968982dc2550 | 1,768 |
def read_mapping_from_csv(bind):
"""
Calls read_csv() and parses the loaded array into a dictionary. The dictionary is defined as follows:
    {
        "teams": {
            *team-name*: {
                "ldap": []
            },
            ....
        },
        "folders": {
            *folder-id*: {
                "name": *folder-name*,
                "permissions": [
                    {
                        "teamId": *team-name*,
                        "permission": *permission*
                    },
                    ....
                ]
            },
            ...
        }
    }
:return: The csv's contents parsed into a dictionary as described above.
"""
result = {"teams": {}, "folders": {}}
csv_content = read_csv(bind)
is_header = True
for line in csv_content:
if not is_header:
ldap = line[0]
team = line[1]
folder_name = line[3]
folder_uuid = line[4]
permission = line[5]
if not team in result["teams"]:
result["teams"][team] = {"ldap": []}
if not ldap in result["teams"][team]["ldap"]:
result["teams"][team]["ldap"].append(ldap)
if not folder_uuid in result["folders"]:
result["folders"][folder_uuid] = {"name": folder_name, "permissions": []}
access = {"teamId": team, "permission": permission}
if not access in result["folders"][folder_uuid]["permissions"]:
result["folders"][folder_uuid]["permissions"].append(access)
else:
is_header = False
return result | 8ffe1b5f489bb3428cb0b2dd3cc7f9eafe9ecf27 | 1,769 |
from typing import Sequence
from typing import Tuple
def primal_update(
agent_id: int,
A: np.ndarray,
W: np.ndarray,
x: np.ndarray,
z: np.ndarray,
lam: np.ndarray,
prev_x: np.ndarray,
prev_z: np.ndarray,
objective_grad: np.ndarray,
feasible_set: CCS,
alpha: float,
tau: float,
nu: float,
others_agent_id: Sequence[int],
others_lam: Sequence[np.ndarray],
) -> Tuple[np.ndarray, np.ndarray]:
    """Run one accelerated primal step: project the momentum-corrected gradient and
    dual-feedback update of x onto the feasible set, and update the auxiliary variable z
    from the weighted disagreement between this agent's dual lam and its neighbours' duals."""
x = feasible_set.projection(
x + alpha * (x - prev_x) - tau * objective_grad - np.matmul(A.T, lam)
)
z = (
z
+ alpha * (z - prev_z)
+ nu
* sum(
[
W[agent_id, oai] * (lam - ol)
for oai, ol in zip(others_agent_id, others_lam)
]
)
)
return x, z | 6a13bb9147b74c3803482f53273ebc831ca1662b | 1,770 |
def norm_cmap(values, cmap, normalize, cm, mn, mx):
""" Normalize and set colormap
Parameters
----------
values
Series or array to be normalized
cmap
matplotlib Colormap
normalize
matplotlib.colors.Normalize
cm
        matplotlib.cm
    mn, mx
        optional lower/upper bounds for normalization; computed from values when both are None
    Returns
    -------
    n_cmap
        mapping of normalized values to colormap (cmap)
    norm
        the matplotlib.colors.Normalize instance used for the mapping
"""
if (mn is None) and (mx is None):
mn, mx = min(values), max(values)
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap, norm | 25515df37fe6b7060acf681287156af2c58d4c03 | 1,771 |
def _cpx(odss_tuple, nterm, ncond):
"""
This function transforms the raw data for electric parameters (voltage, current...) in a suitable complex array
:param odss_tuple: tuple of nphases*2 floats (returned by odsswr as couples of real, imag components, for each phase
of each terminal)
:type odss_tuple: tuple or list
:param nterm: number of terminals of the underlying electric object
:type nterm: int
:param ncond: number of conductors per terminal of the underlying electric object
:type ncond: int
:returns: a [nterm x ncond] numpy array of complex floats
:rtype: numpy.ndarray
"""
assert len(odss_tuple) == nterm * ncond * 2
cpxr = np.zeros([nterm, ncond], 'complex')
def pairwise(iterable):
# "s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
for idx, couple in enumerate(pairwise(odss_tuple)):
real = couple[0]
imag = couple[1]
        cpxr[int(idx / ncond), (idx % ncond)] = np.sum([np.multiply(1j, imag), real], axis=0)
return cpxr | f5931915550bb7ec9e713689c3d79997973eb252 | 1,772 |
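# Illustrative call to _cpx for a hypothetical two-terminal, three-conductor element;
# numpy is assumed to be importable as np at module level, as the snippet itself assumes.
import numpy as np

raw = (1.0, 0.0, 0.5, -0.5, 0.0, 1.0,    # terminal 1: three (re, im) pairs
       -1.0, 0.0, -0.5, 0.5, 0.0, -1.0)  # terminal 2: three (re, im) pairs
print(_cpx(raw, nterm=2, ncond=3))       # -> 2x3 array of complex values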
def get_l2_distance_arad(X1, X2, Z1, Z2, \
width=0.2, cut_distance=6.0, r_width=1.0, c_width=0.5):
""" Calculates the Gaussian distance matrix D for atomic ARAD for two
sets of molecules
K is calculated using an OpenMP parallel Fortran routine.
Arguments:
==============
X1 -- np.array of ARAD descriptors for molecules in set 1.
X2 -- np.array of ARAD descriptors for molecules in set 2.
Z1 -- List of lists of nuclear charges for molecules in set 1.
Z2 -- List of lists of nuclear charges for molecules in set 2.
Keyword arguments:
width --
cut_distance --
r_width --
c_width --
Returns:
==============
D -- The distance matrices for each sigma (4D-array, Nmol1 x Nmol2 x Natom1 x Natoms2)
"""
amax = X1.shape[1]
    assert X1.shape[3] == amax, "ERROR: Check ARAD descriptor sizes! code = 1"
    assert X2.shape[1] == amax, "ERROR: Check ARAD descriptor sizes! code = 2"
    assert X2.shape[3] == amax, "ERROR: Check ARAD descriptor sizes! code = 3"
nm1 = len(Z1)
nm2 = len(Z2)
    assert X1.shape[0] == nm1, "ERROR: Check ARAD descriptor sizes! code = 4"
    assert X2.shape[0] == nm2, "ERROR: Check ARAD descriptor sizes! code = 5"
N1 = []
for Z in Z1:
N1.append(len(Z))
N2 = []
for Z in Z2:
N2.append(len(Z))
N1 = np.array(N1,dtype=np.int32)
N2 = np.array(N2,dtype=np.int32)
c1 = []
for charges in Z1:
c1.append(np.array([PTP[int(q)] for q in charges], dtype=np.int32))
Z1_arad = np.zeros((nm1,amax,2))
for i in range(nm1):
for j, z in enumerate(c1[i]):
Z1_arad[i,j] = z
c2 = []
for charges in Z2:
c2.append(np.array([PTP[int(q)] for q in charges], dtype=np.int32))
Z2_arad = np.zeros((nm2,amax,2))
for i in range(nm2):
for j, z in enumerate(c2[i]):
Z2_arad[i,j] = z
return atomic_arad_l2_distance_all(X1, X2, Z1_arad, Z2_arad, N1, N2, \
nm1, nm2, width, cut_distance, r_width, c_width, amax) | 77a4656a6f0014453991b8619ea4c53c6eec2c78 | 1,773 |
def _swap_endian(val, length):
"""
Swap the endianness of a number
"""
if length <= 8:
return val
if length <= 16:
return (val & 0xFF00) >> 8 | (val & 0xFF) << 8
if length <= 32:
return ((val & 0xFF000000) >> 24 |
(val & 0x00FF0000) >> 8 |
(val & 0x0000FF00) << 8 |
(val & 0x000000FF) << 24)
    raise Exception('Cannot swap endianness for length ' + str(length)) | 4b3b879ad04e43e9454b904ba65420a8d477b629 | 1,774
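# Quick sanity checks for _swap_endian on 16- and 32-bit values.
assert _swap_endian(0xABCD, 16) == 0xCDAB
assert _swap_endian(0x12345678, 32) == 0x78563412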
def get_analysis(output, topology, traj):
"""
Calls analysis fixture with the right arguments depending on the trajectory type.
Parameters
-----------
output : str
Path to simulation 'output' folder.
topology : str
Path to the topology file.
traj : str
Trajectory type: xtc or pdb.
"""
traj = traj if traj else "pdb"
trajectory = f"trajectory.{traj}"
analysis = Analysis(
resname="LIG",
chain="Z",
simulation_output=output,
skip_initial_structures=False,
topology=topology,
water_ids_to_track=[("A", 2109), ("A", 2124)],
traj=trajectory,
)
return analysis | 0382f4e672aba3ab754de7d26d27c7921239951f | 1,775 |
def get_callback_class(module_name, subtype):
""" Can return None. If no class implementation exists for the given subtype, the module is
    searched for a BASE_CALLBACKS_CLASS implementation, which is used if found. """
module = _get_module_from_name(module_name)
if subtype is None:
return _get_callback_base_class(module)
try:
return getattr(module, subtype + CALLBACK_PREFIX)
# If the callback implementation for this subtype doesn't exist,
# attempt to load the BASE_CALLBACKS_CLASS class.
except AttributeError:
return _get_callback_base_class(module) | cf04ddcc28c43b82db44d8be96419efbc166330f | 1,776 |
def index():
    """Show the measurements."""
return render_template('index.html', metingen=Meting.query.all()) | 3d92b912c0af513b6d20a094799f7dfb60220a75 | 1,777 |
def about(template):
"""
Attach a template to a step which can be used to generate
documentation about the step.
"""
def decorator(step_function):
step_function._about_template = template
return step_function
return decorator | 7c00256e39481247857b34dcd5b7783a39b0a8bd | 1,778 |
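# Usage sketch for the about decorator; login_step is a made-up step function used
# purely for illustration.
@about("Logs in as {username} and opens the dashboard")
def login_step(username):
    pass

print(login_step._about_template)  # "Logs in as {username} and opens the dashboard"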
import torch
def _extend_batch_dim(t: torch.Tensor, new_batch_dim: int) -> torch.Tensor:
"""
Given a tensor `t` of shape [B x D1 x D2 x ...] we output the same tensor repeated
along the batch dimension ([new_batch_dim x D1 x D2 x ...]).
"""
num_non_batch_dims = len(t.shape[1:])
repeat_shape = (new_batch_dim, *(1 for _ in range(num_non_batch_dims)))
return t.repeat(repeat_shape) | 7ee1d0930f843a9d31bcc4934d675109f3b2df9b | 1,779 |
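# Usage sketch for _extend_batch_dim with a batch dimension of 1, the case the
# docstring describes.
import torch

t = torch.zeros(1, 3)
assert _extend_batch_dim(t, 4).shape == (4, 3)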
import os
import csv
import random
def read_GTSRB_train(directory, shuffle = True):
"""
Read the training portion of GTSRB database.
Each class has an own index file.
"""
print('Reading trainset index...')
entries = []
for class_id in range(num_classes):
# each class is in a separate folder
print('\r%i%%'%(int((class_id/num_classes) * 100)), end='')
class_str = str(class_id).zfill(5)
class_directory = os.path.join(directory, class_str)
        # each class has its own index file
index_filename = os.path.join(class_directory, 'GT-%s.csv'%class_str)
index = csv.DictReader(open(index_filename, 'r'), delimiter=';')
for line in index:
filename = os.path.join(class_directory, line['Filename'])
x1 = int(line['Roi.X1'])
y1 = int(line['Roi.Y1'])
x2 = int(line['Roi.X2'])
y2 = int(line['Roi.Y2'])
# there is no need to use the class_id from the csv file
# we can be sure that it corresponds to the folder
entries.append(DatasetEntry(filename, x1, y1, x2, y2, class_id))
print('\rdone')
if shuffle: random.shuffle(entries)
return entries | 278813bf2aabc1f721f23d5ee466fb1444b8ca84 | 1,780 |
def get_client(config):
"""
get_client returns a feature client configured using data found in the
settings of the current application.
"""
storage = _features_from_settings(config.registry.settings)
return Client(storage) | 650f0d294514a4d13afd9ab010d6d4bdd4045c43 | 1,781 |
import copy
from ibeis.init import filter_annots
from ibeis.expt import experiment_helpers
import six
def print_results(ibs, testres):
"""
Prints results from an experiment harness run.
Rows store different qaids (query annotation ids)
Cols store different configurations (algorithm parameters)
Args:
ibs (IBEISController): ibeis controller object
testres (test_result.TestResult):
CommandLine:
python dev.py -e print --db PZ_MTEST -a default:dpername=1,qpername=[1,2] -t default:fg_on=False
python dev.py -e print -t best --db seals2 --allgt --vz
python dev.py -e print --db PZ_MTEST --allgt -t custom --print-confusion-stats
python dev.py -e print --db PZ_MTEST --allgt --noqcache --index 0:10:2 -t custom:rrvsone_on=True --print-confusion-stats
python dev.py -e print --db PZ_MTEST --allgt --noqcache --qaid4 -t custom:rrvsone_on=True --print-confusion-stats
python -m ibeis --tf print_results -t default --db PZ_MTEST -a ctrl
python -m ibeis --tf print_results -t default --db PZ_MTEST -a ctrl
python -m ibeis --tf print_results --db PZ_MTEST -a default -t default:lnbnn_on=True default:lnbnn_on=False,bar_l2_on=True default:lnbnn_on=False,normonly_on=True
CommandLine:
python -m ibeis.expt.experiment_printres --test-print_results
utprof.py -m ibeis.expt.experiment_printres --test-print_results
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.expt.experiment_printres import * # NOQA
>>> from ibeis.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts(
>>> 'PZ_MTEST', a='default:dpername=1,qpername=[1,2]', t='default:fg_on=False')
>>> result = print_results(ibs, testres)
>>> print(result)
"""
(cfg_list, cfgx2_cfgresinfo, testnameid, cfgx2_lbl, cfgx2_qreq_) = ut.dict_take(
testres.__dict__, ['cfg_list', 'cfgx2_cfgresinfo', 'testnameid', 'cfgx2_lbl', 'cfgx2_qreq_'])
# cfgx2_cfgresinfo is a list of dicts of lists
# Parse result info out of the lists
cfgx2_nextbestranks = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_next_bestranks')
cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gf_raw_score')
#cfgx2_aveprecs = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_avepercision')
cfgx2_scorediffs = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_scorediff')
#cfgx2_gt_raw_score = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
column_lbls = [ut.remove_chars(ut.remove_vowels(lbl), [' ', ','])
for lbl in cfgx2_lbl]
scorediffs_mat = np.array(ut.replace_nones(cfgx2_scorediffs, np.nan))
print(' --- PRINT RESULTS ---')
print(' use --rank-lt-list=1,5 to specify X_LIST')
if True:
# Num of ranks less than to score
X_LIST = testres.get_X_LIST()
#X_LIST = [1, 5]
#nConfig = len(cfg_list)
#nQuery = len(testres.qaids)
cfgx2_nQuery = list(map(len, testres.cfgx2_qaids))
#cfgx2_qx2_ranks = testres.get_infoprop_list('qx2_bestranks')
#--------------------
# A positive scorediff indicates the groundtruth was better than the
# groundfalse scores
istrue_list = [scorediff > 0 for scorediff in scorediffs_mat]
isfalse_list = [~istrue for istrue in istrue_list]
#------------
# Build Colscore
nLessX_dict = testres.get_nLessX_dict()
#------------
best_rankscore_summary = []
#to_intersect_list = []
# print each configs scores less than X=thresh
for X, cfgx2_nLessX in six.iteritems(nLessX_dict):
max_nLessX = cfgx2_nLessX.max()
bestX_cfgx_list = np.where(cfgx2_nLessX == max_nLessX)[0]
best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestX_cfgx_list)
# FIXME
best_rankscore += rankscore_str(X, max_nLessX, cfgx2_nQuery[bestX_cfgx_list[0]])
best_rankscore_summary += [best_rankscore]
#to_intersect_list.append(ut.take(cfgx2_lbl, max_nLessX))
#intersected = to_intersect_list[0] if len(to_intersect_list) > 0 else []
#for ix in range(1, len(to_intersect_list)):
# intersected = np.intersect1d(intersected, to_intersect_list[ix])
#if False:
# #gt_raw_score_mat = np.vstack(cfgx2_gt_raw_score).T
# #rank_mat = testres.get_rank_mat()
# #------------
# # Build row lbls
# if False:
# qx2_lbl = np.array([
# 'qx=%d) q%s ' % (qx, ibsfuncs.aidstr(testres.qaids[qx], ibs=ibs, notes=True))
# for qx in range(nQuery)])
# #------------
# # Build Colscore and hard cases
# if False:
# qx2_min_rank = []
# qx2_argmin_rank = []
# new_hard_qaids = []
# new_hardtup_list = []
# for qx in range(nQuery):
# ranks = rank_mat[qx]
# valid_ranks = ranks[ranks >= 0]
# min_rank = ranks.min() if len(valid_ranks) > 0 else -3
# bestCFG_X = np.where(ranks == min_rank)[0]
# qx2_min_rank.append(min_rank)
# # Find the best rank over all configurations
# qx2_argmin_rank.append(bestCFG_X)
#@ut.memoize
#def get_new_hard_qx_list(testres):
# """ Mark any query as hard if it didnt get everything correct """
# rank_mat = testres.get_rank_mat()
# is_new_hard_list = rank_mat.max(axis=1) > 0
# new_hard_qx_list = np.where(is_new_hard_list)[0]
# return new_hard_qx_list
# new_hard_qx_list = testres.get_new_hard_qx_list()
# for qx in new_hard_qx_list:
# # New list is in aid format instead of cx format
# # because you should be copying and pasting it
# notes = ' ranks = ' + str(rank_mat[qx])
# qaid = testres.qaids[qx]
# name = ibs.get_annot_names(qaid)
# new_hardtup_list += [(qaid, name + " - " + notes)]
# new_hard_qaids += [qaid]
@ut.argv_flag_dec
def intersect_hack():
failed = testres.rank_mat > 0
colx2_failed = [np.nonzero(failed_col)[0] for failed_col in failed.T]
#failed_col2_only = np.setdiff1d(colx2_failed[1], colx2_failed[0])
#failed_col2_only_aids = ut.take(testres.qaids, failed_col2_only)
failed_col1_only = np.setdiff1d(colx2_failed[0], colx2_failed[1])
failed_col1_only_aids = ut.take(testres.qaids, failed_col1_only)
gt_aids1 = ibs.get_annot_groundtruth(failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[0].daids)
gt_aids2 = ibs.get_annot_groundtruth(failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[1].daids)
qaids_expt = failed_col1_only_aids
gt_avl_aids1 = ut.flatten(gt_aids1)
gt_avl_aids2 = list(set(ut.flatten(gt_aids2)).difference(gt_avl_aids1))
ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids1)
ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids2)
#jsontext = ut.to_json({
# 'qaids': list(qaids_expt),
# 'dinclude_aids1': list(gt_aids_expt1),
# 'dinclude_aids2': list(gt_aids_expt2),
#})
#annotation_configs.varysize_pzm
#from ibeis.expt import annotation_configs
acfg = testres.acfg_list[0]
acfg1 = copy.deepcopy(acfg)
acfg2 = copy.deepcopy(acfg)
acfg1['qcfg']['min_pername'] = None
acfg2['qcfg']['min_pername'] = None
acfg1['dcfg']['min_pername'] = None
acfg2['dcfg']['min_gt_per_name'] = None
acfg1['qcfg']['default_aids'] = qaids_expt
acfg1['dcfg']['gt_avl_aids'] = gt_avl_aids1
acfg2['qcfg']['default_aids'] = qaids_expt
acfg2['dcfg']['gt_avl_aids'] = gt_avl_aids2
annots1 = filter_annots.expand_acfgs(ibs, acfg1, verbose=True)
annots2 = filter_annots.expand_acfgs(ibs, acfg2, verbose=True)
acfg_name_list = dict( # NOQA
acfg_list=[acfg1, acfg2],
expanded_aids_list=[annots1, annots2],
)
test_cfg_name_list = ['candidacy_k']
cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(test_cfg_name_list, ibs=ibs)
t1, t2 = testres_list # NOQA
#ut.embed()
#intersect_hack()
#@ut.argv_flag_dec
#def print_rowlbl():
# print('=====================')
# print('[harn] Row/Query Labels: %s' % testnameid)
# print('=====================')
# print('[harn] queries:\n%s' % '\n'.join(qx2_lbl))
#print_rowlbl()
#------------
@ut.argv_flag_dec
def print_collbl():
print('=====================')
print('[harn] Col/Config Labels: %s' % testnameid)
print('=====================')
enum_cfgx2_lbl = ['%2d) %s' % (count, cfglbl)
for count, cfglbl in enumerate(cfgx2_lbl)]
print('[harn] cfglbl:\n%s' % '\n'.join(enum_cfgx2_lbl))
print_collbl()
#------------
@ut.argv_flag_dec
def print_cfgstr():
print('=====================')
print('[harn] Config Strings: %s' % testnameid)
print('=====================')
cfgstr_list = [query_cfg.get_cfgstr() for query_cfg in cfg_list]
enum_cfgstr_list = ['%2d) %s' % (count, cfgstr)
for count, cfgstr in enumerate(cfgstr_list)]
print('\n[harn] cfgstr:\n%s' % '\n'.join(enum_cfgstr_list))
print_cfgstr()
#------------
#@ut.argv_flag_dec
#def print_rowscore():
# print('=======================')
# print('[harn] Scores per Query: %s' % testnameid)
# print('=======================')
# for qx in range(nQuery):
# bestCFG_X = qx2_argmin_rank[qx]
# min_rank = qx2_min_rank[qx]
# minimizing_cfg_str = ut.indentjoin(cfgx2_lbl[bestCFG_X], '\n * ')
# #minimizing_cfg_str = str(bestCFG_X)
# print('-------')
# print(qx2_lbl[qx])
# print(' best_rank = %d ' % min_rank)
# if len(cfgx2_lbl) != 1:
# print(' minimizing_cfg_x\'s = %s ' % minimizing_cfg_str)
#print_rowscore()
#------------
#@ut.argv_flag_dec
#def print_row_ave_precision():
# print('=======================')
# print('[harn] Scores per Query: %s' % testnameid)
# print('=======================')
# for qx in range(nQuery):
# aveprecs = ', '.join(['%.2f' % (aveprecs[qx],) for aveprecs in cfgx2_aveprecs])
# print('-------')
# print(qx2_lbl[qx])
# print(' aveprecs = %s ' % aveprecs)
#print_row_ave_precision()
##------------
#@ut.argv_flag_dec
#def print_hardcase():
# print('--- hard new_hardtup_list (w.r.t these configs): %s' % testnameid)
# print('\n'.join(map(repr, new_hardtup_list)))
# print('There are %d hard cases ' % len(new_hardtup_list))
# aid_list = [aid_notes[0] for aid_notes in new_hardtup_list]
# name_list = ibs.get_annot_names(aid_list)
# name_set = set(name_list)
# print(sorted(aid_list))
# print('Names: %r' % (name_set,))
#print_hardcase()
#default=not ut.get_argflag('--allhard'))
#------------
#@ut.argv_flag_dec
#def echo_hardcase():
# print('--- hardcase commandline: %s' % testnameid)
# # Show index for current query where hardids reside
# #print('--index ' + (' '.join(map(str, new_hard_qx_list))))
# #print('--take new_hard_qx_list')
# #hardaids_str = ' '.join(map(str, [' ', '--qaid'] + new_hard_qaids))
# hardaids_str = ' '.join(map(str, [' ', '--set-aids-as-hard'] + new_hard_qaids))
# print(hardaids_str)
##echo_hardcase(default=not ut.get_argflag('--allhard'))
#echo_hardcase()
#@ut.argv_flag_dec
#def print_bestcfg():
# print('==========================')
# print('[harn] Best Configurations: %s' % testnameid)
# print('==========================')
# # print each configs scores less than X=thresh
# for X, cfgx2_nLessX in six.iteritems(nLessX_dict):
# max_LessX = cfgx2_nLessX.max()
# bestCFG_X = np.where(cfgx2_nLessX == max_LessX)[0]
# best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestCFG_X)
# best_rankscore += rankscore_str(X, max_LessX, nQuery)
# cfglbl_list = cfgx2_lbl[bestCFG_X]
# best_rankcfg = format_cfgstr_list(cfglbl_list)
# #indent('\n'.join(cfgstr_list), ' ')
# print(best_rankscore)
# print(best_rankcfg)
# print('[cfg*] %d cfg(s) are the best of %d total cfgs' % (len(intersected), nConfig))
# print(format_cfgstr_list(intersected))
#print_bestcfg()
#------------
#@ut.argv_flag_dec
#def print_gtscore():
# # Prints best ranks
# print('gtscore_mat: %s' % testnameid)
# print(' nRows=%r, nCols=%r' % (nQuery, nConfig))
# header = (' labled rank matrix: rows=queries, cols=cfgs:')
# print('\n'.join(cfgx2_lbl))
# column_list = gt_raw_score_mat.T
# print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
# column_lbls=column_lbls, header=header,
# transpose=False,
# use_lbl_width=len(cfgx2_lbl) < 5))
#print_gtscore()
#------------
#@ut.argv_flag_dec
#def print_best_rankmat():
# # Prints best ranks
# print('-------------')
# print('RankMat: %s' % testnameid)
# print(' nRows=%r, nCols=%r' % (nQuery, nConfig))
# header = (' labled rank matrix: rows=queries, cols=cfgs:')
# print('\n'.join(cfgx2_lbl))
# column_list = rank_mat.T
# print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
# column_lbls=column_lbls, header=header,
# transpose=False,
# use_lbl_width=len(cfgx2_lbl) < 5))
#print_best_rankmat()
#@ut.argv_flag_dec
#def print_diffmat():
# # score differences over configs
# print('-------------')
# print('Diffmat: %s' % testnameid)
# diff_matstr = get_diffmat_str(rank_mat, testres.qaids, nConfig)
# print(diff_matstr)
#print_diffmat()
#@ut.argv_flag_dec
#def print_rankhist_time():
# print('A rank histogram is a dictionary. '
# 'The keys denote the range of the ranks that the values fall in')
# # TODO: rectify this code with other hist code
# config_gt_aids = ut.get_list_column(testres.cfgx2_cfgresinfo, 'qx2_gt_aid')
# config_rand_bin_qxs = testres.get_rank_histogram_qx_binxs()
# _iter = enumerate(zip(rank_mat.T, agg_hist_dict, config_gt_aids, config_rand_bin_qxs))
# for cfgx, (ranks, agg_hist_dict, qx2_gt_aid, config_binxs) in _iter:
# #full_cfgstr = testres.cfgx2_qreq_[cfgx].get_full_cfgstr()
# #ut.print_dict(ut.dict_hist(ranks), 'rank histogram', sorted_=True)
# # find the qxs that belong to each bin
# aid_list1 = testres.qaids
# aid_list2 = qx2_gt_aid
# ibs.assert_valid_aids(aid_list1)
# ibs.assert_valid_aids(aid_list2)
# timedelta_list = ibs.get_annot_pair_timdelta(aid_list1, aid_list2)
# #timedelta_str_list = [ut.get_posix_timedelta_str2(delta)
# # for delta in timedelta_list]
# bin_edges = testres.get_rank_histogram_bin_edges()
# timedelta_groups = ut.dict_take(ut.group_items(timedelta_list, config_binxs), np.arange(len(bin_edges)), [])
# timedelta_stats = [ut.get_stats(deltas, use_nan=True, datacast=ut.get_posix_timedelta_str2) for deltas in timedelta_groups]
# print('Time statistics for each rank range:')
# print(ut.dict_str(dict(zip(bin_edges, timedelta_stats)), sorted_=True))
#print_rankhist_time()
#@ut.argv_flag_dec
#def print_rankhist():
# print('A rank histogram is a dictionary. '
# 'The keys denote the range of the ranks that the values fall in')
# # TODO: rectify this code with other hist code
# config_gt_aids = ut.get_list_column(testres.cfgx2_cfgresinfo, 'qx2_gt_aid')
# config_rand_bin_qxs = testres.get_rank_histogram_qx_binxs()
# _iter = enumerate(zip(rank_mat.T, agg_hist_dict, config_gt_aids, config_rand_bin_qxs))
# for cfgx, (ranks, agg_hist_dict, qx2_gt_aid, config_binxs) in _iter:
# print('Frequency of rank ranges:')
# ut.print_dict(agg_hist_dict, 'agg rank histogram', sorted_=True)
#print_rankhist()
#------------
# Print summary
#print(' --- SUMMARY ---')
#------------
#@ut.argv_flag_dec
#def print_colmap():
# print('==================')
# print('[harn] mAP per Config: %s (sorted by mAP)' % testnameid)
# print('==================')
# cfgx2_mAP = np.array([aveprec_list.mean() for aveprec_list in cfgx2_aveprecs])
# sortx = cfgx2_mAP.argsort()
# for cfgx in sortx:
# print('[mAP] cfgx=%r) mAP=%.3f -- %s' % (cfgx, cfgx2_mAP[cfgx], cfgx2_lbl[cfgx]))
# #print('L___ Scores per Config ___')
#print_colmap()
#------------
@ut.argv_flag_dec_true
def print_colscore():
print('==================')
print('[harn] Scores per Config: %s' % testnameid)
print('==================')
#for cfgx in range(nConfig):
# print('[score] %s' % (cfgx2_lbl[cfgx]))
# for X in X_LIST:
# nLessX_ = nLessX_dict[int(X)][cfgx]
# print(' ' + rankscore_str(X, nLessX_, nQuery))
print('\n[harn] ... sorted scores')
for X in X_LIST:
print('\n[harn] Sorted #ranks < %r scores' % (X))
sortx = np.array(nLessX_dict[int(X)]).argsort()
#frac_list = (nLessX_dict[int(X)] / cfgx2_nQuery)[:, None]
#print('cfgx2_nQuery = %r' % (cfgx2_nQuery,))
#print('frac_list = %r' % (frac_list,))
#print('Pairwise Difference: ' + str(ut.safe_pdist(frac_list, metric=ut.absdiff)))
for cfgx in sortx:
nLessX_ = nLessX_dict[int(X)][cfgx]
rankstr = rankscore_str(X, nLessX_, cfgx2_nQuery[cfgx], withlbl=False)
print('[score] %s --- %s' % (rankstr, cfgx2_lbl[cfgx]))
print_colscore()
#------------
ut.argv_flag_dec(print_latexsum)(ibs, testres)
@ut.argv_flag_dec
def print_next_rankmat():
# Prints nextbest ranks
print('-------------')
print('NextRankMat: %s' % testnameid)
header = (' top false rank matrix: rows=queries, cols=cfgs:')
print('\n'.join(cfgx2_lbl))
column_list = cfgx2_nextbestranks
print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
column_lbls=column_lbls, header=header,
transpose=False,
use_lbl_width=len(cfgx2_lbl) < 5))
print_next_rankmat()
#------------
@ut.argv_flag_dec
def print_scorediff_mat():
# Prints nextbest ranks
print('-------------')
print('ScoreDiffMat: %s' % testnameid)
header = (' score difference between top true and top false: rows=queries, cols=cfgs:')
print('\n'.join(cfgx2_lbl))
column_list = cfgx2_scorediffs
column_type = [float] * len(column_list)
print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
column_lbls=column_lbls,
column_type=column_type,
header=header,
transpose=False,
use_lbl_width=len(cfgx2_lbl) < 5))
print_scorediff_mat(alias_flags=['--sdm'])
#------------
def jagged_stats_info(arr_, lbl, col_lbls):
arr = ut.recursive_replace(arr_, np.inf, np.nan)
# Treat infinite as nan
stat_dict = ut.get_jagged_stats(arr, use_nan=True, use_sum=True)
sel_stat_dict, sel_indices = ut.find_interesting_stats(stat_dict, col_lbls)
sel_col_lbls = ut.take(col_lbls, sel_indices)
statstr_kw = dict(precision=3, newlines=True, lbl=lbl, align=True)
stat_str = ut.get_stats_str(stat_dict=stat_dict, **statstr_kw)
sel_stat_str = ut.get_stats_str(stat_dict=sel_stat_dict, **statstr_kw)
sel_stat_str = 'sel_col_lbls = %s' % (ut.list_str(sel_col_lbls),) + '\n' + sel_stat_str
return stat_str, sel_stat_str
@ut.argv_flag_dec
def print_confusion_stats():
"""
CommandLine:
python dev.py --allgt --print-scorediff-mat-stats --print-confusion-stats -t rrvsone_grid
"""
# Prints nextbest ranks
print('-------------')
print('ScoreDiffMatStats: %s' % testnameid)
print('column_lbls = %r' % (column_lbls,))
#cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
#cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gf_raw_score')
gt_rawscores_mat = ut.replace_nones(cfgx2_gt_rawscores, np.nan)
gf_rawscores_mat = ut.replace_nones(cfgx2_gf_rawscores, np.nan)
tp_rawscores = vt.zipcompress(gt_rawscores_mat, istrue_list)
fp_rawscores = vt.zipcompress(gt_rawscores_mat, isfalse_list)
tn_rawscores = vt.zipcompress(gf_rawscores_mat, istrue_list)
fn_rawscores = vt.zipcompress(gf_rawscores_mat, isfalse_list)
tp_rawscores_str, tp_rawscore_statstr = jagged_stats_info(tp_rawscores, 'tp_rawscores', cfgx2_lbl)
fp_rawscores_str, fp_rawscore_statstr = jagged_stats_info(fp_rawscores, 'fp_rawscores', cfgx2_lbl)
tn_rawscores_str, tn_rawscore_statstr = jagged_stats_info(tn_rawscores, 'tn_rawscores', cfgx2_lbl)
fn_rawscores_str, fn_rawscore_statstr = jagged_stats_info(fn_rawscores, 'fn_rawscores', cfgx2_lbl)
#print(tp_rawscores_str)
#print(fp_rawscores_str)
#print(tn_rawscores_str)
#print(fn_rawscores_str)
print(tp_rawscore_statstr)
print(fp_rawscore_statstr)
print(tn_rawscore_statstr)
print(fn_rawscore_statstr)
print_confusion_stats(alias_flags=['--cs'])
ut.argv_flag_dec_true(testres.print_percent_identification_success)()
sumstrs = []
sumstrs.append('')
sumstrs.append('||===========================')
sumstrs.append('|| [cfg*] SUMMARY: %s' % testnameid)
sumstrs.append('||---------------------------')
sumstrs.append(ut.joins('\n|| ', best_rankscore_summary))
sumstrs.append('||===========================')
summary_str = '\n' + '\n'.join(sumstrs) + '\n'
#print(summary_str)
ut.colorprint(summary_str, 'blue')
print('To enable all printouts add --print-all to the commandline') | 265a3ae3e44b3816ed541521e24fb2aa52d1989b | 1,782 |
import datetime
def fra_months(z): # Apologies, this function is verbose--function modeled after SSA regulations
"""A function that returns the number of months from date of birth to FRA based on SSA chart"""
# Declare global variable
global months_to_fra
# If date of birth is 1/1/1938 or earlier, full retirement age (FRA) is 65
if z < datetime.date(1938, 1, 2):
months_to_fra = 780
# If date of birth is between 1/2/1938 and 1/1/1939, then (FRA) is age 65 + 2 months
elif z < datetime.date(1939, 1, 2):
months_to_fra = 782
# If date of birth is between 1/2/1939 and 1/1/1940, then (FRA) is age 65 + 4 months
elif z < datetime.date(1940, 1, 2):
months_to_fra = 784
# If date of birth is between 1/2/1940 and 1/1/1941, then (FRA) is age 65 + 6 months
elif z < datetime.date(1941, 1, 2):
months_to_fra = 786
# If date of birth is between 1/2/1941 and 1/1/1942, then (FRA) is age 65 + 8 months
elif z < datetime.date(1942, 1, 2):
months_to_fra = 788
# If date of birth is between 1/2/1942 and 1/1/1943, then (FRA) is age 65 + 10 months
elif z < datetime.date(1943, 1, 2):
months_to_fra = 790
# If date of birth is between 1/2/1943 and 1/1/1955, then (FRA) is age 66
elif z < datetime.date(1955, 1, 2):
months_to_fra = 792
# If date of birth is between 1/2/1955 and 1/1/1956, then (FRA) is age 66 + 2 months
elif z < datetime.date(1956, 1, 2):
months_to_fra = 794
# If date of birth is between 1/2/1956 and 1/1/1957, then (FRA) is age 66 + 4 months
elif z < datetime.date(1957, 1, 2):
months_to_fra = 796
# If date of birth is between 1/2/1957 and 1/1/1958, then (FRA) is age 66 + 6 months
elif z < datetime.date(1958, 1, 2):
months_to_fra = 798
# If date of birth is between 1/2/1958 and 1/1/1959, then (FRA) is age 66 + 8 months
elif z < datetime.date(1959, 1, 2):
months_to_fra = 800
# If date of birth is between 1/2/1959 and 1/1/1960, then (FRA) is age 66 + 10 months
elif z < datetime.date(1960, 1, 2):
months_to_fra = 802
# If date of birth is 1/2/1960 or later, then (FRA) is age 67
else:
months_to_fra = 804
return months_to_fra | 70ba416f6415fd5db08244ae7543db0573f74b2d | 1,783 |
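# Quick check of fra_months: a birth date in mid-1957 falls in the
# 66-years-and-6-months bracket, i.e. 798 months.
import datetime

assert fra_months(datetime.date(1957, 6, 15)) == 798  # 66 years + 6 months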
def set_global_format_spec(formats: SpecDict):
"""Set the global default format specifiers.
Parameters
----------
formats: dict[type, str]
Class-based format identifiers.
Returns
-------
old_spec : MultiFormatSpec
The previous globally-set formatters.
Example
-------
>>> s = section.Elastic2D(1, 29000, 10, 144)
>>> print(s)
section Elastic 1 29000 10 144
>>> set_global_format_spec({float: '#.3g'})
MultiFormatSpec(int='d', float='g')
>>> print(s)
section Elastic 1 2.90e+04 10.0 144.
"""
old_spec = _GLOBAL_FORMAT_SPEC.copy()
_GLOBAL_FORMAT_SPEC.update(formats)
return old_spec | 1494a6ff2ad71aa9ed0d20bc0620a124d404e5da | 1,784 |
def gen_base_pass(length=15):
"""
Generate base password.
- A new password will be generated on each call.
:param length: <int> password length.
:return: <str> base password.
"""
generator = PassGen()
return generator.make_password(length=length) | 571683589e13b8dcbd74573b31e5fc7644360bfe | 1,785 |
def split_component_chars(address_parts):
"""
:param address_parts: list of the form [(<address_part_1>, <address_part_1_label>), .... ]
returns [(<char_0>, <address_comp_for_char_0), (<char_1>, <address_comp_for_char_1),.., (<char_n-1>, <address_comp_for_char_n-1)]
"""
char_arr = []
for address_part, address_part_label in address_parts:
# The address part of the tuple (address_part, address_part_label)
for c in address_part:
char_arr.append((c, address_part_label))
return char_arr | f4f3dd59378a689e9048cee96b8d6f12e9d8fe21 | 1,786 |
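# Usage sketch for split_component_chars on a two-part address.
parts = [("12", "house_number"), ("Main", "street")]
print(split_component_chars(parts))
# [('1', 'house_number'), ('2', 'house_number'), ('M', 'street'), ('a', 'street'), ('i', 'street'), ('n', 'street')]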
import json
def report_metrics(topic, message):
"""
    Report metric data to storage via datamanage.
    :param topic: the topic to report to
    :param message: the metric data points to report
    :return: whether the report succeeded
"""
try:
res = DataManageApi.metrics.report({"kafka_topic": topic, MESSAGE: message, TAGS: [DEFAULT_GEOG_AREA_TAG]})
logger.info(f"report capacity metric {json.dumps(message)}")
if res.is_success():
return True
else:
logger.warning(f"report metric failed. {json.dumps(message)} {res.message}")
return False
except Exception:
logger.error("query metric failed, encounter some exception", exc_info=True)
return False | 28f0bf1671b4116b26b8dba3f0c0a34174a0597a | 1,787 |
def wg_completion_scripts_cb(data, completion_item, buffer, completion):
""" Complete with known script names, for command '/weeget'. """
global wg_scripts
wg_read_scripts(download_list=False)
if len(wg_scripts) > 0:
for id, script in wg_scripts.items():
weechat.hook_completion_list_add(completion, script["full_name"],
0, weechat.WEECHAT_LIST_POS_SORT)
return weechat.WEECHAT_RC_OK | b9dc0d5e736cfeb1dc98d09b8e12c6a52696d89d | 1,788 |
def getG(source):
    """ Read the graph and its reverse from a text file of edges (one 'v1 v2' pair per line). """
G = {}
Grev = {}
for i in range(1,N+1):
G[i] = []
Grev[i] = []
fin = open(source)
for line in fin:
v1 = int(line.split()[0])
v2 = int(line.split()[1])
G[v1].append(v2)
Grev[v2].append(v1)
fin.close()
return G, Grev | 6e9a8a5c69267403ee3c624670c60af547d37a46 | 1,789 |
import re
def remove_version(code):
""" Remove any version directive """
    pattern = r'\#\s*version[^\r\n]*\n'
regex = re.compile(pattern, re.MULTILINE|re.DOTALL)
return regex.sub('\n', code) | 101ada9490137a879ea287076989a732942368f8 | 1,790 |
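# Usage sketch for remove_version on a small GLSL fragment; the "#version" line is
# replaced by a blank line.
src = "#version 330 core\nvoid main() {}\n"
print(remove_version(src))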
def unlabeled_balls_in_labeled_boxes(balls, box_sizes):
"""
OVERVIEW
This function returns a generator that produces all distinct distributions of
indistinguishable balls among labeled boxes with specified box sizes
(capacities). This is a generalization of the most common formulation of the
problem, where each box is sufficiently large to accommodate all of the
balls, and is an important example of a class of combinatorics problems
called 'weak composition' problems.
CONSTRUCTOR INPUTS
    balls: the number of balls
box_sizes: This argument is a list of length 1 or greater. The length of
the list corresponds to the number of boxes. `box_sizes[i]` is a positive
integer that specifies the maximum capacity of the ith box. If
`box_sizes[i]` equals `n` (or greater), the ith box can accommodate all `n`
balls and thus effectively has unlimited capacity.
ACKNOWLEDGMENT
I'd like to thank Chris Rebert for helping me to convert my prototype
class-based code into a generator function.
"""
if not isinstance(balls, int):
raise TypeError("balls must be a non-negative integer.")
if balls < 0:
raise ValueError("balls must be a non-negative integer.")
if not isinstance(box_sizes,list):
raise ValueError("box_sizes must be a non-empty list.")
capacity= 0
for size in box_sizes:
if not isinstance(size, int):
raise TypeError("box_sizes must contain only positive integers.")
if size < 1:
raise ValueError("box_sizes must contain only positive integers.")
capacity+= size
if capacity < balls:
raise ValueError("The total capacity of the boxes is less than the "
"number of balls to be distributed.")
return _unlabeled_balls_in_labeled_boxes(balls, box_sizes) | 6390226744c2d4b756b43e880707accc333893d5 | 1,791 |
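# Usage sketch, assuming the private generator _unlabeled_balls_in_labeled_boxes that
# performs the actual enumeration is available alongside this wrapper: distribute 3
# indistinguishable balls into two boxes of capacity 2 each.
for distribution in unlabeled_balls_in_labeled_boxes(3, [2, 2]):
    print(distribution)  # expected distributions: (2, 1) and (1, 2)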
def beginning_next_non_empty_line(bdata, i):
    """ Skip to the end of the current line in bdata, then past any end-of-line
    characters, and return the index at which the next non-empty line begins.
    """
while bdata[i] not in EOL:
i += 1
while bdata[i] in EOL:
i += 1
return i | 0a372729a7ad794a9385d87be39d62b1e6831b71 | 1,792 |
import collections
def VisualizeBoxes(image,
boxes,
classes,
scores,
class_id_to_name,
min_score_thresh=.25,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False,
text_loc='TOP'):
"""Visualize boxes on top down image."""
box_to_display_str_map = collections.defaultdict(str)
box_to_color_map = collections.defaultdict(str)
num_boxes = boxes.shape[0]
for i in range(num_boxes):
if scores is not None and scores[i] < min_score_thresh:
continue
box = tuple(boxes[i].tolist())
display_str = ''
if not skip_labels:
if classes[i] in class_id_to_name:
class_name = class_id_to_name[classes[i]]
display_str = str(class_name)
else:
display_str = 'N/A'
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
box_to_display_str_map[box] = display_str
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
box_to_color_map[box] = PIL_COLOR_LIST[classes[i] % len(PIL_COLOR_LIST)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
DrawBoundingBoxOnImage(
image,
box,
color=color,
thickness=line_thickness,
display_str=box_to_display_str_map[box],
text_loc=text_loc)
return image | b02216a5a2e7fa7029dd0fea298efd1d593bab88 | 1,793 |
from datetime import datetime
import sys
def tagDataskp(dList, start, end, name):
"""
    Takes a position (or a start/end range) and returns the corresponding data from dList.
    """
    tagdata = None
    try:
#if end is not None:
if end:
#tagdata = ",".join(dList[start:end + 1])
tagdata = dList[start:end + 1]
else:
tagdata = dList[start]
except:
        sys.stderr.write("Error getting Tag Data: %s, %s. Event: %s [%s].\n" % (name, dList[2], dList[1], str(datetime.now())))  # dList[2] is the 'id'
return tagdata or None | 13d611b92dcd61377a68b53b47331260a9936f09 | 1,794 |
def cal_softplus(x):
"""Calculate softplus."""
return np.log(np.exp(x) + 1) | a966826f1e508ca1a197e63396ae9e2f779bcf96 | 1,795 |
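# Usage sketch for cal_softplus. Note that np.log(np.exp(x) + 1) can overflow for large
# x; np.logaddexp(0, x) is a numerically stable alternative.
import numpy as np

print(cal_softplus(np.array([-1.0, 0.0, 1.0])))  # approx [0.3133, 0.6931, 1.3133]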
import os
import pickle
def load_list_from_disk_with_pickle(path_to_list: str) -> list:
"""This function loads a list from disk
Args:
path_to_list (str): path to where the list is saved
Returns:
loaded_list (list): loaded list
Raises:
AssertionError: if list path does not exist
"""
assert os.path.exists(path_to_list), "Path {} does not exist".format(path_to_list)
open_file = open(path_to_list, "rb")
loaded_list = pickle.load(open_file) # load from disk
open_file.close()
return loaded_list | abaf968c1b71bba83edac0e6d91b8eeb0dddc517 | 1,796 |
def prepare_spark_conversion(df: pd.DataFrame) -> pd.DataFrame:
"""Pandas does not distinguish NULL and NaN values. Everything null-like
is converted to NaN. However, spark does distinguish NULL and NaN for
example. To enable correct spark dataframe creation with NULL and NaN
values, the `PANDAS_NULL` constant is used as a workaround to enforce NULL
values in pyspark dataframes. Pyspark treats `None` values as NULL.
Parameters
----------
df: pd.DataFrame
Input dataframe to be prepared.
Returns
-------
df_prepared: pd.DataFrame
Prepared dataframe for spark conversion.
"""
return df.where(df.ne(PANDAS_NULL), None) | f18ddfc3e77809908bf8fa365c1acf8a8d5069c6 | 1,797 |
import requests
import token
from sys import api_version
def get_user_vk_id(id):
"""
    :param id: numeric ID of the VK user
    :return: link to the user's VK profile
"""
response = requests.get('{}users.get?user_ids={}&fields=domain&access_token={}&v={}'
.format(api_address, id, token, api_version))
dict = get_dictionary(response)
return 'https://vk.com/{}'.format(dict['response'][0]['domain']) | bc82fc4e3a72adb3c8c1cdf4838351ee7aa608ad | 1,798 |
import scipy
def controllable_staircase(
A,
B,
C,
D,
E,
tol=1e-9,
):
"""
Implementation of
COMPUTATION OF IRREDUCIBLE GENERALIZED STATE-SPACE REALIZATIONS ANDRAS VARGA
using givens rotations.
it is very slow, but numerically stable
TODO, add pivoting,
TODO, make it use the U-T property on E better for speed
TODO, make it output Q and Z to apply to aux matrices, perhaps use them on C
"""
# from icecream import ic
# import tabulate
Ninputs = B.shape[1]
Nstates = A.shape[0]
Nconstr = A.shape[1]
Noutput = C.shape[0]
BA, E = scipy.linalg.qr_multiply(E, np.hstack([B, A]), pivoting=False, mode="left")
Nmin = min(Nconstr, Nstates)
for CidxBA in range(0, Nmin - 1):
for RidxBA in range(Nconstr - 1, CidxBA, -1):
# create a givens rotation for Q reduction on BA
BAv0 = BA[RidxBA - 1, CidxBA]
BAv1 = BA[RidxBA, CidxBA]
BAvSq = BAv0 ** 2 + BAv1 ** 2
if BAvSq < tol:
continue
BAvAbs = BAvSq ** 0.5
c = BAv1 / BAvAbs
s = BAv0 / BAvAbs
M = np.array([[s, +c], [-c, s]])
BA[RidxBA - 1 : RidxBA + 1, :] = M @ BA[RidxBA - 1 : RidxBA + 1, :]
# TODO, use the U-T to be more efficient
E[RidxBA - 1 : RidxBA + 1, :] = M @ E[RidxBA - 1 : RidxBA + 1, :]
Cidx = RidxBA
Ridx = RidxBA
# row and col swap
Ev0 = E[Ridx, Cidx - 1]
Ev1 = E[Ridx, Cidx]
EvSq = Ev0 ** 2 + Ev1 ** 2
if EvSq < tol:
continue
EvAbs = EvSq ** 0.5
c = Ev0 / EvAbs
s = Ev1 / EvAbs
MT = np.array([[s, +c], [-c, s]])
BA[:, Ninputs:][:, Cidx - 1 : Cidx + 1] = (
BA[:, Ninputs:][:, Cidx - 1 : Cidx + 1] @ MT
)
C[:, Cidx - 1 : Cidx + 1] = C[:, Cidx - 1 : Cidx + 1] @ MT
# TODO, use the U-T to be more efficient
E[:, Cidx - 1 : Cidx + 1] = E[:, Cidx - 1 : Cidx + 1] @ MT
B = BA[:, :Ninputs]
A = BA[:, Ninputs:]
return A, B, C, D, E | fb2e2f162aad45a1bdbb21a67207576539700b0e | 1,799 |