text
stringlengths 4
1.02M
| meta
dict |
---|---|
from dtreeviz.utils import *
import numpy as np
import pandas as pd
import graphviz
from pathlib import Path
from sklearn import tree
from graphviz.backend import run, view
import matplotlib.pyplot as plt
from dtreeviz.shadow import *
from numbers import Number
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
import tempfile
from os import getpid, makedirs
from sys import platform as PLATFORM
from colour import Color
YELLOW = "#fefecd" # "#fbfbd0" # "#FBFEB0"
GREEN = "#cfe2d4"
DARKBLUE = '#313695'
BLUE = '#4575b4'
DARKGREEN = '#006400'
LIGHTORANGE = '#fee090'
LIGHTBLUE = '#a6bddb'
GREY = '#444443'
WEDGE_COLOR = GREY #'orange'
HIGHLIGHT_COLOR = '#D67C03'
# How many bins should we have based upon number of classes
NUM_BINS = [0, 0, 10, 9, 8, 6, 6, 6, 5, 5, 5]
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
color_blind_friendly_colors = [
None, # 0 classes
None, # 1 class
["#FEFEBB","#a1dab4"], # 2 classes
["#FEFEBB","#D9E6F5",'#a1dab4'], # 3 classes
["#FEFEBB","#D9E6F5",'#a1dab4','#fee090'], # 4
["#FEFEBB","#D9E6F5",'#a1dab4','#41b6c4','#fee090'], # 5
["#FEFEBB",'#c7e9b4','#41b6c4','#2c7fb8','#fee090','#f46d43'], # 6
["#FEFEBB",'#c7e9b4','#7fcdbb','#41b6c4','#225ea8','#fdae61','#f46d43'], # 7
["#FEFEBB",'#edf8b1','#c7e9b4','#7fcdbb','#1d91c0','#225ea8','#fdae61','#f46d43'], # 8
["#FEFEBB",'#c7e9b4','#41b6c4','#74add1','#4575b4','#313695','#fee090','#fdae61','#f46d43'], # 9
["#FEFEBB",'#c7e9b4','#41b6c4','#74add1','#4575b4','#313695','#fee090','#fdae61','#f46d43','#d73027'] # 10
]
class DTreeViz:
def __init__(self,dot):
self.dot = dot
def _repr_svg_(self):
return self.svg()
def svg(self):
"""Render tree as svg and return svg text."""
tmp = tempfile.gettempdir()
svgfilename = f"{tmp}/DTreeViz_{getpid()}.svg"
self.save(svgfilename)
with open(svgfilename, encoding='UTF-8') as f:
svg = f.read()
return svg
def view(self):
tmp = tempfile.gettempdir()
svgfilename = f"{tmp}/DTreeViz_{getpid()}.svg"
self.save(svgfilename)
view(svgfilename)
def save(self, filename):
"""
Save the svg of this tree visualization into filename argument.
Mac platform can save any file type (.pdf, .png, .svg). Other platforms
would fail with errors. See https://github.com/parrt/dtreeviz/issues/4
"""
path = Path(filename)
if not path.parent.exists:
makedirs(path.parent)
g = graphviz.Source(self.dot, format='svg')
dotfilename = g.save(directory=path.parent.as_posix(), filename=path.stem)
if PLATFORM=='darwin':
# dot seems broken in terms of fonts if we use -Tsvg. Force users to
# brew install graphviz with librsvg (else metrics are off) and
# use -Tsvg:cairo which fixes bug and also automatically embeds images
format = path.suffix[1:] # ".svg" -> "svg" etc...
cmd = ["dot", f"-T{format}:cairo", "-o", filename, dotfilename]
# print(' '.join(cmd))
stdout, stderr = run(cmd, capture_output=True, check=True, quiet=False)
else:
if not filename.endswith(".svg"):
raise (Exception(f"{PLATFORM} can only save .svg files: {filename}"))
# Gen .svg file from .dot but output .svg has image refs to other files
#orig_svgfilename = filename.replace('.svg', '-orig.svg')
cmd = ["dot", "-Tsvg", "-o", filename, dotfilename]
# print(' '.join(cmd))
stdout, stderr = run(cmd, capture_output=True, check=True, quiet=False)
# now merge in referenced SVG images to make all-in-one file
with open(filename, encoding='UTF-8') as f:
svg = f.read()
svg = inline_svg_images(svg)
with open(filename, "w", encoding='UTF-8') as f:
f.write(svg)
def rtreeviz_univar(ax,
x_train: (pd.Series, np.ndarray), # 1 vector of X data
y_train: (pd.Series, np.ndarray),
max_depth,
feature_name: str,
target_name: str,
fontsize: int = 14,
show={'title','splits'}):
if isinstance(x_train, pd.Series):
x_train = x_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
y_range = (min(y_train), max(y_train)) # same y axis for all
overall_feature_range = (np.min(x_train), np.max(x_train))
t = tree.DecisionTreeRegressor(max_depth=max_depth)
t.fit(x_train.reshape(-1,1), y_train)
shadow_tree = ShadowDecTree(t, x_train.reshape(-1,1), y_train, feature_names=[feature_name])
splits = []
for node in shadow_tree.internal:
splits.append(node.split())
splits = sorted(splits)
bins = [overall_feature_range[0]] + splits + [overall_feature_range[1]]
means = []
for i in range(len(bins) - 1):
left = bins[i]
right = bins[i + 1]
inrange = y_train[(x_train >= left) & (x_train < right)]
means.append(np.mean(inrange))
ax.scatter(x_train, y_train, marker='o', alpha=.4, c=BLUE,
edgecolor=GREY, lw=.3)
if 'splits' in show:
for split in splits:
ax.plot([split, split], [*y_range], '--', color='grey', linewidth=.7)
prevX = overall_feature_range[0]
for i, m in enumerate(means):
split = overall_feature_range[1]
if i < len(splits):
split = splits[i]
ax.plot([prevX, split], [m, m], '-', color='#f46d43', linewidth=2)
prevX = split
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=fontsize)
if 'title' in show:
title = f"Regression tree depth {max_depth}, training $R^2$={t.score(x_train.reshape(-1,1),y_train):.3f}"
plt.title(title, fontsize=fontsize, color=GREY)
plt.xlabel(feature_name, fontsize=fontsize)
plt.ylabel(target_name, fontsize=fontsize)
def rtreeviz_bivar_heatmap(ax, X_train, y_train, max_depth, feature_names,
fontsize=14, ticks_fontsize=12,
show={'title'}
) -> tree.DecisionTreeClassifier:
"""
Show tesselated 2D feature space for bivariate regression tree. X_train can
have lots of features but features lists indexes of 2 features to train tree with.
"""
if isinstance(X_train,pd.DataFrame):
X_train = X_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
rt = tree.DecisionTreeRegressor(max_depth=max_depth)
rt.fit(X_train, y_train)
n_colors_in_map = 100
y_lim = np.min(y_train), np.max(y_train)
y_range = y_lim[1] - y_lim[0]
color_map = list(str(c) for c in Color("#c7e9b4").range_to(Color("#081d58"), n_colors_in_map))
shadow_tree = ShadowDecTree(rt, X_train, y_train, feature_names=feature_names)
tesselation = shadow_tree.tesselation()
for node,bbox in tesselation:
pred = node.prediction()
color = color_map[int(((pred - y_lim[0]) / y_range) * (n_colors_in_map-1))]
x = bbox[0]
y = bbox[1]
w = bbox[2]-bbox[0]
h = bbox[3]-bbox[1]
rect = patches.Rectangle((x, y), w, h, 0, linewidth=.3, alpha=.5,
edgecolor=GREY, facecolor=color)
ax.add_patch(rect)
colors = [color_map[int(((y-y_lim[0])/y_range)*(n_colors_in_map-1))] for y in y_train]
x, y, z = X_train[:,0], X_train[:,1], y_train
ax.scatter(x, y, marker='o', alpha=.95, c=colors, edgecolor=GREY, lw=.3)
ax.set_xlabel(f"{feature_names[0]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.set_ylabel(f"{feature_names[1]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
if 'title' in show:
accur = rt.score(X_train, y_train)
title = f"Regression tree depth {max_depth}, training $R^2$={accur:.3f}"
plt.title(title, fontsize=fontsize, color=GREY)
return None
def rtreeviz_bivar_3D(ax, X_train, y_train, max_depth, feature_names, target_name,
fontsize=14, ticks_fontsize=10,
azim=0, elev=0, dist=7,
show={'title'}
) -> tree.DecisionTreeClassifier:
"""
Show 3D feature space for bivariate regression tree. X_train can
have lots of features but features lists indexes of 2 features to train tree with.
"""
if isinstance(X_train,pd.DataFrame):
X_train = X_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
n_colors_in_map = 100
ax.view_init(elev=elev, azim=azim)
ax.dist=dist
def plane(node, bbox):
x = np.linspace(bbox[0], bbox[2], 2)
y = np.linspace(bbox[1], bbox[3], 2)
xx, yy = np.meshgrid(x, y)
z = np.full(xx.shape, node.prediction())
# print(f"{node.prediction()}->{int(((node.prediction()-y_lim[0])/y_range)*(n_colors_in_map-1))}, lim {y_lim}")
# print(f"{color_map[int(((node.prediction()-y_lim[0])/y_range)*(n_colors_in_map-1))]}")
ax.plot_surface(xx, yy, z, alpha=.85, shade=False,
color=color_map[int(((node.prediction()-y_lim[0])/y_range)*(n_colors_in_map-1))],
edgecolor=GREY, lw=.3)
rt = tree.DecisionTreeRegressor(max_depth=max_depth)
rt.fit(X_train, y_train)
y_lim = np.min(y_train), np.max(y_train)
y_range = y_lim[1] - y_lim[0]
color_map = list(str(c) for c in Color("#c7e9b4").range_to(Color("#081d58"), n_colors_in_map))
colors = [color_map[int(((y-y_lim[0])/y_range)*(n_colors_in_map-1))] for y in y_train]
shadow_tree = ShadowDecTree(rt, X_train, y_train, feature_names=feature_names)
tesselation = shadow_tree.tesselation()
for node, bbox in tesselation:
plane(node, bbox)
x, y, z = X_train[:, 0], X_train[:, 1], y_train
ax.scatter(x, y, z, marker='o', alpha=.7, edgecolor=GREY, lw=.3, c=colors)
ax.set_xlabel(f"{feature_names[0]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.set_ylabel(f"{feature_names[1]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.set_zlabel(f"{target_name}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
if 'title' in show:
accur = rt.score(X_train, y_train)
title = f"Regression tree depth {max_depth}, training $R^2$={accur:.3f}"
plt.title(title, fontsize=fontsize)
return None
def ctreeviz_univar(ax, x_train, y_train, max_depth, feature_name, class_names,
target_name,
fontsize=14, nbins=25, gtype='strip',
show={'title','legend','splits'}):
if isinstance(x_train, pd.Series):
x_train = x_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
# ax.set_facecolor('#F9F9F9')
ct = tree.DecisionTreeClassifier(max_depth=max_depth)
ct.fit(x_train.reshape(-1, 1), y_train)
shadow_tree = ShadowDecTree(ct, x_train.reshape(-1, 1), y_train,
feature_names=[feature_name], class_names=class_names)
n_classes = shadow_tree.nclasses()
overall_feature_range = (np.min(x_train), np.max(x_train))
class_values = shadow_tree.unique_target_values
color_values = color_blind_friendly_colors[n_classes]
colors = {v: color_values[i] for i, v in enumerate(class_values)}
X_colors = [colors[cl] for cl in class_values]
ax.set_xlabel(f"{feature_name}", fontsize=fontsize, fontname="Arial",
color=GREY)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_linewidth(.3)
r = overall_feature_range[1] - overall_feature_range[0]
dot_w = 25
X_hist = [x_train[y_train == cl] for cl in class_values]
binwidth = r / nbins
if gtype == 'barstacked':
hist, bins, barcontainers = ax.hist(X_hist,
color=X_colors,
align='mid',
histtype='barstacked',
bins=np.arange(overall_feature_range[0],
overall_feature_range[
1] + binwidth, binwidth),
label=class_names)
for patch in barcontainers:
for rect in patch.patches:
rect.set_linewidth(.5)
rect.set_edgecolor(GREY)
ax.set_xlim(*overall_feature_range)
ax.set_xticks(overall_feature_range)
ax.set_yticks([0, max([max(h) for h in hist])])
elif gtype == 'strip':
# user should pass in short and wide fig
sigma = .013
mu = .08
class_step = .08
dot_w = 20
ax.set_ylim(0, mu + n_classes*class_step)
for i, bucket in enumerate(X_hist):
y_noise = np.random.normal(mu+i*class_step, sigma, size=len(bucket))
ax.scatter(bucket, y_noise, alpha=.7, marker='o', s=dot_w, c=colors[i],
edgecolors=GREY, lw=.3)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY,
labelsize=fontsize)
splits = []
for node in shadow_tree.internal:
splits.append(node.split())
splits = sorted(splits)
bins = [ax.get_xlim()[0]] + splits + [ax.get_xlim()[1]]
pred_box_height = .07 * ax.get_ylim()[1]
preds = []
for i in range(len(bins) - 1):
left = bins[i]
right = bins[i + 1]
inrange = y_train[(x_train >= left) & (x_train < right)]
values, counts = np.unique(inrange, return_counts=True)
pred = values[np.argmax(counts)]
rect = patches.Rectangle((left, 0), (right - left), pred_box_height, linewidth=.3,
edgecolor=GREY, facecolor=colors[pred])
ax.add_patch(rect)
preds.append(pred)
if 'legend' in show:
add_classifier_legend(ax, class_names, class_values, colors, target_name)
if 'title' in show:
accur = ct.score(x_train.reshape(-1, 1), y_train)
title = f"Classifier tree depth {max_depth}, training accuracy={accur*100:.2f}%"
plt.title(title, fontsize=fontsize, color=GREY)
if 'splits' in show:
for split in splits:
plt.plot([split, split], [*ax.get_ylim()], '--', color='grey', linewidth=1)
def ctreeviz_bivar(ax, X_train, y_train, max_depth, feature_names, class_names,
target_name,
fontsize=14,
show={'title','legend','splits'}):
"""
Show tesselated 2D feature space for bivariate classification tree. X_train can
have lots of features but features lists indexes of 2 features to train tree with.
"""
if isinstance(X_train,pd.DataFrame):
X_train = X_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
ct = tree.DecisionTreeClassifier(max_depth=max_depth)
ct.fit(X_train, y_train)
shadow_tree = ShadowDecTree(ct, X_train, y_train,
feature_names=feature_names, class_names=class_names)
tesselation = shadow_tree.tesselation()
n_classes = shadow_tree.nclasses()
class_values = shadow_tree.unique_target_values
color_values = color_blind_friendly_colors[n_classes]
colors = {v: color_values[i] for i, v in enumerate(class_values)}
if 'splits' in show:
for node,bbox in tesselation:
x = bbox[0]
y = bbox[1]
w = bbox[2]-bbox[0]
h = bbox[3]-bbox[1]
rect = patches.Rectangle((x, y), w, h, 0, linewidth=.3, alpha=.4,
edgecolor=GREY, facecolor=colors[node.prediction()])
ax.add_patch(rect)
dot_w = 25
X_hist = [X_train[y_train == cl] for cl in class_values]
for i, h in enumerate(X_hist):
ax.scatter(h[:,0], h[:,1], alpha=1, marker='o', s=dot_w, c=colors[i],
edgecolors=GREY, lw=.3)
ax.set_xlabel(f"{feature_names[0]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.set_ylabel(f"{feature_names[1]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(.3)
if 'legend' in show:
add_classifier_legend(ax, class_names, class_values, colors, target_name)
if 'title' in show:
accur = ct.score(X_train, y_train)
title = f"Classifier tree depth {max_depth}, training accuracy={accur*100:.2f}%"
plt.title(title, fontsize=fontsize, color=GREY)
return None
def add_classifier_legend(ax, class_names, class_values, colors, target_name):
# add boxes for legend
boxes = []
for i, c in enumerate(class_values):
box = patches.Rectangle((0, 0), 20, 10, linewidth=.4, edgecolor=GREY,
facecolor=colors[c], label=class_names[c])
boxes.append(box)
leg = ax.legend(handles=boxes,
frameon=True,
shadow=False,
fancybox=True,
title=target_name,
handletextpad=.35,
borderpad=.8,
bbox_to_anchor=(1.0, 1.0),
edgecolor=GREY)
leg.get_frame().set_linewidth(.5)
leg.get_title().set_color(GREY)
leg.get_title().set_fontsize(10)
leg.get_title().set_fontweight('bold')
for text in leg.get_texts():
text.set_color(GREY)
text.set_fontsize(10)
def dtreeviz(tree_model: (tree.DecisionTreeRegressor, tree.DecisionTreeClassifier),
X_train: (pd.DataFrame, np.ndarray),
y_train: (pd.Series, np.ndarray),
feature_names: List[str],
target_name: str,
class_names: (Mapping[Number, str], List[str]) = None, # required if classifier
precision: int = 2,
orientation: ('TD', 'LR') = "TD",
show_root_edge_labels: bool = True,
show_node_labels: bool = False,
fancy: bool = True,
histtype: ('bar', 'barstacked', 'strip') = 'barstacked',
highlight_path: List[int] = [],
X: np.ndarray = None,
max_X_features_LR: int = 10,
max_X_features_TD: int = 20) \
-> DTreeViz:
"""
Given a decision tree regressor or classifier, create and return a tree visualization
using the graphviz (DOT) language.
:param tree_model: A DecisionTreeRegressor or DecisionTreeClassifier that has been
fit to X_train, y_train.
:param X_train: A data frame or 2-D matrix of feature vectors used to train the model.
:param y_train: A pandas Series or 1-D vector with target values or classes.
:param feature_names: A list of the feature names.
:param target_name: The name of the target variable.
:param class_names: [For classifiers] A dictionary or list of strings mapping class
value to class name.
:param precision: When displaying floating-point numbers, how many digits to display
after the decimal point. Default is 2.
:param orientation: Is the tree top down, "TD", or left to right, "LR"?
:param show_root_edge_labels: Include < and >= on the edges emanating from the root?
:param show_node_labels: Add "Node id" to top of each node in graph for educational purposes
:param fancy:
:param histtype: [For classifiers] Either 'bar' or 'barstacked' to indicate
histogram type. We find that 'barstacked' looks great up to about.
four classes.
:param highlight_path: A list of node IDs to highlight, default is [].
Useful for emphasizing node(s) in tree for discussion.
If X argument given then this is ignored.
:type highlight_path: List[int]
:param X: Instance to run down the tree; derived path to highlight from this vector.
Show feature vector with labels underneath leaf reached. highlight_path
is ignored if X is not None.
:type np.ndarray
:param max_X_features_LR: If len(X) exceeds this limit for LR layout,
display only those features
used to guide X vector down tree. Helps when len(X) is large.
Default is 10.
:param max_X_features_TD: If len(X) exceeds this limit for TD layout,
display only those features
used to guide X vector down tree. Helps when len(X) is large.
Default is 25.
:return: A string in graphviz DOT language that describes the decision tree.
"""
def node_name(node : ShadowDecTreeNode) -> str:
return f"node{node.id}"
def split_node(name, node_name, split):
if fancy:
labelgraph = node_label(node) if show_node_labels else ''
html = f"""<table border="0">
{labelgraph}
<tr>
<td><img src="{tmp}/node{node.id}_{getpid()}.svg"/></td>
</tr>
</table>"""
else:
html = f"""<font face="Helvetica" color="#444443" point-size="12">{name}@{split}</font>"""
if node.id in highlight_path:
gr_node = f'{node_name} [margin="0" shape=box penwidth=".5" color="{HIGHLIGHT_COLOR}" style="dashed" label=<{html}>]'
else:
gr_node = f'{node_name} [margin="0" shape=none label=<{html}>]'
return gr_node
def regr_leaf_node(node, label_fontsize: int = 12):
# always generate fancy regr leaves for now but shrink a bit for nonfancy.
labelgraph = node_label(node) if show_node_labels else ''
html = f"""<table border="0">
{labelgraph}
<tr>
<td><img src="{tmp}/leaf{node.id}_{getpid()}.svg"/></td>
</tr>
</table>"""
if node.id in highlight_path:
return f'leaf{node.id} [margin="0" shape=box penwidth=".5" color="{HIGHLIGHT_COLOR}" style="dashed" label=<{html}>]'
else:
return f'leaf{node.id} [margin="0" shape=box penwidth="0" label=<{html}>]'
def class_leaf_node(node, label_fontsize: int = 12):
labelgraph = node_label(node) if show_node_labels else ''
html = f"""<table border="0" CELLBORDER="0">
{labelgraph}
<tr>
<td><img src="{tmp}/leaf{node.id}_{getpid()}.svg"/></td>
</tr>
</table>"""
if node.id in highlight_path:
return f'leaf{node.id} [margin="0" shape=box penwidth=".5" color="{HIGHLIGHT_COLOR}" style="dashed" label=<{html}>]'
else:
return f'leaf{node.id} [margin="0" shape=box penwidth="0" label=<{html}>]'
def node_label(node):
return f'<tr><td CELLPADDING="0" CELLSPACING="0"><font face="Helvetica" color="{GREY}" point-size="14"><i>Node {node.id}</i></font></td></tr>'
def class_legend_html():
return f"""
<table border="0" cellspacing="0" cellpadding="0">
<tr>
<td border="0" cellspacing="0" cellpadding="0"><img src="{tmp}/legend_{getpid()}.svg"/></td>
</tr>
</table>
"""
def class_legend_gr():
if not shadow_tree.isclassifier():
return ""
return f"""
subgraph cluster_legend {{
style=invis;
legend [penwidth="0" margin="0" shape=box margin="0.03" width=.1, height=.1 label=<
{class_legend_html()}
>]
}}
"""
def instance_html(path, label_fontsize: int = 11):
headers = []
features_used = [node.feature() for node in path[:-1]] # don't include leaf
display_X = X
display_feature_names = feature_names
highlight_feature_indexes = features_used
if (orientation=='TD' and len(X)>max_X_features_TD) or\
(orientation == 'LR' and len(X) > max_X_features_LR):
# squash all features down to just those used
display_X = [X[i] for i in features_used] + ['...']
display_feature_names = [node.feature_name() for node in path[:-1]] + ['...']
highlight_feature_indexes = range(0,len(features_used))
for i,name in enumerate(display_feature_names):
color = GREY
if i in highlight_feature_indexes:
color = HIGHLIGHT_COLOR
headers.append(f'<td cellpadding="1" align="right" bgcolor="white"><font face="Helvetica" color="{color}" point-size="{label_fontsize}"><b>{name}</b></font></td>')
values = []
for i,v in enumerate(display_X):
color = GREY
if i in highlight_feature_indexes:
color = HIGHLIGHT_COLOR
if isinstance(v,int) or isinstance(v, str):
disp_v = v
else:
disp_v = myround(v, precision)
values.append(f'<td cellpadding="1" align="right" bgcolor="white"><font face="Helvetica" color="{color}" point-size="{label_fontsize}">{disp_v}</font></td>')
return f"""
<table border="0" cellspacing="0" cellpadding="0">
<tr>
{''.join(headers)}
</tr>
<tr>
{''.join(values)}
</tr>
</table>
"""
def instance_gr():
if X is None:
return ""
pred, path = shadow_tree.predict(X)
leaf = f"leaf{path[-1].id}"
if shadow_tree.isclassifier():
edge_label = f" Prediction<br/> {path[-1].prediction_name()}"
else:
edge_label = f" Prediction<br/> {myround(path[-1].prediction(), precision)}"
return f"""
subgraph cluster_instance {{
style=invis;
X_y [penwidth="0.3" margin="0" shape=box margin="0.03" width=.1, height=.1 label=<
{instance_html(path)}
>]
}}
{leaf} -> X_y [dir=back; penwidth="1.2" color="{HIGHLIGHT_COLOR}" label=<<font face="Helvetica" color="{GREY}" point-size="{11}">{edge_label}</font>>]
"""
if orientation=="TD":
ranksep = ".2"
nodesep = "0.1"
else:
if fancy:
ranksep = ".22"
nodesep = "0.1"
else:
ranksep = ".05"
nodesep = "0.09"
tmp = tempfile.gettempdir()
# tmp = "/tmp"
shadow_tree = ShadowDecTree(tree_model, X_train, y_train,
feature_names=feature_names, class_names=class_names)
if X is not None:
pred, path = shadow_tree.predict(X)
highlight_path = [n.id for n in path]
n_classes = shadow_tree.nclasses()
color_values = color_blind_friendly_colors[n_classes]
# Fix the mapping from target value to color for entire tree
colors = None
if shadow_tree.isclassifier():
class_values = shadow_tree.unique_target_values
colors = {v:color_values[i] for i,v in enumerate(class_values)}
y_range = (min(y_train)*1.03, max(y_train)*1.03) # same y axis for all
if shadow_tree.isclassifier():
# draw_legend_boxes(shadow_tree, f"{tmp}/legend")
draw_legend(shadow_tree, target_name, f"{tmp}/legend_{getpid()}.svg")
if isinstance(X_train,pd.DataFrame):
X_train = X_train.values
if isinstance(y_train,pd.Series):
y_train = y_train.values
# Find max height (count) for any bar in any node
if shadow_tree.isclassifier():
nbins = get_num_bins(histtype, n_classes)
node_heights = shadow_tree.get_split_node_heights(X_train, y_train, nbins=nbins)
internal = []
for node in shadow_tree.internal:
if fancy:
if shadow_tree.isclassifier():
class_split_viz(node, X_train, y_train,
filename=f"{tmp}/node{node.id}_{getpid()}.svg",
precision=precision,
colors=colors,
histtype=histtype,
node_heights=node_heights,
X = X,
highlight_node=node.id in highlight_path)
else:
regr_split_viz(node, X_train, y_train,
filename=f"{tmp}/node{node.id}_{getpid()}.svg",
target_name=target_name,
y_range=y_range,
precision=precision,
X=X,
highlight_node=node.id in highlight_path)
nname = node_name(node)
gr_node = split_node(node.feature_name(), nname, split=myround(node.split(), precision))
internal.append(gr_node)
leaves = []
for node in shadow_tree.leaves:
if shadow_tree.isclassifier():
class_leaf_viz(node, colors=color_values,
filename=f"{tmp}/leaf{node.id}_{getpid()}.svg")
leaves.append( class_leaf_node(node) )
else:
# for now, always gen leaf
regr_leaf_viz(node, y_train, target_name=target_name,
filename=f"{tmp}/leaf{node.id}_{getpid()}.svg",
y_range=y_range, precision=precision)
leaves.append( regr_leaf_node(node) )
show_edge_labels = False
all_llabel = '<' if show_edge_labels else ''
all_rlabel = '≥' if show_edge_labels else ''
root_llabel = '<' if show_root_edge_labels else ''
root_rlabel = '≥' if show_root_edge_labels else ''
edges = []
# non leaf edges with > and <=
for node in shadow_tree.internal:
nname = node_name(node)
if node.left.isleaf():
left_node_name ='leaf%d' % node.left.id
else:
left_node_name = node_name(node.left)
if node.right.isleaf():
right_node_name ='leaf%d' % node.right.id
else:
right_node_name = node_name(node.right)
llabel = all_llabel
rlabel = all_rlabel
if node==shadow_tree.root:
llabel = root_llabel
rlabel = root_rlabel
lcolor = rcolor = GREY
lpw = rpw = "0.3"
if node.left.id in highlight_path:
lcolor = HIGHLIGHT_COLOR
lpw = "1.2"
if node.right.id in highlight_path:
rcolor = HIGHLIGHT_COLOR
rpw = "1.2"
edges.append( f'{nname} -> {left_node_name} [penwidth={lpw} color="{lcolor}" label=<{llabel}>]' )
edges.append( f'{nname} -> {right_node_name} [penwidth={rpw} color="{rcolor}" label=<{rlabel}>]' )
edges.append(f"""
{{
rank=same;
{left_node_name} -> {right_node_name} [style=invis]
}}
""")
newline = "\n\t"
dot = f"""
digraph G {{
splines=line;
nodesep={nodesep};
ranksep={ranksep};
rankdir={orientation};
margin=0.0;
node [margin="0.03" penwidth="0.5" width=.1, height=.1];
edge [arrowsize=.4 penwidth="0.3"]
{newline.join(internal)}
{newline.join(edges)}
{newline.join(leaves)}
{class_legend_gr()}
{instance_gr()}
}}
"""
return DTreeViz(dot)
def class_split_viz(node: ShadowDecTreeNode,
X_train: np.ndarray,
y_train: np.ndarray,
colors: Mapping[int, str],
node_heights,
filename: str = None,
ticks_fontsize: int = 8,
label_fontsize: int = 9,
precision=1,
histtype: ('bar', 'barstacked', 'strip') = 'barstacked',
X : np.array = None,
highlight_node : bool = False
):
height_range = (.5, 1.5)
h = prop_size(n=node_heights[node.id], counts=node_heights.values(), output_range=height_range)
figsize=(3.3, h)
fig, ax = plt.subplots(1, 1, figsize=figsize)
feature_name = node.feature_name()
# Get X, y data for all samples associated with this node.
X_feature = X_train[:, node.feature()]
X_feature, y_train = X_feature[node.samples()], y_train[node.samples()]
n_classes = node.shadow_tree.nclasses()
nbins = get_num_bins(histtype, n_classes)
overall_feature_range = (np.min(X_train[:, node.feature()]), np.max(X_train[:, node.feature()]))
overall_feature_range_wide = (overall_feature_range[0]-overall_feature_range[0]*.08,
overall_feature_range[1]+overall_feature_range[1]*.05)
ax.set_xlabel(f"{feature_name}", fontsize=label_fontsize, fontname="Arial",
color=GREY)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.3)
ax.spines['bottom'].set_linewidth(.3)
class_names = node.shadow_tree.class_names
r = overall_feature_range[1]-overall_feature_range[0]
class_values = node.shadow_tree.unique_target_values
X_hist = [X_feature[y_train == cl] for cl in class_values]
if histtype=='strip':
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
sigma = .013
mu = .05
class_step = .08
dot_w = 20
ax.set_ylim(0, mu + n_classes * class_step)
for i, bucket in enumerate(X_hist):
alpha = .6 if len(bucket) > 10 else 1
y_noise = np.random.normal(mu + i * class_step, sigma, size=len(bucket))
ax.scatter(bucket, y_noise, alpha=alpha, marker='o', s=dot_w, c=colors[i],
edgecolors=GREY, lw=.3)
else:
X_colors = [colors[cl] for cl in class_values]
binwidth = r / nbins
hist, bins, barcontainers = ax.hist(X_hist,
color=X_colors,
align='mid',
histtype=histtype,
bins=np.arange(overall_feature_range[0],overall_feature_range[1] + binwidth, binwidth),
label=class_names)
# Alter appearance of each bar
for patch in barcontainers:
for rect in patch.patches:
rect.set_linewidth(.5)
rect.set_edgecolor(GREY)
ax.set_yticks([0,max([max(h) for h in hist])])
ax.set_xlim(*overall_feature_range_wide)
ax.set_xticks(overall_feature_range)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
def wedge(ax,x,color):
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
xr = xmax - xmin
yr = ymax - ymin
hr = h / (height_range[1] - height_range[0])
th = yr * .15 * 1 / hr # convert to graph coordinates (ugh)
tw = xr * .018
tipy = -0.1 * yr * .15 * 1 / hr
tria = np.array(
[[x, tipy], [x - tw, -th], [x + tw, -th]])
t = patches.Polygon(tria, facecolor=color)
t.set_clip_on(False)
ax.add_patch(t)
ax.text(node.split(), -2 * th,
f"{myround(node.split(),precision)}",
horizontalalignment='center',
fontsize=ticks_fontsize, color=GREY)
wedge(ax, node.split(), color=WEDGE_COLOR)
if highlight_node:
wedge(ax, X[node.feature()], color=HIGHLIGHT_COLOR)
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def class_leaf_viz(node : ShadowDecTreeNode,
colors : List[str],
filename: str):
size = prop_size(node.nsamples(), counts=node.shadow_tree.leaf_sample_counts(),
output_range=(1.01, 1.5))
# we visually need n=1 and n=9 to appear different but diff between 300 and 400 is no big deal
size = np.sqrt(np.log(size))
counts = node.class_counts()
draw_piechart(counts, size=size, colors=colors, filename=filename, label=f"n={node.nsamples()}")
def regr_split_viz(node: ShadowDecTreeNode,
X_train: np.ndarray,
y_train: np.ndarray,
target_name: str,
filename: str = None,
y_range=None,
ticks_fontsize: int = 8,
label_fontsize: int = 9,
precision=1,
X : np.array = None,
highlight_node : bool = False):
figsize = (2.5, 1.1)
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.tick_params(colors=GREY)
feature_name = node.feature_name()
ax.set_xlabel(f"{feature_name}", fontsize=label_fontsize, fontname="Arial", color=GREY)
ax.set_ylim(y_range)
if node==node.shadow_tree.root:
ax.set_ylabel(target_name, fontsize=label_fontsize, fontname="Arial", color=GREY)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.3)
ax.spines['bottom'].set_linewidth(.3)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
# Get X, y data for all samples associated with this node.
X_feature = X_train[:,node.feature()]
X_feature, y_train = X_feature[node.samples()], y_train[node.samples()]
overall_feature_range = (np.min(X_train[:,node.feature()]), np.max(X_train[:,node.feature()]))
ax.set_xlim(*overall_feature_range)
xmin, xmax = overall_feature_range
xr = xmax - xmin
xticks = list(overall_feature_range)
if node.split()>xmin+.10*xr and node.split()<xmax-.1*xr: # don't show split if too close to axis ends
xticks += [node.split()]
ax.set_xticks(xticks)
ax.scatter(X_feature, y_train, s=5, c=BLUE, alpha=.4, lw=.3)
left, right = node.split_samples()
left = y_train[left]
right = y_train[right]
split = node.split()
ax.plot([overall_feature_range[0],split],[np.mean(left),np.mean(left)],'--', color='k', linewidth=1)
ax.plot([split,split],[*y_range],'--', color='k', linewidth=1)
ax.plot([split,overall_feature_range[1]],[np.mean(right),np.mean(right)],'--', color='k', linewidth=1)
def wedge(ax,x,color):
ymin, ymax = ax.get_ylim()
xr = xmax - xmin
yr = ymax - ymin
hr = figsize[1]
th = yr * .1
tw = xr * .018
tipy = ymin
tria = np.array([[x, tipy], [x - tw, ymin-th], [x + tw, ymin-th]])
t = patches.Polygon(tria, facecolor=color)
t.set_clip_on(False)
ax.add_patch(t)
wedge(ax, node.split(), color=WEDGE_COLOR)
if highlight_node:
wedge(ax, X[node.feature()], color=HIGHLIGHT_COLOR)
plt.tight_layout()
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def regr_leaf_viz(node : ShadowDecTreeNode,
y : (pd.Series,np.ndarray),
target_name,
filename:str=None,
y_range=None,
precision=1,
label_fontsize: int = 9,
ticks_fontsize: int = 8):
samples = node.samples()
y = y[samples]
figsize = (.75, .8)
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.tick_params(colors=GREY)
m = np.mean(y)
ax.set_ylim(y_range)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_linewidth(.3)
ax.set_xticks([])
# ax.set_yticks(y_range)
ticklabelpad = plt.rcParams['xtick.major.pad']
ax.annotate(f"{target_name}={myround(m,precision)}\nn={len(y)}",
xy=(.5, 0), xytext=(.5, -.5*ticklabelpad), ha='center', va='top',
xycoords='axes fraction', textcoords='offset points',
fontsize = label_fontsize, fontname = "Arial", color = GREY)
ax.tick_params(axis='y', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
mu = .5
sigma = .08
X = np.random.normal(mu, sigma, size=len(y))
ax.set_xlim(0, 1)
alpha = .25
ax.scatter(X, y, s=5, c='#225ea8', alpha=alpha, lw=.3)
ax.plot([0,len(node.samples())],[m,m],'--', color=GREY, linewidth=1)
plt.tight_layout()
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def draw_legend(shadow_tree, target_name, filename):
n_classes = shadow_tree.nclasses()
class_values = shadow_tree.unique_target_values
class_names = shadow_tree.class_names
color_values = color_blind_friendly_colors[n_classes]
colors = {v:color_values[i] for i,v in enumerate(class_values)}
boxes = []
for i, c in enumerate(class_values):
box = patches.Rectangle((0, 0), 20, 10, linewidth=.4, edgecolor=GREY,
facecolor=colors[c], label=class_names[c])
boxes.append(box)
fig, ax = plt.subplots(1, 1, figsize=(1,1))
leg = ax.legend(handles=boxes,
frameon=True,
shadow=False,
fancybox=True,
loc='center',
title=target_name,
handletextpad=.35,
borderpad=.8,
edgecolor=GREY)
leg.get_frame().set_linewidth(.5)
leg.get_title().set_color(GREY)
leg.get_title().set_fontsize(10)
leg.get_title().set_fontweight('bold')
for text in leg.get_texts():
text.set_color(GREY)
text.set_fontsize(10)
ax.set_xlim(0,20)
ax.set_ylim(0,10)
ax.axis('off')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def draw_piechart(counts,size,colors,filename,label=None):
n_nonzero = np.count_nonzero(counts)
i = np.nonzero(counts)[0][0]
if n_nonzero==1:
counts = [counts[i]]
colors = [colors[i]]
tweak = size * .01
fig, ax = plt.subplots(1, 1, figsize=(size, size))
ax.axis('equal')
# ax.set_xlim(0 - tweak, size + tweak)
# ax.set_ylim(0 - tweak, size + tweak)
ax.set_xlim(0, size-10*tweak)
ax.set_ylim(0, size-10*tweak)
# frame=True needed for some reason to fit pie properly (ugh)
# had to tweak the crap out of this to get tight box around piechart :(
wedges, _ = ax.pie(counts, center=(size/2-6*tweak,size/2-6*tweak), radius=size/2, colors=colors, shadow=False, frame=True)
for w in wedges:
w.set_linewidth(.5)
w.set_edgecolor(GREY)
ax.axis('off')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if label is not None:
ax.text(size/2-6*tweak, -10*tweak, label,
horizontalalignment='center',
verticalalignment='top',
fontsize=9, color=GREY, fontname="Arial")
# plt.tight_layout()
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def prop_size(n, counts, output_range = (0.00, 0.3)):
min_samples = min(counts)
max_samples = max(counts)
sample_count_range = max_samples - min_samples
if sample_count_range>0:
zero_to_one = (n - min_samples) / sample_count_range
return zero_to_one * (output_range[1] - output_range[0]) + output_range[0]
else:
return output_range[0]
def get_num_bins(histtype, n_classes):
bins = NUM_BINS[n_classes]
if histtype == 'barstacked':
bins *= 2
return bins
global dot_already_tested
if dot_already_tested: return
dot_already_tested = True
tmp = tempfile.gettempdir()
dotfilename = f"{tmp}/testing_svg_{getpid()}.dot"
with open(dotfilename, "w") as f:
f.write("digraph G { A -> B }\n")
svgfilename = f"{tmp}/testing_svg_{getpid()}.svg"
cmd = ["dot", "-Tsvg:cairo", "-o", svgfilename, dotfilename]
print(' '.join(cmd))
ok = True
try:
os.execlp("dot", "dot", "-Tsvg:cairo", "-o", svgfilename, dotfilename)
# run(cmd, capture_output=False, check=False, quiet=True)
except:
ok = False
return ok
| {
"content_hash": "0444097d6cd37a66719732c993eb0e47",
"timestamp": "",
"source": "github",
"line_count": 1167,
"max_line_length": 175,
"avg_line_length": 38.54070265638389,
"alnum_prop": 0.5619094203704116,
"repo_name": "parrt/AniML",
"id": "bf925007fbf715facf431890c125a1b499e23ac4",
"size": "44977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dtreeviz/trees.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "135146"
},
{
"name": "Kotlin",
"bytes": "38472"
},
{
"name": "Python",
"bytes": "15012"
}
],
"symlink_target": ""
} |
from re import findall, sub
from .default_recipe_parser import recipe_parser
def use_schema_org(html):
if 'http://schema.org/Recipe' in str(html):
return True
return False
class schema_org_recipe_parser(recipe_parser):
def datetime_or_content(self, el):
if el:
if el.has_attr('datetime'):
return self.parse_isoduration(el['datetime'])
if el.has_attr('content'):
return self.parse_isoduration(el['content'])
return None
else:
return 0
def parse_cook_time(self):
el = self.soup.find(attrs={'itemprop': 'cookTime'})
return self.datetime_or_content(el)
def parse_prep_time(self):
el = self.soup.find(attrs={'itemprop': 'prepTime'})
return self.datetime_or_content(el)
def parse_total_time(self):
el = self.soup.find(attrs={'itemprop': 'totalTime'})
return self.datetime_or_content(el)
def parse_canonical_url(self):
el = self.soup.find(attrs={'id': 'canonicalUrl'})
if el and el.has_attr('href'):
return el['href']
def parse_image_url(self):
el = self.soup.find(attrs={'itemtype': 'http://schema.org/Recipe'})
if el:
el = el.find(attrs={'itemprop': 'image'})
if el and el.has_attr('src'):
return el['src']
def parse_description(self):
el = self.soup.find(attrs={'itemprop': 'description'})
if el:
text = el.get_text() or el['content']
return text.strip()
def parse_published_date(self):
el = self.soup.find(attrs={'itemprop': 'datePublished'})
if el and el.has_attr('datetime'):
return el.get_text()
def parse_yields(self):
el = self.soup.find(attrs={'itemprop': 'recipeYield'})
if el:
text = el.get_text() or el['content']
y = findall(r'\d+', text.split(' ')[0])
yields = int(y[0]) or 0
yield_modifier = ' '.join(text.split(' ')[1:])
return yield_modifier, yields
def parse_instructions(self):
root = self.soup.find(attrs={'itemprop': 'recipeInstructions'})
res = []
for el in root.find_all('li'):
t = sub(r'[\t\r\n]', '', el.get_text())
if len(t) > 2:
res.append(t)
return res or None
def parse_ingredients(self):
els = self.soup.find_all(attrs={'itemprop': 'ingredients'})
res = []
for el in els:
t = el.get_text()
t = sub('[\t\r\n]', '', t)
t = sub("\s+", " ", t).strip()
if len(t) > 2:
res.append(t)
return res or None
def parse_name(self):
el = self.soup.find(attrs={'itemprop': 'name'})
if el:
return el.get_text()
def parse_author(self):
el = self.soup.find(attrs={'itemprop': 'author'})
if el:
further = el.find(attrs={'itemprop': 'name'})
if further:
return further.get_text()
else:
return el.get_text()
| {
"content_hash": "de9eb3a6bc12ad7d2b09e257669d79ff",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 75,
"avg_line_length": 31.099009900990097,
"alnum_prop": 0.5278573702642471,
"repo_name": "scttcper/hangry-py",
"id": "7fc76dbd06982cce896223005e3336d6f72e1fe0",
"size": "3141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hangrypy/schema_org_recipe_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16843"
}
],
"symlink_target": ""
} |
from highcharts.views.bar import HighChartsBarView # noqa
from highcharts.views.line import HighChartsLineView # noqa
from highcharts.views.area import HighChartsAreaView # noqa
| {
"content_hash": "809c6174e7526e62721400b8cb4bd0ab",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 60,
"avg_line_length": 60.333333333333336,
"alnum_prop": 0.8342541436464088,
"repo_name": "vivek8943/django-highcharts",
"id": "0a34164cb17f43f1ad83539f56dadb61b43b03c2",
"size": "181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "highcharts/views/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1305446"
},
{
"name": "Python",
"bytes": "16121"
}
],
"symlink_target": ""
} |
from nose.tools import *
from tictactoe.ai_strategies.easy import Easy
from tictactoe.game_board import GameBoard
def easy_strategy_makes_any_opening_move_test():
ai = Easy("X", "O")
board = GameBoard()
move = ai.make_move(board)
assert_equal(True, move in list(range(0, 9)))
def easy_strategy_makes_move_in_nearly_full_board_test():
ai = Easy("X", "O")
board = GameBoard()
board.play_move("X", 0)
board.play_move("O", 2)
board.play_move("X", 3)
board.play_move("O", 4)
board.play_move("X", 5)
board.play_move("O", 6)
board.play_move("X", 7)
board.play_move("O", 8)
assert_equal(1, ai.make_move(board))
| {
"content_hash": "ff47dccbe657311ae3c1ec38bb7a0411",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 57,
"avg_line_length": 29.130434782608695,
"alnum_prop": 0.6268656716417911,
"repo_name": "rickerbh/tictactoe_py",
"id": "5fafd2cc86fc794c471809424b92edfe213a2f85",
"size": "670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ai_easy_strategy_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "37602"
}
],
"symlink_target": ""
} |
from xml.etree import ElementTree
import json
import os
import shutil
from contextlib import contextmanager
import logging
from copy import deepcopy
import validictory
import build_steps
import build_steps_local
import build_steps_predicates
from xcode import XcodeProject
LOG = logging.getLogger(__name__)
@contextmanager
def cd(target_dir):
'Change directory to :param:`target_dir` as a context manager - i.e. rip off Fabric'
old_dir = os.getcwd()
try:
os.chdir(target_dir)
yield target_dir
finally:
os.chdir(old_dir)
# Needed to prevent elementtree screwing with namespace names
ElementTree.register_namespace('android', 'http://schemas.android.com/apk/res/android')
ElementTree.register_namespace('tools', 'http://schemas.android.com/tools')
def dict_merge(a, b):
'''recursively merges dict's. not just simple a['key'] = b['key'], if
both a and b have a key who's value is a dict then dict_merge is called
on both values and the result stored in the returned dictionary.'''
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.iteritems():
if k in result and isinstance(result[k], dict):
result[k] = dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
def _call_with_params(method, build_params, params):
if isinstance(params, dict):
return method(build_params, **params)
elif isinstance(params, tuple):
return method(build_params, *params)
else:
return method(build_params, params)
def apply_module_to_osx_project(module_path, project_path, skip_framework=False, inspector_config=False, include_tests=False, local_build_steps=None, app_config=None):
"""Take the module in a specific folder and apply it to an xcode ios project in another folder"""
if not os.path.exists(os.path.join(module_path, 'manifest.json')):
LOG.warning("Failed to include module: %s" % module_path)
return
with open(os.path.join(module_path, 'manifest.json')) as manifest_file:
manifest = json.load(manifest_file)
# JS
if os.path.exists(os.path.join(module_path, 'javascript', 'module.js')):
with open(os.path.join(module_path, 'javascript', 'module.js')) as module_js:
with open(os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'all.js'), 'a') as alljs:
alljs.write('(function () {\n')
alljs.write(module_js.read())
alljs.write('\n})();')
# Tests
if include_tests:
if os.path.exists(os.path.join(module_path, 'tests', 'fixtures')):
if os.path.exists(os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures', manifest['name'])):
shutil.rmtree(os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures', manifest['name']))
if not os.path.exists(os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures')):
os.makedirs(os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures'))
shutil.copytree(os.path.join(module_path, 'tests', 'fixtures'), os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures', manifest['name']))
if os.path.exists(os.path.join(module_path, 'tests', 'automated.js')):
try:
os.makedirs(os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'tests', 'automated'))
except OSError:
pass
shutil.copy2(os.path.join(module_path, 'tests', 'automated.js'), os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'tests', 'automated', manifest['name']+'.js'))
if os.path.exists(os.path.join(module_path, 'tests', 'interactive.js')):
try:
os.makedirs(os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'tests', 'interactive'))
except OSError:
pass
shutil.copy2(os.path.join(module_path, 'tests', 'interactive.js'), os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'tests', 'interactive', manifest['name']+'.js'))
# Add module a if we want it
if not skip_framework:
module_framework = os.path.join(module_path, 'osx', '%s.framework' % manifest['name'])
if os.path.isdir(module_framework):
shutil.copytree(module_framework, os.path.join(project_path, '%s.framework' % manifest['name']))
xcode_project = XcodeProject(os.path.join(project_path, 'ForgeInspector.xcodeproj', 'project.pbxproj'))
xcode_project.add_framework(manifest['name']+'.framework', "<group>")
xcode_project.add_saved_framework(manifest['name']+'.framework', "<group>")
xcode_project.save()
if inspector_config:
# Add inspector config for module to app_config.js(on).
if app_config is None:
with open(os.path.join(project_path, 'ForgeInspector', 'assets', 'app_config.json')) as app_config_json:
app_config = json.load(app_config_json)
if os.path.exists(os.path.join(module_path, 'inspector_config.json')):
with open(os.path.join(module_path, 'inspector_config.json'), "r") as inspector_config_file:
inspector_config = json.load(inspector_config_file)
else:
inspector_config = {
"modules": {
manifest['name']: {
"version": "exampleversion"
}
}
}
app_config = dict_merge(app_config, inspector_config)
with open(os.path.join(project_path, 'ForgeInspector', 'assets', 'app_config.json'), 'w') as app_config_json:
json.dump(app_config, app_config_json)
with open(os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'app_config.js'), 'w') as app_config_js:
app_config_js.write("window.forge = {}; window.forge.config = %s;" % json.dumps(app_config))
# Validate config
if os.path.exists(os.path.join(module_path, 'config_schema.json')) and \
"config" in app_config['modules'][manifest['name']]:
with open(os.path.join(module_path, 'config_schema.json')) as schema_file:
config_schema = json.load(schema_file)
try:
validictory.validate(app_config['modules'][manifest['name']]['config'], config_schema)
except validictory.ValidationError as e:
raise Exception("Validation failed for module '%s' with error: %s" % (manifest['name'], str(e)))
# frameworks
module_frameworks = os.path.join(module_path, 'osx', 'frameworks')
if os.path.isdir(module_frameworks):
if os.path.exists(os.path.join(project_path, 'ForgeModule')):
xcode_project = XcodeProject(os.path.join(project_path, 'ForgeModule', 'ForgeModule.xcodeproj', 'project.pbxproj'))
xcode_inspector_project = XcodeProject(os.path.join(project_path, 'ForgeInspector.xcodeproj', 'project.pbxproj'))
for framework in os.listdir(module_frameworks):
if framework.endswith(".framework"):
shutil.copytree(os.path.join(module_frameworks, framework), os.path.join(project_path, framework))
if os.path.exists(os.path.join(project_path, 'ForgeModule')):
xcode_project.add_framework(os.path.join('..', framework), '<group>')
xcode_inspector_project.add_saved_framework(framework, '<group>')
if os.path.exists(os.path.join(project_path, 'ForgeModule')):
xcode_project.save()
xcode_inspector_project.save()
# build steps
module_steps_path = os.path.join(module_path, 'osx', 'build_steps.json')
if os.path.isfile(module_steps_path):
with open(module_steps_path, 'r') as build_steps_file:
module_build_steps = json.load(build_steps_file)
with cd(project_path):
build_params = {
'app_config': app_config,
'project_path': project_path,
'src_path': local_build_steps
}
for step in module_build_steps:
if "do" in step:
for task in step["do"]:
task_func = getattr(build_steps, task, None)
if task_func is not None:
_call_with_params(task_func, build_params, step["do"][task])
elif local_build_steps is not None:
task_func = getattr(build_steps_local, task, None)
if task_func is not None:
_call_with_params(task_func, build_params, step["do"][task])
if local_build_steps is None:
if not os.path.exists(os.path.join(project_path, "dist", "build_steps")):
os.makedirs(os.path.join(project_path, "dist", "build_steps"))
shutil.copy2(module_steps_path, os.path.join(project_path, "dist", "build_steps", manifest['name'] + ".json"))
def apply_module_to_ios_project(module_path, project_path, skip_a=False, inspector_config=False, include_tests=False, local_build_steps=None, app_config=None):
"""Take the module in a specific folder and apply it to an xcode ios project in another folder"""
if not os.path.exists(os.path.join(module_path, 'manifest.json')):
LOG.warning("Failed to include module: %s" % module_path)
return
with open(os.path.join(module_path, 'manifest.json')) as manifest_file:
manifest = json.load(manifest_file)
# JS
if os.path.exists(os.path.join(module_path, 'javascript', 'module.js')):
LOG.info("iOS module '%s': Appending module.js to all.js" % manifest['name'])
with open(os.path.join(module_path, 'javascript', 'module.js')) as module_js:
with open(os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'all.js'), 'a') as alljs:
alljs.write('(function () {\n')
alljs.write(module_js.read())
alljs.write('\n})();')
# Tests
if include_tests:
LOG.info("iOS module '%s': Including test files" % manifest['name'])
if os.path.exists(os.path.join(module_path, 'tests', 'fixtures')):
if os.path.exists(os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures', manifest['name'])):
shutil.rmtree(os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures', manifest['name']))
if not os.path.exists(os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures')):
os.makedirs(os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures'))
shutil.copytree(os.path.join(module_path, 'tests', 'fixtures'), os.path.join(project_path, 'ForgeInspector', 'assets', 'src', 'fixtures', manifest['name']))
if os.path.exists(os.path.join(module_path, 'tests', 'automated.js')):
try:
os.makedirs(os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'tests', 'automated'))
except OSError:
pass
shutil.copy2(os.path.join(module_path, 'tests', 'automated.js'), os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'tests', 'automated', manifest['name']+'.js'))
if os.path.exists(os.path.join(module_path, 'tests', 'interactive.js')):
try:
os.makedirs(os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'tests', 'interactive'))
except OSError:
pass
shutil.copy2(os.path.join(module_path, 'tests', 'interactive.js'), os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'tests', 'interactive', manifest['name']+'.js'))
# Add module a if we want it
if not skip_a:
LOG.info("iOS module '%s': Including module.a" % manifest['name'])
module_a = os.path.join(module_path, 'ios', 'module.a')
if os.path.isfile(module_a):
# Copy to libs
shutil.copy2(module_a, os.path.join(project_path, manifest['name']+'.a'))
# Add to xcode build
xcode_project = XcodeProject(os.path.join(project_path, 'ForgeInspector.xcodeproj', 'project.pbxproj'))
xcode_project.add_framework(manifest['name']+'.a', "<group>")
xcode_project.save()
if inspector_config:
LOG.info("iOS module '%s': Including inspector config" % manifest['name'])
if app_config is None:
with open(os.path.join(project_path, 'ForgeInspector', 'assets', 'app_config.json')) as app_config_json:
app_config = json.load(app_config_json)
if os.path.exists(os.path.join(module_path, 'inspector_config.json')):
with open(os.path.join(module_path, 'inspector_config.json'), "r") as inspector_config_file:
inspector_config = json.load(inspector_config_file)
else:
inspector_config = {
"modules": {
manifest['name']: {
"version": "exampleversion"
}
}
}
app_config = dict_merge(app_config, inspector_config)
with open(os.path.join(project_path, 'ForgeInspector', 'assets', 'app_config.json'), 'w') as app_config_json:
json.dump(app_config, app_config_json)
with open(os.path.join(project_path, 'ForgeInspector', 'assets', 'forge', 'app_config.js'), 'w') as app_config_js:
app_config_js.write("window.forge = {}; window.forge.config = %s;" % json.dumps(app_config))
# Validate config
if os.path.exists(os.path.join(module_path, 'config_schema.json')) and \
"config" in app_config['modules'][manifest['name']]:
with open(os.path.join(module_path, 'config_schema.json')) as schema_file:
config_schema = json.load(schema_file)
try:
validictory.validate(app_config['modules'][manifest['name']]['config'], config_schema)
except validictory.ValidationError as e:
raise Exception("Validation failed for module '%s' with error: %s" % (manifest['name'], str(e)))
# bundles
module_bundles = os.path.join(module_path, 'ios', 'bundles')
if os.path.isdir(module_bundles):
LOG.info("iOS module '%s': Including bundles" % manifest['name'])
xcode_project = XcodeProject(os.path.join(project_path, 'ForgeInspector.xcodeproj', 'project.pbxproj'))
for bundle in os.listdir(module_bundles):
if bundle.endswith(".bundle"):
shutil.copytree(os.path.join(module_bundles, bundle), os.path.join(project_path, bundle))
xcode_project.add_resource(bundle)
xcode_project.save()
# build steps
module_steps_path = os.path.join(module_path, 'ios', 'build_steps.json')
if os.path.isfile(module_steps_path):
LOG.info("iOS module '%s': Applying build steps" % manifest['name'])
with open(module_steps_path, 'r') as build_steps_file:
module_build_steps = json.load(build_steps_file)
with cd(project_path):
build_params = {
'app_config': app_config,
'project_path': os.path.join(project_path, "ForgeInspector"),
'src_path': local_build_steps
}
for step in module_build_steps:
if "when" in step:
should_continue = False
for predicate in step["when"]:
predicate_func = getattr(build_steps_predicates, predicate, None)
if predicate_func is not None:
if not _call_with_params(predicate_func, build_params, step["when"][predicate]):
should_continue = True
break
else:
should_continue = True
break
if should_continue:
continue
if "do" in step:
for task in step["do"]:
task_func = getattr(build_steps, task, None)
if task_func is not None:
_call_with_params(task_func, build_params, step["do"][task])
elif local_build_steps is not None:
task_func = getattr(build_steps_local, task, None)
if task_func is not None:
_call_with_params(task_func, build_params, step["do"][task])
if local_build_steps is None:
if not os.path.exists(os.path.join(project_path, "dist", "build_steps")):
os.makedirs(os.path.join(project_path, "dist", "build_steps"))
shutil.copy2(module_steps_path, os.path.join(project_path, "dist", "build_steps", manifest['name'] + ".json"))
def apply_module_to_android_project(module_path, project_path, skip_jar=False, inspector_config=False, include_tests=False, local_build_steps=None, app_config=None):
"""Take the module in a specific folder and apply it to an eclipse android project in another folder"""
if not os.path.exists(os.path.join(module_path, 'manifest.json')):
LOG.warning("Failed to include module: %s" % module_path)
return
with open(os.path.join(module_path, 'manifest.json')) as manifest_file:
manifest = json.load(manifest_file)
# JS
if os.path.exists(os.path.join(module_path, 'javascript', 'module.js')):
LOG.info("Android module '%s': Appending module.js to all.js" % manifest['name'])
with open(os.path.join(module_path, 'javascript', 'module.js')) as module_js:
with open(os.path.join(project_path, 'assets', 'forge', 'all.js'), 'a') as alljs:
alljs.write('(function () {\n')
alljs.write(module_js.read())
alljs.write('\n})();')
# Tests
if include_tests:
LOG.info("Android module '%s': Including test files" % manifest['name'])
if os.path.exists(os.path.join(module_path, 'tests', 'fixtures')):
if os.path.exists(os.path.join(project_path, 'assets', 'src', 'fixtures', manifest['name'])):
shutil.rmtree(os.path.join(project_path, 'assets', 'src', 'fixtures', manifest['name']))
if not os.path.exists(os.path.join(project_path, 'assets', 'src', 'fixtures')):
os.makedirs(os.path.join(project_path, 'assets', 'src', 'fixtures'))
shutil.copytree(os.path.join(module_path, 'tests', 'fixtures'), os.path.join(project_path, 'assets', 'src', 'fixtures', manifest['name']))
if os.path.exists(os.path.join(module_path, 'tests', 'automated.js')):
try:
os.makedirs(os.path.join(project_path, 'assets', 'forge', 'tests', 'automated'))
except OSError:
pass
shutil.copy2(os.path.join(module_path, 'tests', 'automated.js'), os.path.join(project_path, 'assets', 'forge', 'tests', 'automated', manifest['name']+'.js'))
if os.path.exists(os.path.join(module_path, 'tests', 'interactive.js')):
try:
os.makedirs(os.path.join(project_path, 'assets', 'forge', 'tests', 'interactive'))
except OSError:
pass
shutil.copy2(os.path.join(module_path, 'tests', 'interactive.js'), os.path.join(project_path, 'assets', 'forge', 'tests', 'interactive', manifest['name']+'.js'))
# Add module jar if we want it
if not skip_jar:
LOG.info("Android module '%s': Adding module jar to libs" % manifest['name'])
module_jar = os.path.join(module_path, 'android', 'module.jar')
if not os.path.exists(os.path.join(project_path, 'libs')):
os.makedirs(os.path.join(project_path, 'libs'))
if os.path.exists(module_jar):
shutil.copy2(module_jar, os.path.join(project_path, 'libs', manifest['name']+'.jar'))
if inspector_config:
LOG.info("Android module '%s': Including inspector config" % manifest['name'])
if app_config is None:
with open(os.path.join(project_path, 'assets', 'app_config.json')) as app_config_json:
app_config = json.load(app_config_json)
if os.path.exists(os.path.join(module_path, 'inspector_config.json')):
with open(os.path.join(module_path, 'inspector_config.json'), "r") as inspector_config_file:
inspector_config = json.load(inspector_config_file)
else:
inspector_config = {
"modules": {
manifest['name']: {
"version": "exampleversion"
}
}
}
app_config = dict_merge(app_config, inspector_config)
with open(os.path.join(project_path, 'assets', 'app_config.json'), 'w') as app_config_json:
json.dump(app_config, app_config_json)
with open(os.path.join(project_path, 'assets', 'forge', 'app_config.js'), 'w') as app_config_js:
app_config_js.write("window.forge = {}; window.forge.config = %s;" % json.dumps(app_config))
# Validate config
if os.path.exists(os.path.join(module_path, 'config_schema.json')) and \
"config" in app_config['modules'][manifest['name']]:
with open(os.path.join(module_path, 'config_schema.json')) as schema_file:
config_schema = json.load(schema_file)
try:
validictory.validate(app_config['modules'][manifest['name']]['config'], config_schema)
except validictory.ValidationError as e:
raise Exception("Validation failed for module '%s' with error: %s" % (manifest['name'], str(e)))
# res
module_res = os.path.join(module_path, 'android', 'res')
if os.path.isdir(module_res):
LOG.info("Android module '%s': Adding module res files" % manifest['name'])
for dirpath, _, filenames in os.walk(module_res):
if not os.path.exists(os.path.join(project_path, 'res', dirpath[len(module_res)+1:])):
os.makedirs(os.path.join(project_path, 'res', dirpath[len(module_res)+1:]))
for filename in filenames:
if (filename.startswith('.')):
continue
if os.path.exists(os.path.join(project_path, 'res', dirpath[len(module_res)+1:], filename)):
raise Exception("File '%s' already exists, module resources may only add files, not replace them." % os.path.join('res', dirpath[len(module_res)+1:], filename))
shutil.copy2(os.path.join(dirpath, filename), os.path.join(project_path, 'res', dirpath[len(module_res)+1:], filename))
# libs
module_res = os.path.join(module_path, 'android', 'libs')
if os.path.isdir(module_res):
LOG.info("Android module '%s': Adding module lib files" % manifest['name'])
for dirpath, _, filenames in os.walk(module_res):
if not os.path.exists(os.path.join(project_path, 'libs', dirpath[len(module_res)+1:])):
os.makedirs(os.path.join(project_path, 'libs', dirpath[len(module_res)+1:]))
for filename in filenames:
shutil.copy2(os.path.join(dirpath, filename), os.path.join(project_path, 'libs', dirpath[len(module_res)+1:], filename))
# build steps
if os.path.isfile(os.path.join(module_path, 'android', 'build_steps.json')):
LOG.info("Android module '%s': Performing build steps" % manifest['name'])
with open(os.path.join(module_path, 'android', 'build_steps.json')) as build_steps_file:
module_build_steps = json.load(build_steps_file)
with cd(project_path):
build_params = {
'app_config': app_config,
'project_path': project_path,
'src_path': local_build_steps
}
for step in module_build_steps:
if "do" in step:
for task in step["do"]:
task_func = getattr(build_steps, task, None)
if task_func is not None:
_call_with_params(task_func, build_params, step["do"][task])
elif local_build_steps is not None:
task_func = getattr(build_steps_local, task, None)
if task_func is not None:
_call_with_params(task_func, build_params, step["do"][task])
if local_build_steps is None:
module_steps_path = os.path.join(module_path, 'android', 'build_steps.json')
if not os.path.exists(os.path.join(project_path, "build_steps")):
os.makedirs(os.path.join(project_path, "build_steps"))
shutil.copy2(module_steps_path, os.path.join(project_path, "build_steps", manifest['name'] + ".json"))
| {
"content_hash": "eda608c9e0db9558331e484a7b0265ac",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 182,
"avg_line_length": 47.74285714285714,
"alnum_prop": 0.6826865534226396,
"repo_name": "mnaughto/trigger-statusbar",
"id": "aabe67a6ce3fae762b01518426100a3027c27f6b",
"size": "21723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".trigger/module_dynamic/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "386"
},
{
"name": "CSS",
"bytes": "116661"
},
{
"name": "D",
"bytes": "1536"
},
{
"name": "JavaScript",
"bytes": "64389"
},
{
"name": "Objective-C",
"bytes": "28595"
},
{
"name": "Python",
"bytes": "140312"
}
],
"symlink_target": ""
} |
"""
This is a simple Python GTK+3 TreeView selection snippet.
See: http://python-gtk-3-tutorial.readthedocs.org/en/latest/treeview.html
"""
from gi.repository import Gtk as gtk
# Countries, population (as in 2015) and continent.
DATA_LIST = [("China", 1370130000, "Asia"),
("India", 1271980000, "Asia"),
("United States", 321107000, "America"),
("Indonesia", 255461700, "Asia"),
("Brazil", 204388000, "America"),
("Pakistan", 189936000, "Asia"),
("Nigeria", 183523000, "Africa"),
("Bangladesh", 158425000, "Asia"),
("Russia", 146267288, "Eurasia"),
("Japan", 126880000, "Asia")]
# The TreeView's selection callback
def on_tree_selection_changed(selection):
model, treeiter = selection.get_selected()
if treeiter != None:
print("You selected", model[treeiter][0])
def main():
window = gtk.Window()
window.set_default_size(300, 450)
window.set_border_width(18)
# Creating the ListStore model
liststore = gtk.ListStore(str, int, str)
for item in DATA_LIST:
liststore.append(list(item))
# Creating the treeview and add the columns
treeview = gtk.TreeView(liststore)
for column_index, column_title in enumerate(["Country", "Population", "Continent"]):
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(column_title, renderer, text=column_index)
column.set_resizable(True) # Let the column be resizable
treeview.append_column(column)
# Connect to the "changed" signal
select = treeview.get_selection()
select.connect("changed", on_tree_selection_changed)
# Scrolled window
scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_border_width(0)
scrolled_window.set_shadow_type(gtk.ShadowType.IN) # should be gtk.ShadowType.IN, gtk.ShadowType.OUT, gtk.ShadowType.ETCHED_IN or gtk.ShadowType.ETCHED_OUT
scrolled_window.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS) # should be gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS or gtk.PolicyType.NEVER
scrolled_window.add(treeview)
window.add(scrolled_window)
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
| {
"content_hash": "2278e7b07fed76381796efc1bfb1f1f2",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 188,
"avg_line_length": 38.18181818181818,
"alnum_prop": 0.6365079365079365,
"repo_name": "jeremiedecock/snippets",
"id": "01fb3e7a735eb66d632850369d68dcc665fcb565",
"size": "2629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pygtk/python_gtk3_pygobject/tree_view_selection.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "4294"
},
{
"name": "Batchfile",
"bytes": "6779"
},
{
"name": "C",
"bytes": "102107"
},
{
"name": "C++",
"bytes": "320943"
},
{
"name": "CMake",
"bytes": "11424"
},
{
"name": "CSS",
"bytes": "21121"
},
{
"name": "Cython",
"bytes": "21"
},
{
"name": "Dockerfile",
"bytes": "1818"
},
{
"name": "Fortran",
"bytes": "633"
},
{
"name": "Gnuplot",
"bytes": "39999"
},
{
"name": "Go",
"bytes": "3166"
},
{
"name": "Groovy",
"bytes": "3009"
},
{
"name": "HTML",
"bytes": "138995"
},
{
"name": "IDL",
"bytes": "43"
},
{
"name": "Java",
"bytes": "120221"
},
{
"name": "JavaScript",
"bytes": "32342"
},
{
"name": "Jinja",
"bytes": "206"
},
{
"name": "Jupyter Notebook",
"bytes": "95991"
},
{
"name": "Lua",
"bytes": "200"
},
{
"name": "M4",
"bytes": "111"
},
{
"name": "MATLAB",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "81307"
},
{
"name": "OpenSCAD",
"bytes": "14995"
},
{
"name": "PHP",
"bytes": "94"
},
{
"name": "Perl",
"bytes": "46"
},
{
"name": "Processing",
"bytes": "208"
},
{
"name": "Prolog",
"bytes": "454"
},
{
"name": "Python",
"bytes": "1685966"
},
{
"name": "R",
"bytes": "76"
},
{
"name": "Raku",
"bytes": "43"
},
{
"name": "Ruby",
"bytes": "42"
},
{
"name": "Scheme",
"bytes": "649"
},
{
"name": "Shell",
"bytes": "52865"
},
{
"name": "Smalltalk",
"bytes": "55"
},
{
"name": "TeX",
"bytes": "1189"
},
{
"name": "Vue",
"bytes": "49445"
},
{
"name": "XSLT",
"bytes": "1816"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
from azure.core.credentials import AccessToken, AzureKeyCredential
from azure.core.paging import ItemPaged
from azure.core.pipeline.policies import BearerTokenCredentialPolicy
from azure.core.polling import LROPoller
from azure.core.tracing.decorator import distributed_trace
from ._api_version import RemoteRenderingApiVersion, validate_api_version
from ._generated import RemoteRenderingRestClient
from ._generated.models import (AssetConversion, AssetConversionInputSettings,
AssetConversionOutputSettings,
AssetConversionSettings,
CreateAssetConversionSettings,
CreateRenderingSessionSettings,
RenderingSession, RenderingSessionSize,
UpdateSessionSettings)
from ._polling import ConversionPolling, SessionPolling
from ._shared.authentication_endpoint import construct_endpoint_url
from ._shared.mixed_reality_token_credential import get_mixedreality_credential
from ._shared.mixedreality_account_key_credential import \
MixedRealityAccountKeyCredential
from ._shared.static_access_token_credential import StaticAccessTokenCredential
from ._version import SDK_MONIKER
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Union
from azure.core.credentials import TokenCredential
class RemoteRenderingClient(object):
"""A client for the Azure Remote Rendering Service.
This client offers functionality to convert assets to the format expected by the runtime, and also to manage the
lifetime of remote rendering sessions.
:param str endpoint:
The rendering service endpoint. This determines the region in which the rendering session is created and
asset conversions are performed.
:param str account_id: The Azure Remote Rendering account identifier.
:param str account_domain:
The Azure Remote Rendering account domain. For example, for an account created in the eastus region, this
will have the form "eastus.mixedreality.azure.com"
:param credential: Authentication for the Azure Remote
Rendering account. Can be of the form of an AzureKeyCredential, TokenCredential or an AccessToken acquired
from the Mixed Reality Secure Token Service (STS).
:type credential: Union[TokenCredential, AzureKeyCredential, AccessToken]
:keyword api_version:
The API version of the service to use for requests. It defaults to the latest service version.
Setting to an older version may result in reduced feature compatibility.
:paramtype api_version: str or ~azure.mixedreality.remoterenderings.RemoteRenderingApiVersion
"""
def __init__(self, endpoint, account_id, account_domain, credential, **kwargs):
# type: (str, str, str, Union[TokenCredential, AccessToken], Any) -> None
self._api_version = kwargs.pop(
"api_version", RemoteRenderingApiVersion.V2021_01_01
)
validate_api_version(self._api_version)
if not endpoint:
raise ValueError("endpoint cannot be None")
if not account_id:
raise ValueError("account_id cannot be None")
if not account_domain:
raise ValueError("account_domain cannot be None")
if not credential:
raise ValueError("credential cannot be None")
if isinstance(credential, AccessToken):
cred = StaticAccessTokenCredential(credential) # type: TokenCredential
elif isinstance(credential, AzureKeyCredential):
cred = MixedRealityAccountKeyCredential(
account_id=account_id, account_key=credential)
else:
cred = credential
self.polling_interval = kwargs.pop("polling_interval", 5)
endpoint_url = kwargs.pop(
'authentication_endpoint_url', construct_endpoint_url(account_domain))
# otherwise assume it is a TokenCredential and simply pass it through
pipeline_credential = get_mixedreality_credential(
account_id=account_id, account_domain=account_domain, credential=cred, endpoint_url=endpoint_url)
if pipeline_credential is None:
raise ValueError("credential is not of type TokenCredential, AzureKeyCredential or AccessToken")
authentication_policy = BearerTokenCredentialPolicy(
pipeline_credential, endpoint_url + '/.default')
self._account_id = account_id
self._client = RemoteRenderingRestClient(
endpoint=endpoint,
authentication_policy=authentication_policy,
sdk_moniker=SDK_MONIKER,
api_version=self._api_version,
**kwargs)
@distributed_trace
def begin_asset_conversion(self, conversion_id, input_settings, output_settings, **kwargs):
# type: (str, AssetConversionInputSettings, AssetConversionOutputSettings, Any) -> LROPoller[AssetConversion]
"""
Start a new asset conversion with the given options.
:param str conversion_id:
An ID uniquely identifying the conversion for the remote rendering account. The ID is case sensitive, can
contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain
more than 256 characters.
:param ~azure.mixedreality.remoterendering.AssetConversionInputSettings input_settings: Options for the
input of the conversion.
:param ~azure.mixedreality.remoterendering.AssetConversionOutputSettings output_settings: Options for the
output of the conversion.
:return: A poller for the created asset conversion
:rtype: ~azure.core.polling.LROPoller[AssetConversion]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling_interval = kwargs.pop("polling_interval", self.polling_interval)
initial_state = self._client.remote_rendering.create_conversion(
account_id=self._account_id,
conversion_id=conversion_id,
body=CreateAssetConversionSettings(settings=AssetConversionSettings(
input_settings=input_settings, output_settings=output_settings)),
**kwargs)
polling_method = ConversionPolling(account_id=self._account_id, polling_interval=polling_interval)
return LROPoller(client=self._client,
initial_response=initial_state,
deserialization_callback=lambda: None,
polling_method=polling_method)
@distributed_trace
def get_asset_conversion(self, conversion_id, **kwargs):
# type: (str, Any) -> AssetConversion
"""
Retrieve the state of a previously created conversion.
:param str conversion_id:
The identifier of the conversion to retrieve.
:return: Information about the ongoing conversion process.
:rtype: ~azure.mixedreality.remoterendering.models.AssetConversion
:raises ~azure.core.exceptions.HttpResponseError:
"""
return self._client.remote_rendering.get_conversion(
account_id=self._account_id, conversion_id=conversion_id, **kwargs)
@distributed_trace
def get_asset_conversion_poller(self, **kwargs):
# type: (Any) -> LROPoller[AssetConversion]
"""
Returns a poller for an existing conversion by conversion id or a continuation token retrieved from a previous
poller.
:keyword conversion_id: The conversion id of a previously created conversion.
:paramtype conversion_id: str
:keyword continuation_token:
A continuation token retrieved from a poller of a conversion.
:paramtype continuation_token: str
:return: A poller for the created asset conversion
:rtype: ~azure.core.polling.LROPoller[AssetConversion]
:raises ~azure.core.exceptions.HttpResponseError:
"""
conversion_id = kwargs.pop("conversion_id", None) # type: Union[str,None]
continuation_token = kwargs.pop("continuation_token", None) # type: Union[str,None]
if conversion_id is None and continuation_token is None:
raise ValueError(
"Either conversion_id or continuation_token needs to be supplied.")
if conversion_id is not None and continuation_token is not None:
raise ValueError(
"Parameters conversion_id and continuation_token are mutual exclusive. Supply only one of the two.")
polling_interval = kwargs.pop("polling_interval", self.polling_interval)
polling_method = ConversionPolling(account_id=self._account_id, polling_interval=polling_interval)
if continuation_token is not None:
return LROPoller.from_continuation_token(continuation_token=continuation_token,
polling_method=polling_method,
client=self._client)
if conversion_id is not None:
initial_state = self._client.remote_rendering.get_conversion(
account_id=self._account_id,
conversion_id=conversion_id,
**kwargs)
return LROPoller(client=self._client,
initial_response=initial_state,
deserialization_callback=lambda: None,
polling_method=polling_method)
@distributed_trace
def list_asset_conversions(self, **kwargs):
# type: (...) -> ItemPaged[AssetConversion]
""" Gets conversions for the remote rendering account.
:rtype: ItemPaged[AssetConversion]
:raises ~azure.core.exceptions.HttpResponseError:
"""
return self._client.remote_rendering.list_conversions(account_id=self._account_id, **kwargs) # type: ignore
@distributed_trace
def begin_rendering_session(self, session_id, size, lease_time_minutes, **kwargs):
# type: (str, Union[str, RenderingSessionSize], int, Any) -> LROPoller[RenderingSession]
"""
:param str session_id: An ID uniquely identifying the rendering session for the given account. The ID is case
sensitive, can contain any combination of alphanumeric characters including hyphens and underscores, and
cannot contain more than 256 characters.
:param size: Size of the server used for the rendering session. Remote Rendering with Standard size server has
a maximum scene size of 20 million polygons. Remote Rendering with Premium size does not enforce a hard
maximum, but performance may be degraded if your content exceeds the rendering capabilities of the service.
:param int lease_time_minutes: The time in minutes the session will run after reaching the 'Ready' state.
:type size: str or ~azure.mixedreality.remoterendering.RenderingSessionSize
:return: A poller for the created rendering session
:rtype: LROPoller[RenderingSession]
:raises ~azure.core.exceptions.HttpResponseError:
"""
settings = CreateRenderingSessionSettings(
size=size, lease_time_minutes=lease_time_minutes)
initial_state = self._client.remote_rendering.create_session(
account_id=self._account_id,
session_id=session_id,
body=settings,
**kwargs)
polling_interval = kwargs.pop("polling_interval", self.polling_interval)
polling_method = SessionPolling(account_id=self._account_id, polling_interval=polling_interval)
return LROPoller(client=self._client,
initial_response=initial_state,
deserialization_callback=lambda: None,
polling_method=polling_method)
@distributed_trace
def get_rendering_session(self, session_id, **kwargs):
# type: (str, Any) -> RenderingSession
'''
Returns the properties of a previously generated rendering session.
:param str session_id: The identifier of the rendering session.
:return: Properties of the rendering session
:rtype: ~azure.mixedreality.remoterendering.models.RenderingSession
:raises ~azure.core.exceptions.HttpResponseError:
'''
return self._client.remote_rendering.get_session(
account_id=self._account_id,
session_id=session_id,
**kwargs)
def get_rendering_session_poller(self, **kwargs):
# type: (Any) -> LROPoller[RenderingSession]
"""
Returns a poller for an existing rendering session by session id or a continuation token retrieved from a
previous poller.
:keyword session_id: The conversion id of a previously created conversion.
:paramtype session_id: str
:keyword continuation_token:
A continuation token retrieved from a poller of a session.
:paramtype continuation_token: str
:raises ~azure.core.exceptions.HttpResponseError:
"""
session_id = kwargs.pop("session_id", None) # type: Union[str,None]
continuation_token = kwargs.pop("continuation_token", None) # type: Union[str,None]
if session_id is None and continuation_token is None:
raise ValueError(
"Either session_id or continuation_token needs to be supplied.")
if session_id is not None and continuation_token is not None:
raise ValueError(
"Parameters session_id and continuation_token are mutual exclusive. Supply only one of the two.")
polling_interval = kwargs.pop("polling_interval", self.polling_interval)
if continuation_token is not None:
polling_method = SessionPolling(account_id=self._account_id, polling_interval=polling_interval)
return LROPoller.from_continuation_token(continuation_token=continuation_token,
polling_method=polling_method,
client=self._client)
if session_id is not None:
initial_state = self._client.remote_rendering.get_session(
account_id=self._account_id,
session_id=session_id,
**kwargs)
polling_method = SessionPolling(account_id=self._account_id, polling_interval=polling_interval)
return LROPoller(client=self._client,
initial_response=initial_state,
deserialization_callback=lambda: None,
polling_method=polling_method)
@distributed_trace
def stop_rendering_session(self, session_id, **kwargs):
# type: (str, Any) -> None
"""
:param str session_id: The identifier of the session to be stopped.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
self._client.remote_rendering.stop_session(
account_id=self._account_id, session_id=session_id, **kwargs)
@distributed_trace
def update_rendering_session(self, session_id, **kwargs):
# type: (str, Any) -> RenderingSession
"""
Updates an already existing rendering session.
:param str session_id: The identifier of the session to be updated.
:keyword lease_time_minutes: The new lease time of the rendering session. Has to be strictly larger than
the previous lease time.
:paramtype lease_time_minutes: int
:return: The properties of the updated session
:rtype: ~azure.mixedreality.remoterendering.models.RenderingSession
:raises ~azure.core.exceptions.HttpResponseError:
"""
lease_time_minutes = kwargs.pop("lease_time_minutes", None) # type: Union[int,None]
if lease_time_minutes is not None:
return self._client.remote_rendering.update_session(account_id=self._account_id,
session_id=session_id,
body=UpdateSessionSettings(
lease_time_minutes=lease_time_minutes),
**kwargs)
# if no param to update has been provided the unchanged session is returned
return self._client.remote_rendering.get_session(account_id=self._account_id,
session_id=session_id,
**kwargs)
@distributed_trace
def list_rendering_sessions(self, **kwargs):
# type: (...) -> ItemPaged[RenderingSession]
"""
List rendering sessions in the 'Ready' or 'Starting' state. Does not return stopped or failed rendering
sessions.
:rtype: ItemPaged[RenderingSession]
:raises ~azure.core.exceptions.HttpResponseError:
"""
return self._client.remote_rendering.list_sessions(account_id=self._account_id, **kwargs) # type: ignore
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> RemoteRenderingClient
self._client.__enter__() # pylint:disable=no-member
return self
def __exit__(self, *args):
# type: (*Any) -> None
self._client.__exit__(*args) # pylint:disable=no-member
| {
"content_hash": "8614f0de8f5ed4848140bbf01563a768",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 119,
"avg_line_length": 49.68539325842696,
"alnum_prop": 0.6490841248303935,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e1ef95afca493be7d266ca072915f8b490376df1",
"size": "18012",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/remoterendering/azure-mixedreality-remoterendering/azure/mixedreality/remoterendering/_remote_rendering_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
=========================================================
SVM Tie Breaking Example
=========================================================
Tie breaking is costly if ``decision_function_shape='ovr'``, and therefore it
is not enabled by default. This example illustrates the effect of the
``break_ties`` parameter for a multiclass classification problem and
``decision_function_shape='ovr'``.
The two plots differ only in the area in the middle where the classes are
tied. If ``break_ties=False``, all input in that area would be classified as
one class, whereas if ``break_ties=True``, the tie-breaking mechanism will
create a non-convex decision boundary in that area.
"""
# Code source: Andreas Mueller, Adrin Jalali
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=27)
fig, sub = plt.subplots(2, 1, figsize=(5, 8))
titles = ("break_ties = False", "break_ties = True")
for break_ties, title, ax in zip((False, True), titles, sub.flatten()):
svm = SVC(
kernel="linear", C=1, break_ties=break_ties, decision_function_shape="ovr"
).fit(X, y)
xlim = [X[:, 0].min(), X[:, 0].max()]
ylim = [X[:, 1].min(), X[:, 1].max()]
xs = np.linspace(xlim[0], xlim[1], 1000)
ys = np.linspace(ylim[0], ylim[1], 1000)
xx, yy = np.meshgrid(xs, ys)
pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
colors = [plt.cm.Accent(i) for i in [0, 4, 7]]
points = ax.scatter(X[:, 0], X[:, 1], c=y, cmap="Accent")
classes = [(0, 1), (0, 2), (1, 2)]
line = np.linspace(X[:, 1].min() - 5, X[:, 1].max() + 5)
ax.imshow(
-pred.reshape(xx.shape),
cmap="Accent",
alpha=0.2,
extent=(xlim[0], xlim[1], ylim[1], ylim[0]),
)
for coef, intercept, col in zip(svm.coef_, svm.intercept_, classes):
line2 = -(line * coef[1] + intercept) / coef[0]
ax.plot(line2, line, "-", c=colors[col[0]])
ax.plot(line2, line, "--", c=colors[col[1]])
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_title(title)
ax.set_aspect("equal")
plt.show()
| {
"content_hash": "b820184659f66c01d879e67bace01140",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 82,
"avg_line_length": 32.803030303030305,
"alnum_prop": 0.5879907621247114,
"repo_name": "vinayak-mehta/scikit-learn",
"id": "e12460b494c024e0a1d7643a1f49552d0a63d693",
"size": "2165",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "examples/svm/plot_svm_tie_breaking.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668672"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10468304"
},
{
"name": "Shell",
"bytes": "41758"
}
],
"symlink_target": ""
} |
"""Keep track of geo-located stations."""
from astropy.time import Time
from sqlalchemy import Column, Float, String, BigInteger, ForeignKey, func
from . import MCDeclarativeBase, NotNull
from . import mc, cm_utils
class StationType(MCDeclarativeBase):
"""
Table to track/denote station type data categories in various ways.
Attributes
----------
station_type_name : String Column
Name of type class, Primary_key
prefix : String Column
String prefix to station type, elements of which are typically
characterized by <prefix><int>. Comma-delimit list if more than one.
Note that prefix is not in the primary_key, so there can be multiple
prefixes per type_name.
description : String Column
hort description of station type.
plot_marker : String Column
matplotlib marker type to use
"""
__tablename__ = 'station_type'
station_type_name = Column(String(64), primary_key=True)
prefix = NotNull(String(64))
description = Column(String(64))
plot_marker = Column(String(64))
def __repr__(self):
"""Define representation."""
return ('<subarray {self.station_type_name}: prefix={self.prefix} '
'description={self.description} marker={self.plot_marker}>'
.format(self=self))
class GeoLocation(MCDeclarativeBase):
"""
A table logging stations within HERA.
Attributes
----------
station_name : String Column
Colloquial name of station (which is a unique location on the ground).
This one shouldn't change. Primary_key
station_type_name : String Column
Name of station type of which it is a member.
Should match prefix per station_type table.
datum : String Column
Datum of the geoid.
tile : String Column
UTM tile
northing : Float Column
Northing coordinate in m
easting : Float Column
Easting coordinate in m
elevation : Float Column
Elevation in m
created_gpstime : BigInteger Column
The date when the station assigned by project.
"""
__tablename__ = 'geo_location'
station_name = Column(String(64), primary_key=True)
station_type_name = Column(String(64), ForeignKey(StationType.station_type_name),
nullable=False)
datum = Column(String(64))
tile = Column(String(64))
northing = Column(Float(precision='53'))
easting = Column(Float(precision='53'))
elevation = Column(Float)
created_gpstime = NotNull(BigInteger)
def gps2Time(self):
"""Add a created_date attribute -- an astropy Time object based on created_gpstime."""
self.created_date = Time(self.created_gpstime, format='gps')
def geo(self, **kwargs):
"""Add arbitrary attributes to object based on dict."""
for key, value in kwargs.items():
if key == 'station_name':
value = value.upper()
setattr(self, key, value)
def __repr__(self):
"""Define representation."""
return '<station_name={self.station_name} station_type={self.station_type_name} \
northing={self.northing} easting={self.easting} \
elevation={self.elevation}>'.format(self=self)
def update(session=None, data=None, add_new_geo=False):
"""
Update the geo_location table with some data.
Use with caution -- should usually use in a script which will do datetime
primary key etc.
Parameters
----------
session : session
session on current database. If session is None, a new session
on the default database is created and used.
data : str or list
[[station_name0,column0,value0],[...]]
where
station_nameN: station_name (starts with char)
values: corresponding list of values
add_new_geo : bool
allow a new entry to be made.
Returns
-------
bool
Flag if successful
"""
data_dict = format_check_update_request(data)
if data_dict is None:
print('No update - doing nothing.')
return False
close_session_when_done = False
if session is None: # pragma: no cover
db = mc.connect_mc_db()
session = db.sessionmaker()
close_session_when_done = True
for station_name in data_dict.keys():
geo_rec = session.query(GeoLocation).filter(
func.upper(GeoLocation.station_name) == station_name.upper())
num_rec = geo_rec.count()
make_update = False
if num_rec == 0:
if add_new_geo:
gr = GeoLocation()
make_update = True
else:
raise ValueError("{} does not exist and add_new_geo not enabled."
.format(station_name))
elif num_rec == 1:
if add_new_geo:
raise ValueError("{} exists and and_new_geo is enabled."
.format(station_name))
else:
gr = geo_rec.first()
make_update = True
if make_update:
for d in data_dict[station_name]:
setattr(gr, d[1], d[2])
session.add(gr)
session.commit()
cm_utils.log('geo_location update', data_dict=data_dict)
if close_session_when_done: # pragma: no cover
session.close()
return True
def format_check_update_request(request):
"""
Parse the update request for use in the update function.
Parameters
----------
request : str or list
station_name0:column0:value0, [station_name1:]column1:value1, [...] or list
station_nameN: first entry must have the station_name,
if it does not then propagate first station_name but
can't restart 3 then 2
columnN: name of geo_location column
valueN: corresponding new value
Returns
-------
dict
Parsed request for update
"""
if request is None:
return None
data = {}
if type(request) == str:
tmp = request.split(',')
data_to_proc = []
for d in tmp:
data_to_proc.append(d.split(':'))
else:
data_to_proc = request
if len(data_to_proc[0]) == 3:
station_name0 = data_to_proc[0][0]
for d in data_to_proc:
if len(d) == 2:
d.insert(0, station_name0)
elif len(d) != 3:
raise ValueError('Invalid format for update request.')
if d[0] in data.keys():
data[d[0]].append(d)
else:
data[d[0]] = [d]
else:
raise ValueError('Invalid parse request - need 3 parameters for at least first one.')
return data
| {
"content_hash": "40559002ddef9a51553c10e70188a762",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 94,
"avg_line_length": 32.2122641509434,
"alnum_prop": 0.5889588519548983,
"repo_name": "HERA-Team/Monitor_and_Control",
"id": "d64be11cb13bbc061df8e3712f494a4f3446c98b",
"size": "6951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hera_mc/geo_location.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "19948"
}
],
"symlink_target": ""
} |
"""Utility to compile possibly incomplete Python source code."""
import sys
import string
import traceback
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default "<input>"
symbol -- optional grammar start symbol; "single" (default) or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError or OverflowError if the command is a syntax error
(OverflowError if the error is in a numeric constant)
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If
it compiles as is, it's complete. If it compiles with one \n
appended, we expect more. If it doesn't compile either way, we
compare the error we get when compiling with \n or \n\n appended.
If the errors are the same, the code is broken. But if the errors
are different, we expect more. Not intuitive; not even guaranteed
to hold in future releases; but this matches the compiler's
behavior from Python 1.4 through 1.5.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing
with a successful outcome before reaching the end of the source;
in this case, trailing symbols may be ignored instead of causing an
error. For example, a backslash followed by two newlines may be
followed by arbitrary garbage. This will be fixed once the API
for the parser is better.
"""
# Check for source consisting of only blank lines and comments
for line in string.split(source, "\n"):
line = string.strip(line)
if line and line[0] != '#':
break # Leave it alone
else:
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compile(source, filename, symbol)
except SyntaxError, err:
pass
try:
code1 = compile(source + "\n", filename, symbol)
except SyntaxError, err1:
pass
try:
code2 = compile(source + "\n\n", filename, symbol)
except SyntaxError, err2:
pass
if code:
return code
try:
e1 = err1.__dict__
except AttributeError:
e1 = err1
try:
e2 = err2.__dict__
except AttributeError:
e2 = err2
if not code1 and e1 == e2:
raise SyntaxError, err1
| {
"content_hash": "3f8c700a79ca70ba4e08276986a47e59",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 33.104651162790695,
"alnum_prop": 0.6578854935019318,
"repo_name": "MalloyPower/parsing-python",
"id": "080e00b87ebc1674c7df83e06a84370ac1026613",
"size": "2847",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/codeop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20170712_1744'),
]
operations = [
migrations.RemoveField(
model_name='userinfo',
name='integer_id',
),
migrations.AddField(
model_name='userinfo',
name='int_id',
field=models.CharField(blank=True, default='', max_length=8),
),
]
| {
"content_hash": "e201919370c43993a3d0233da40dd1d1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 73,
"avg_line_length": 23,
"alnum_prop": 0.5612648221343873,
"repo_name": "wangjinyu/api_server",
"id": "e5a6622784052846eba58c1f84895d24bd519592",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/migrations/0003_auto_20170712_1752.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46518"
},
{
"name": "JavaScript",
"bytes": "97398"
},
{
"name": "Python",
"bytes": "11310"
},
{
"name": "Shell",
"bytes": "1370"
}
],
"symlink_target": ""
} |
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
import os, optparse, sys
# Get paths we might need
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
addToPath(config_root+'/configs/common')
addToPath(config_root+'/configs/ruby')
addToPath(config_root+'/configs/topologies')
import Ruby
import Options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
# Add the ruby specific and protocol specific options
Ruby.define_options(parser)
(options, args) = parser.parse_args()
#
# Set the default cache size and associativity to be very small to encourage
# races between requests and writebacks.
#
options.l1d_size="256B"
options.l1i_size="256B"
options.l2_size="512B"
options.l3_size="1kB"
options.l1d_assoc=2
options.l1i_assoc=2
options.l2_assoc=2
options.l3_assoc=2
# this is a uniprocessor only test
options.num_cpus = 1
cpu = TimingSimpleCPU(cpu_id=0)
system = System(cpu = cpu, physmem = SimpleMemory(null = True),
clk_domain = SrcClockDomain(clock = '1GHz'))
# Create a seperate clock domain for components that should run at
# CPUs frequency
system.cpu.clk_domain = SrcClockDomain(clock = '2GHz')
Ruby.create_system(options, system)
# Create a separate clock for Ruby
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock)
assert(len(system.ruby._cpu_ruby_ports) == 1)
# create the interrupt controller
cpu.createInterruptController()
#
# Tie the cpu cache ports to the ruby cpu ports and
# physmem, respectively
#
cpu.connectAllPorts(system.ruby._cpu_ruby_ports[0])
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
| {
"content_hash": "deb759ebb564fccd5f2c1e8890ccfc09",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 76,
"avg_line_length": 25.684931506849313,
"alnum_prop": 0.7344,
"repo_name": "samueldotj/TeeRISC-Simulator",
"id": "27d56a31da756a7339ef6890a3d0b09755aeb432",
"size": "3457",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/configs/simple-timing-ruby.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "230559"
},
{
"name": "C",
"bytes": "930004"
},
{
"name": "C++",
"bytes": "9066852"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Perl",
"bytes": "1603541"
},
{
"name": "Python",
"bytes": "3151838"
},
{
"name": "Ruby",
"bytes": "19410"
},
{
"name": "Shell",
"bytes": "2193"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Linking_Charities.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "c4008e0a87f59353ce9b0c351675540d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 81,
"avg_line_length": 37.76190476190476,
"alnum_prop": 0.6242118537200504,
"repo_name": "linkingcharities/linkingcharities",
"id": "96ae664aa583a19c17aa6470f7fe268dd9f8438e",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Linking_Charities/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "6023"
},
{
"name": "Makefile",
"bytes": "124"
},
{
"name": "Python",
"bytes": "68704"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name="pep518_twin_forkbombs_second",
version="238",
py_modules=["pep518_twin_forkbombs_second"],
)
| {
"content_hash": "b715daae54c8750c70c8af8c3b8083f6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 21.142857142857142,
"alnum_prop": 0.6959459459459459,
"repo_name": "pypa/pip",
"id": "c14c1cfb0259789ab547e61517c6d6a26568b23f",
"size": "148",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tests/data/src/pep518_twin_forkbombs_second-238/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3137"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "7137503"
}
],
"symlink_target": ""
} |
import socket
import sys
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = sys.argv[1]
# Bind the socket to the port
server_address = ('localhost', int(port))
print sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
raw_input("click any key to shutdown...") | {
"content_hash": "3f8a63759a210c3206d9e61382d4370c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 23.642857142857142,
"alnum_prop": 0.7250755287009063,
"repo_name": "radiovisual/zaport",
"id": "7b8e04ec404065223449c849ca013172d2b36418",
"size": "331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/bindudp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4012"
},
{
"name": "Python",
"bytes": "331"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListLocations(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListLocations Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListLocations, self).__init__(temboo_session, '/Library/CorpWatch/Lists/ListLocations')
def new_input_set(self):
return ListLocationsInputSet()
def _make_result_set(self, result, path):
return ListLocationsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListLocationsChoreographyExecution(session, exec_id, path)
class ListLocationsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListLocations
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((optional, string) The APIKey from CorpWatch if you have one.)
"""
super(ListLocationsInputSet, self)._set_input('APIKey', value)
def set_Address(self, value):
"""
Set the value of the Address input for this Choreo. ((optional, string) Enter an address fragment to search for. This can be either a street address, city, or state/subregion.)
"""
super(ListLocationsInputSet, self)._set_input('Address', value)
def set_CountryCode(self, value):
"""
Set the value of the CountryCode input for this Choreo. ((optional, string) Enter an ISO-3166 formatted country code. )
"""
super(ListLocationsInputSet, self)._set_input('CountryCode', value)
def set_Index(self, value):
"""
Set the value of the Index input for this Choreo. ((optional, integer) Set the index number of the first result to be returned. The index of the first result is 0.)
"""
super(ListLocationsInputSet, self)._set_input('Index', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) The number of results to be returned. Defaults to 100. Maximum is 5000.)
"""
super(ListLocationsInputSet, self)._set_input('Limit', value)
def set_MaxYear(self, value):
"""
Set the value of the MaxYear input for this Choreo. ((optional, integer) Indicate desired year of the most recent appearance in SEC filing data (e.g. indicating 2007 will search for companies that ceased filing in 2007).)
"""
super(ListLocationsInputSet, self)._set_input('MaxYear', value)
def set_MinYear(self, value):
"""
Set the value of the MinYear input for this Choreo. ((optional, integer) Indicate desired year of the earliest appearance in SEC filing data (e.g. indicating 2004 will search for companies that started filing in 2004).)
"""
super(ListLocationsInputSet, self)._set_input('MinYear', value)
def set_PostalCode(self, value):
"""
Set the value of the PostalCode input for this Choreo. ((optional, integer) Enter a postal code to be searched.)
"""
super(ListLocationsInputSet, self)._set_input('PostalCode', value)
def set_ResponseType(self, value):
"""
Set the value of the ResponseType input for this Choreo. ((optional, string) Specify json or xml for the type of response to be returned. Defaults to xml.)
"""
super(ListLocationsInputSet, self)._set_input('ResponseType', value)
def set_Type(self, value):
"""
Set the value of the Type input for this Choreo. ((optional, string) Indicates the origin of the location information found. Acceptable values: relation_loc, business, mailing, state_of_incorp. See documentation for more info.)
"""
super(ListLocationsInputSet, self)._set_input('Type', value)
def set_Year(self, value):
"""
Set the value of the Year input for this Choreo. ((optional, integer) If a year is specified, only records for that year will be returned and the data in the company objects returned will be set appropriately for the request year. Defaults to most recent.)
"""
super(ListLocationsInputSet, self)._set_input('Year', value)
class ListLocationsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListLocations Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from CorpWatch.)
"""
return self._output.get('Response', None)
class ListLocationsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListLocationsResultSet(response, path)
| {
"content_hash": "66c032b3b1aba7cd410fed3ad2d7aca1",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 264,
"avg_line_length": 49.34905660377358,
"alnum_prop": 0.6855285796214873,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "5e7887f57566f2b81f7c0b27815002bb402c157f",
"size": "6116",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/CorpWatch/Lists/ListLocations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
} |
"""Module to handle /pkgs"""
import logging
import urllib
from google.appengine.api import memcache
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from simian.auth import gaeserver
from simian.mac import models
from simian.mac.common import auth
from simian.mac.munki import common
from simian.mac.munki import handlers
def PackageExists(filename):
"""Check whether a package exists.
Args:
filename: str, package filename like 'foo.dmg'
Returns:
True or False
"""
return models.PackageInfo.get_by_key_name(filename) is not None
class Packages(
handlers.AuthenticationHandler,
blobstore_handlers.BlobstoreDownloadHandler):
"""Handler for /pkgs/"""
def get(self, filename):
"""GET
Args:
filename: str, package filename like 'foo.dmg'
Returns:
None if a blob is being returned,
or a response object
"""
auth_return = auth.DoAnyAuth()
if hasattr(auth_return, 'email'):
email = auth_return.email()
if not auth.IsAdminUser(email) and not auth.IsSupportUser(email):
raise auth.IsAdminMismatch
filename = urllib.unquote(filename)
pkg = models.PackageInfo.MemcacheWrappedGet(filename)
if pkg is None or not pkg.blobstore_key:
self.error(404)
return
if common.IsPanicModeNoPackages():
self.error(503)
return
# Get the Blobstore BlobInfo for this package; memcache wrapped.
memcache_key = 'blobinfo_%s' % filename
blob_info = memcache.get(memcache_key)
if not blob_info:
blob_info = blobstore.BlobInfo.get(pkg.blobstore_key)
if blob_info:
memcache.set(memcache_key, blob_info, 300) # cache for 5 minutes.
else:
logging.critical(
'Failure fetching BlobInfo for %s. Verify the blob exists: %s',
pkg.filename, pkg.blobstore_key)
self.error(404)
return
header_date_str = self.request.headers.get('If-Modified-Since', '')
etag_nomatch_str = self.request.headers.get('If-None-Match', 0)
etag_match_str = self.request.headers.get('If-Match', 0)
pkg_date = blob_info.creation
pkg_size_bytes = blob_info.size
# TODO(user): The below can be simplified once all of our clients
# have ETag values set on the filesystem for these files. The
# parsing of If-Modified-Since could be removed. Removing it prematurely
# will cause a re-download of all packages on all clients for 1 iteration
# until they all have ETag values.
# Reduce complexity of elif conditional below.
# If an If-None-Match: ETag is supplied, don't worry about a
# missing file modification date -- the ETag supplies everything needed.
if etag_nomatch_str and not header_date_str:
resource_expired = False
else:
resource_expired = handlers.IsClientResourceExpired(
pkg_date, header_date_str)
# Client supplied If-Match: etag, but that etag does not match current
# etag. return 412.
if (etag_match_str and pkg.pkgdata_sha256 and
etag_match_str != pkg.pkgdata_sha256):
self.response.set_status(412)
# Client supplied no etag or If-No-Match: etag, and the etag did not
# match, or the client's file is older than the mod time of this package.
elif ((etag_nomatch_str and pkg.pkgdata_sha256 and
etag_nomatch_str != pkg.pkgdata_sha256) or resource_expired):
self.response.headers['Content-Disposition'] = str(
'attachment; filename=%s' % filename)
# header date empty or package has changed, send blob with last-mod date.
if pkg.pkgdata_sha256:
self.response.headers['ETag'] = str(pkg.pkgdata_sha256)
self.response.headers['Last-Modified'] = pkg_date.strftime(
handlers.HEADER_DATE_FORMAT)
self.response.headers['X-Download-Size'] = str(pkg_size_bytes)
self.send_blob(pkg.blobstore_key)
else:
# Client doesn't need to do anything, current version is OK based on
# ETag and/or last modified date.
if pkg.pkgdata_sha256:
self.response.headers['ETag'] = str(pkg.pkgdata_sha256)
self.response.set_status(304)
class ClientRepair(Packages):
"""Handler for /repair/"""
def get(self, client_id_str=''):
"""GET
Returns:
None if a blob is being returned,
or a response object
"""
session = auth.DoAnyAuth()
client_id = handlers.GetClientIdForRequest(
self.request, session=session, client_id_str=client_id_str)
logging.info('Repair client ID: %s', client_id)
filename = None
for pkg in models.PackageInfo.all().filter('name =', 'munkitools'):
if client_id.get('track', '') in pkg.catalogs:
filename = pkg.filename
break
if filename:
logging.info('Sending client: %s', filename)
super(ClientRepair, self).get(filename)
else:
logging.warning('No repair client found.')
| {
"content_hash": "5962d0e962e42ada97c40999b8f2432b",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 33.317567567567565,
"alnum_prop": 0.6779557899006287,
"repo_name": "alexandregz/simian",
"id": "048d547002924483106c58492329c3fed0adcc7d",
"size": "5556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/simian/mac/munki/handlers/pkgs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37847"
},
{
"name": "HTML",
"bytes": "89696"
},
{
"name": "JavaScript",
"bytes": "28084"
},
{
"name": "Makefile",
"bytes": "8128"
},
{
"name": "Python",
"bytes": "1431095"
},
{
"name": "Shell",
"bytes": "19945"
}
],
"symlink_target": ""
} |
"""The component for STIEBEL ELTRON heat pumps with ISGWeb Modbus module."""
from datetime import timedelta
import logging
from pystiebeleltron import pystiebeleltron
import voluptuous as vol
from homeassistant.components.modbus import (
CONF_HUB,
DEFAULT_HUB,
DOMAIN as MODBUS_DOMAIN,
)
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import Throttle
DOMAIN = "stiebel_eltron"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): cv.string,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the STIEBEL ELTRON unit.
Will automatically load climate platform.
"""
name = config[DOMAIN][CONF_NAME]
modbus_client = hass.data[MODBUS_DOMAIN][config[DOMAIN][CONF_HUB]]
hass.data[DOMAIN] = {
"name": name,
"ste_data": StiebelEltronData(name, modbus_client),
}
discovery.load_platform(hass, Platform.CLIMATE, DOMAIN, {}, config)
return True
class StiebelEltronData:
"""Get the latest data and update the states."""
def __init__(self, name, modbus_client):
"""Init the STIEBEL ELTRON data object."""
self.api = pystiebeleltron.StiebelEltronAPI(modbus_client, 1)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update unit data."""
if not self.api.update():
_LOGGER.warning("Modbus read failed")
else:
_LOGGER.debug("Data updated successfully")
| {
"content_hash": "36c618a7acef8397ba181bb96b58d74c",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 80,
"avg_line_length": 28.271428571428572,
"alnum_prop": 0.6796361798888327,
"repo_name": "mezz64/home-assistant",
"id": "84a39e3c87598cbcf19613ec7776e79f01933f01",
"size": "1979",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/stiebel_eltron/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
from . import msg
#from . import support
#from . import transport
#from . import util
| {
"content_hash": "ce4b8b95b3e1119bef07554fbab493df",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 47.791666666666664,
"alnum_prop": 0.6102877070619006,
"repo_name": "OBIGOGIT/etch",
"id": "73e3a00315767d38caf9ef20b2252fdf34fe8bba",
"size": "1147",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "binding-python/runtime/src/main/python/etch/binding/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2513090"
},
{
"name": "C#",
"bytes": "1514713"
},
{
"name": "C++",
"bytes": "1109601"
},
{
"name": "CSS",
"bytes": "143"
},
{
"name": "Go",
"bytes": "158833"
},
{
"name": "Java",
"bytes": "2451144"
},
{
"name": "Perl",
"bytes": "290"
},
{
"name": "Python",
"bytes": "444086"
},
{
"name": "Shell",
"bytes": "62900"
},
{
"name": "VimL",
"bytes": "13679"
},
{
"name": "XSLT",
"bytes": "12890"
}
],
"symlink_target": ""
} |
import os
from django.core.management.base import BaseCommand
from poetry.apps.corpus.models import Poem
class Command(BaseCommand):
help = 'Automatic markup dump for Django'
def add_arguments(self, parser):
parser.add_argument('--out',
action='store',
dest='out',
default="",
help='Output')
def handle(self, *args, **options):
output = options.get('out')
with open(output, "w", encoding='utf-8') as f:
content = '['
poems = [poem for poem in Poem.objects.all() if poem.count_manual_markups() != 0]
markups = []
for poem in poems:
for markup in poem.markups.all():
if markup.author != "Automatic":
markups.append(markup)
break
print(len(markups))
for markup in markups:
content += markup.text + ","
content = content[:-1] + ']'
f.write(content) | {
"content_hash": "aa8bb0094294965474143b8f0ed6823a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 93,
"avg_line_length": 35.225806451612904,
"alnum_prop": 0.48717948717948717,
"repo_name": "IlyaGusev/PoetryCorpus",
"id": "1fa85471713deecdd060ab2fb59c15a4d2acf487",
"size": "1117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poetry/apps/corpus/management/commands/get_manual.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3253"
},
{
"name": "HTML",
"bytes": "27479"
},
{
"name": "JavaScript",
"bytes": "19411"
},
{
"name": "Python",
"bytes": "96947"
},
{
"name": "Shell",
"bytes": "1168"
}
],
"symlink_target": ""
} |
import codecs
import sys
import getopt
import os
from learning.PageManager import PageManager
import json
import time
import logging
logger = logging.getLogger("landmark")
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger("landmark")
# handler = logging.FileHandler('landmark.log')
# handler.setLevel(logging.INFO)
# formatter = logging.Formatter(u'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# logger.addHandler(handler)
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def learn_rules_with_markup(markup_file, pages_map):
print pages_map
for page in pages_map:
print page
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "dh", ["debug", "help"])
write_debug_files = False
for opt in opts:
if opt in [('-d', ''), ('--debug', '')]:
write_debug_files = True
if opt in [('-h', ''), ('--help', '')]:
raise Usage('python -m learning.RuleLearner [OPTIONAL_PARAMS] [TEST_FILES_FOLDER] [MARKUP_FILE]\n\t[OPTIONAL_PARAMS]: -d to get debug stripe html files')
except getopt.error, msg:
raise Usage(msg)
logger.info('Running RuleLearner with file at %s for rules %s', args[0], args[1])
#read the directory location from arg0
page_file_dir = args[0]
pageManager = PageManager(write_debug_files)
start_time = time.time()
for subdir, dirs, files in os.walk(page_file_dir):
for the_file in files:
if the_file.startswith('.'):
continue
with codecs.open(os.path.join(subdir, the_file), "r", "utf-8") as myfile:
page_str = myfile.read().encode('utf-8')
pageManager.addPage(the_file, page_str)
logger.info("--- LOAD PAGES: %s seconds ---" % (time.time() - start_time))
#Read the markups from a file...
start_time = time.time()
markups_file = args[1]
with codecs.open(markups_file, "r", "utf-8") as myfile:
markup_str = myfile.read().encode('utf-8')
markups = json.loads(markup_str)
markups.pop("__SCHEMA__", None)
markups.pop("__URLS__", None)
logger.info("--- LOAD MARKUPS: %s seconds ---" % (time.time() - start_time))
pageManager.learnStripes(markups)
start_time = time.time()
rule_set = pageManager.learnRulesFromMarkup(markups)
logger.info("--- LEARN RULES FROM MARKUP: %s seconds ---" % (time.time() - start_time))
if(len(args) > 2):
output_file = args[2]
with codecs.open(output_file, "w", "utf-8") as myfile:
myfile.write(rule_set.toJson())
myfile.close()
else:
print rule_set.toJson()
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
if __name__ == '__main__':
start_time = time.time()
main()
logger.info("--- %s seconds ---" % (time.time() - start_time))
# sys.exit(main()) | {
"content_hash": "f1517cfd7918c07f035caa19a68d7c3f",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 173,
"avg_line_length": 34.43877551020408,
"alnum_prop": 0.5466666666666666,
"repo_name": "usc-isi-i2/landmark-extraction",
"id": "c8024ed2ca0dd7f00e42ebffe28d0ab66f41364d",
"size": "3430",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/learning/RuleLearner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "219901"
},
{
"name": "HTML",
"bytes": "68207"
},
{
"name": "Python",
"bytes": "232643"
},
{
"name": "Shell",
"bytes": "1758"
}
],
"symlink_target": ""
} |
import datetime
from django import template
from django.core.urlresolvers import reverse as urlreverse
from django.conf import settings
from django.db.models import Q
from django.utils.safestring import mark_safe
from ietf.ietfauth.utils import user_is_person, has_role
from ietf.doc.models import BallotDocEvent, BallotPositionDocEvent, IESG_BALLOT_ACTIVE_STATES, IESG_SUBSTATE_TAGS
register = template.Library()
def render_ballot_icon(user, doc):
if not doc:
return ""
# FIXME: temporary backwards-compatibility hack
from ietf.doc.models import Document
if not isinstance(doc, Document):
doc = doc._draft
if doc.type_id == "draft":
if doc.get_state_slug("draft-iesg") not in IESG_BALLOT_ACTIVE_STATES:
return ""
elif doc.type_id == "charter":
if doc.get_state_slug() not in ("intrev", "iesgrev"):
return ""
elif doc.type_id == "conflrev":
if doc.get_state_slug() not in ("iesgeval","defer"):
return ""
elif doc.type_id == "statchg":
if doc.get_state_slug() not in ("iesgeval","defer"):
return ""
ballot = doc.active_ballot()
if not ballot:
return ""
def sort_key(t):
_, pos = t
if not pos:
return (2, 0)
elif pos.pos.blocking:
return (0, pos.pos.order)
else:
return (1, pos.pos.order)
positions = list(doc.active_ballot().active_ad_positions().items())
positions.sort(key=sort_key)
edit_position_url = ""
if has_role(user, "Area Director"):
edit_position_url = urlreverse('ietf.idrfc.views_ballot.edit_position', kwargs=dict(name=doc.name, ballot_id=ballot.pk))
title = "IESG positions (click to show more%s)" % (", right-click to edit position" if edit_position_url else "")
res = ['<a href="%s" data-popup="%s" data-edit="%s" title="%s" class="ballot-icon"><table>' % (
urlreverse("doc_ballot", kwargs=dict(name=doc.name, ballot_id=ballot.pk)),
urlreverse("ietf.doc.views_doc.ballot_popup", kwargs=dict(name=doc.name, ballot_id=ballot.pk)),
edit_position_url,
title
)]
res.append("<tr>")
for i, (ad, pos) in enumerate(positions):
if i > 0 and i % 5 == 0:
res.append("</tr>")
res.append("<tr>")
c = "position-%s" % (pos.pos.slug if pos else "norecord")
if user_is_person(user, ad):
c += " my"
res.append('<td class="%s" />' % c)
res.append("</tr>")
res.append("</table></a>")
return "".join(res)
class BallotIconNode(template.Node):
def __init__(self, doc_var):
self.doc_var = doc_var
def render(self, context):
doc = template.resolve_variable(self.doc_var, context)
return render_ballot_icon(context.get("user"), doc)
def do_ballot_icon(parser, token):
try:
tag_name, doc_name = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires exactly two arguments" % token.contents.split()[0]
return BallotIconNode(doc_name)
register.tag('ballot_icon', do_ballot_icon)
@register.filter
def my_position(doc, user):
if not has_role(user, "Area Director"):
return None
# FIXME: temporary backwards-compatibility hack
from ietf.doc.models import Document
if not isinstance(doc, Document):
doc = doc._draft
ballot = doc.active_ballot()
pos = "No Record"
if ballot:
changed_pos = doc.latest_event(BallotPositionDocEvent, type="changed_ballot_position", ad__user=user, ballot=ballot)
if changed_pos:
pos = changed_pos.pos.name;
return pos
@register.filter()
def state_age_colored(doc):
# FIXME: temporary backwards-compatibility hack
from ietf.doc.models import Document
if not isinstance(doc, Document):
doc = doc._draft
if doc.type_id == 'draft':
if not doc.get_state_slug() in ["active", "rfc"]:
# Don't show anything for expired/withdrawn/replaced drafts
return ""
main_state = doc.get_state_slug('draft-iesg')
if not main_state:
return ""
if main_state in ["dead", "watching", "pub"]:
return ""
try:
state_date = doc.docevent_set.filter(
Q(desc__istartswith="Draft Added by ")|
Q(desc__istartswith="Draft Added in state ")|
Q(desc__istartswith="Draft added in state ")|
Q(desc__istartswith="State changed to ")|
Q(desc__istartswith="State Changes to ")|
Q(desc__istartswith="Sub state has been changed to ")|
Q(desc__istartswith="State has been changed to ")|
Q(desc__istartswith="IESG has approved and state has been changed to")|
Q(desc__istartswith="IESG process started in state")
).order_by('-time')[0].time.date()
except IndexError:
state_date = datetime.date(1990,1,1)
days = (datetime.date.today() - state_date).days
# loosely based on
# http://trac.tools.ietf.org/group/iesg/trac/wiki/PublishPath
if main_state == "lc":
goal1 = 30
goal2 = 30
elif main_state == "rfcqueue":
goal1 = 60
goal2 = 120
elif main_state in ["lc-req", "ann"]:
goal1 = 4
goal2 = 7
elif 'need-rev' in [x.slug for x in doc.tags.all()]:
goal1 = 14
goal2 = 28
elif main_state == "pub-req":
goal1 = 7
goal2 = 14
elif main_state == "ad-eval":
goal1 = 14
goal2 = 28
else:
goal1 = 14
goal2 = 28
if days > goal2:
class_name = "ietf-small ietf-highlight-r"
elif days > goal1:
class_name = "ietf-small ietf-highlight-y"
else:
class_name = "ietf-small"
if days > goal1:
title = ' title="Goal is <%d days"' % (goal1,)
else:
title = ''
return mark_safe('<span class="%s"%s>(for %d day%s)</span>' % (
class_name, title, days, 's' if days != 1 else ''))
else:
return ""
| {
"content_hash": "c8ddb3929ef410f8f5feafdb73c59d97",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 128,
"avg_line_length": 34.57754010695187,
"alnum_prop": 0.559851531085679,
"repo_name": "mcr/ietfdb",
"id": "29f0b79e7273539a543d8edc13923fcfa460be2d",
"size": "8139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ietf/idrfc/templatetags/ballot_icon_redesign.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "239198"
},
{
"name": "JavaScript",
"bytes": "450755"
},
{
"name": "Perl",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "10286676"
},
{
"name": "Ruby",
"bytes": "3468"
},
{
"name": "Shell",
"bytes": "39950"
},
{
"name": "TeX",
"bytes": "23944"
}
],
"symlink_target": ""
} |
import unittest
from base_test_class import BaseTestCase
from selenium.webdriver.common.by import By
import sys
class VariousPagesTest(BaseTestCase):
def test_user_status(self):
driver = self.driver
driver.get(self.base_url + "user")
def test_calendar_status(self):
driver = self.driver
driver.get(self.base_url + "calendar")
# click apply to see if this helps webdriver to catch the javascript errors we're seeing
driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
def suite():
suite = unittest.TestSuite()
suite.addTest(BaseTestCase('test_login'))
suite.addTest(VariousPagesTest('test_user_status'))
suite.addTest(VariousPagesTest('test_calendar_status'))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(descriptions=True, failfast=True, verbosity=2)
ret = not runner.run(suite()).wasSuccessful()
BaseTestCase.tearDownDriver()
sys.exit(ret)
| {
"content_hash": "db37b925200b1757b7065a8b2d3e5148",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 96,
"avg_line_length": 31.806451612903224,
"alnum_prop": 0.6987829614604463,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "0e2275d61ac59cf9bd51e49f73a9343e6e6f73ed",
"size": "986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/check_various_pages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import timedelta
from django.http import HttpResponse
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth import get_user_model
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.utils import timezone
from experiments import conf
from experiments.experiment_counters import ExperimentCounter
from experiments.middleware import ExperimentsRetentionMiddleware
from experiments.models import Experiment, ENABLED_STATE, Enrollment
from experiments.conf import CONTROL_GROUP, VISIT_PRESENT_COUNT_GOAL, VISIT_NOT_PRESENT_COUNT_GOAL
from experiments.signal_handlers import transfer_enrollments_to_user
from experiments.utils import participant
from mock import patch
import random
request_factory = RequestFactory()
TEST_ALTERNATIVE = 'blue'
TEST_GOAL = 'buy'
EXPERIMENT_NAME = 'backgroundcolor'
class WebUserTests(object):
def setUp(self):
self.experiment = Experiment(name=EXPERIMENT_NAME, state=ENABLED_STATE)
self.experiment.save()
self.request = request_factory.get('/')
self.request.session = DatabaseSession()
self.experiment_counter = ExperimentCounter()
def tearDown(self):
self.experiment_counter.delete(self.experiment)
def test_enrollment_initially_control(self):
experiment_user = participant(self.request)
self.assertEqual(experiment_user.get_alternative(EXPERIMENT_NAME), 'control', "Default Enrollment wasn't control")
def test_user_enrolls(self):
experiment_user = participant(self.request)
experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
self.assertEqual(experiment_user.get_alternative(EXPERIMENT_NAME), TEST_ALTERNATIVE, "Wrong Alternative Set")
def test_record_goal_increments_counts(self):
experiment_user = participant(self.request)
experiment_user.confirm_human()
experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, TEST_ALTERNATIVE, TEST_GOAL), 0)
experiment_user.goal(TEST_GOAL)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, TEST_ALTERNATIVE, TEST_GOAL), 1, "Did not increment Goal count")
def test_can_record_goal_multiple_times(self):
experiment_user = participant(self.request)
experiment_user.confirm_human()
experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
experiment_user.goal(TEST_GOAL)
experiment_user.goal(TEST_GOAL)
experiment_user.goal(TEST_GOAL)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, TEST_ALTERNATIVE, TEST_GOAL), 1, "Did not increment goal count correctly")
self.assertEqual(self.experiment_counter.goal_distribution(self.experiment, TEST_ALTERNATIVE, TEST_GOAL), {3: 1}, "Incorrect goal count distribution")
def test_counts_increment_immediately_once_confirmed_human(self):
experiment_user = participant(self.request)
experiment_user.confirm_human()
experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
self.assertEqual(self.experiment_counter.participant_count(self.experiment, TEST_ALTERNATIVE), 1, "Did not count participant after confirm human")
def test_visit_increases_goal(self):
thetime = timezone.now()
with patch('experiments.utils.now', return_value=thetime):
experiment_user = participant(self.request)
experiment_user.confirm_human()
experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
experiment_user.visit()
self.assertEqual(self.experiment_counter.goal_distribution(self.experiment, TEST_ALTERNATIVE, VISIT_NOT_PRESENT_COUNT_GOAL), {1: 1}, "Not Present Visit was not correctly counted")
self.assertEqual(self.experiment_counter.goal_distribution(self.experiment, TEST_ALTERNATIVE, VISIT_PRESENT_COUNT_GOAL), {}, "Present Visit was not correctly counted")
with patch('experiments.utils.now', return_value=thetime + timedelta(hours=7)):
experiment_user.visit()
self.assertEqual(self.experiment_counter.goal_distribution(self.experiment, TEST_ALTERNATIVE, VISIT_NOT_PRESENT_COUNT_GOAL), {2: 1}, "No Present Visit was not correctly counted")
self.assertEqual(self.experiment_counter.goal_distribution(self.experiment, TEST_ALTERNATIVE, VISIT_PRESENT_COUNT_GOAL), {1: 1}, "Present Visit was not correctly counted")
def test_visit_twice_increases_once(self):
experiment_user = participant(self.request)
experiment_user.confirm_human()
experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
experiment_user.visit()
experiment_user.visit()
self.assertEqual(self.experiment_counter.goal_distribution(self.experiment, TEST_ALTERNATIVE, VISIT_NOT_PRESENT_COUNT_GOAL), {1: 1}, "Visit was not correctly counted")
self.assertEqual(self.experiment_counter.goal_distribution(self.experiment, TEST_ALTERNATIVE, VISIT_PRESENT_COUNT_GOAL), {}, "Present Visit was not correctly counted")
def test_user_force_enrolls(self):
experiment_user = participant(self.request)
experiment_user.enroll(EXPERIMENT_NAME, ['control', 'alternative1', 'alternative2'], force_alternative='alternative2')
self.assertEqual(experiment_user.get_alternative(EXPERIMENT_NAME), 'alternative2')
def test_user_does_not_force_enroll_to_new_alternative(self):
alternatives = ['control', 'alternative1', 'alternative2']
experiment_user = participant(self.request)
experiment_user.enroll(EXPERIMENT_NAME, alternatives)
alternative = experiment_user.get_alternative(EXPERIMENT_NAME)
self.assertIsNotNone(alternative)
other_alternative = random.choice(list(set(alternatives) - set(alternative)))
experiment_user.enroll(EXPERIMENT_NAME, alternatives, force_alternative=other_alternative)
self.assertEqual(alternative, experiment_user.get_alternative(EXPERIMENT_NAME))
def test_second_force_enroll_does_not_change_alternative(self):
alternatives = ['control', 'alternative1', 'alternative2']
experiment_user = participant(self.request)
experiment_user.enroll(EXPERIMENT_NAME, alternatives, force_alternative='alternative1')
alternative = experiment_user.get_alternative(EXPERIMENT_NAME)
self.assertIsNotNone(alternative)
other_alternative = random.choice(list(set(alternatives) - set(alternative)))
experiment_user.enroll(EXPERIMENT_NAME, alternatives, force_alternative=other_alternative)
self.assertEqual(alternative, experiment_user.get_alternative(EXPERIMENT_NAME))
class WebUserAnonymousTestCase(WebUserTests, TestCase):
def setUp(self):
super(WebUserAnonymousTestCase, self).setUp()
self.request.user = AnonymousUser()
def test_confirm_human_increments_participant_count(self):
experiment_user = participant(self.request)
experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
experiment_user.goal(TEST_GOAL)
self.assertEqual(self.experiment_counter.participant_count(self.experiment, TEST_ALTERNATIVE), 0, "Counted participant before confirmed human")
experiment_user.confirm_human()
self.assertEqual(self.experiment_counter.participant_count(self.experiment, TEST_ALTERNATIVE), 1, "Did not count participant after confirm human")
def test_confirm_human_increments_goal_count(self):
experiment_user = participant(self.request)
experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
experiment_user.goal(TEST_GOAL)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, TEST_ALTERNATIVE, TEST_GOAL), 0, "Counted goal before confirmed human")
experiment_user.confirm_human()
self.assertEqual(self.experiment_counter.goal_count(self.experiment, TEST_ALTERNATIVE, TEST_GOAL), 1, "Did not count goal after confirm human")
class WebUserAuthenticatedTestCase(WebUserTests, TestCase):
def setUp(self):
super(WebUserAuthenticatedTestCase, self).setUp()
User = get_user_model()
self.request.user = User(username='brian')
self.request.user.save()
class BotTests(object):
def setUp(self):
self.experiment = Experiment(name='backgroundcolor', state=ENABLED_STATE)
self.experiment.save()
self.experiment_counter = ExperimentCounter()
def test_user_does_not_enroll(self):
self.experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
self.assertEqual(self.experiment_counter.participant_count(self.experiment, TEST_ALTERNATIVE), 0, "Bot counted towards results")
def test_user_does_not_fire_goals(self):
self.experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
self.experiment_user.goal(TEST_GOAL)
self.assertEqual(self.experiment_counter.participant_count(self.experiment, TEST_ALTERNATIVE), 0, "Bot counted towards results")
def test_bot_in_control_group(self):
self.experiment_user.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)
self.assertEqual(self.experiment_user.get_alternative(EXPERIMENT_NAME), 'control', "Bot enrolled in a group")
self.assertEqual(self.experiment_user.is_enrolled(self.experiment.name, TEST_ALTERNATIVE), False, "Bot in test alternative")
self.assertEqual(self.experiment_user.is_enrolled(self.experiment.name, CONTROL_GROUP), True, "Bot not in control group")
def tearDown(self):
self.experiment_counter.delete(self.experiment)
class LoggedOutBotTestCase(BotTests, TestCase):
def setUp(self):
super(LoggedOutBotTestCase, self).setUp()
self.request = request_factory.get('/', HTTP_USER_AGENT='GoogleBot/2.1')
self.experiment_user = participant(self.request)
class LoggedInBotTestCase(BotTests, TestCase):
def setUp(self):
super(LoggedInBotTestCase, self).setUp()
User = get_user_model()
self.user = User(username='brian')
self.user.is_confirmed_human = False
self.user.save()
self.experiment_user = participant(user=self.user)
class ParticipantCacheTestCase(TestCase):
def setUp(self):
self.experiment = Experiment.objects.create(name='test_experiment1', state=ENABLED_STATE)
self.experiment_counter = ExperimentCounter()
def tearDown(self):
self.experiment_counter.delete(self.experiment)
def test_transfer_enrollments(self):
User = get_user_model()
user = User.objects.create(username='test')
request = request_factory.get('/')
request.session = DatabaseSession()
participant(request).enroll('test_experiment1', ['alternative'])
request.user = user
transfer_enrollments_to_user(None, request, user)
# the call to the middleware will set last_seen on the experiment
# if the participant cache hasn't been wiped appropriately then the
# session experiment user will be impacted instead of the authenticated
# experiment user
ExperimentsRetentionMiddleware().process_response(request, HttpResponse())
self.assertIsNotNone(Enrollment.objects.all()[0].last_seen)
class ConfirmHumanTestCase(TestCase):
def setUp(self):
self.experiment = Experiment.objects.create(name='test_experiment1', state=ENABLED_STATE)
self.experiment_counter = ExperimentCounter()
self.experiment_user = participant(session=DatabaseSession())
self.alternative = self.experiment_user.enroll(self.experiment.name, ['alternative'])
self.experiment_user.goal('my_goal')
def tearDown(self):
self.experiment_counter.delete(self.experiment)
def test_confirm_human_updates_experiment(self):
self.assertIn('experiments_goals', self.experiment_user.session)
self.assertEqual(self.experiment_counter.participant_count(self.experiment, self.alternative), 0)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, self.alternative, 'my_goal'), 0)
self.experiment_user.confirm_human()
self.assertNotIn('experiments_goals', self.experiment_user.session)
self.assertEqual(self.experiment_counter.participant_count(self.experiment, self.alternative), 1)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, self.alternative, 'my_goal'), 1)
def test_confirm_human_called_twice(self):
"""
Ensuring that counters aren't incremented twice
"""
self.assertEqual(self.experiment_counter.participant_count(self.experiment, self.alternative), 0)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, self.alternative, 'my_goal'), 0)
self.experiment_user.confirm_human()
self.experiment_user.confirm_human()
self.assertEqual(self.experiment_counter.participant_count(self.experiment, self.alternative), 1)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, self.alternative, 'my_goal'), 1)
def test_confirm_human_sets_session(self):
self.assertFalse(self.experiment_user.session.get(conf.CONFIRM_HUMAN_SESSION_KEY, False))
self.experiment_user.confirm_human()
self.assertTrue(self.experiment_user.session.get(conf.CONFIRM_HUMAN_SESSION_KEY, False))
def test_session_already_confirmed(self):
"""
Testing that confirm_human works even if code outside of django-experiments updates the key
"""
self.experiment_user.session[conf.CONFIRM_HUMAN_SESSION_KEY] = True
self.experiment_user.confirm_human()
self.assertEqual(self.experiment_counter.participant_count(self.experiment, self.alternative), 1)
self.assertEqual(self.experiment_counter.goal_count(self.experiment, self.alternative, 'my_goal'), 1)
class DefaultAlternativeTestCase(TestCase):
def test_default_alternative(self):
experiment = Experiment.objects.create(name='test_default')
self.assertEqual(experiment.default_alternative, conf.CONTROL_GROUP)
experiment.ensure_alternative_exists('alt1')
experiment.ensure_alternative_exists('alt2')
self.assertEqual(conf.CONTROL_GROUP, participant(session=DatabaseSession()).enroll('test_default', ['alt1', 'alt2']))
experiment.set_default_alternative('alt2')
experiment.save()
self.assertEqual('alt2', participant(session=DatabaseSession()).enroll('test_default', ['alt1', 'alt2']))
experiment.set_default_alternative('alt1')
experiment.save()
self.assertEqual('alt1', participant(session=DatabaseSession()).enroll('test_default', ['alt1', 'alt2']))
| {
"content_hash": "a295d88783bd489d57a8f314ca426850",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 191,
"avg_line_length": 51.39383561643836,
"alnum_prop": 0.7265276204437929,
"repo_name": "uhuramedia/django-experiments",
"id": "5a09c2540906d68f040c5975d57c4bff68ba0dad",
"size": "15007",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "experiments/tests/test_webuser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1597"
},
{
"name": "HTML",
"bytes": "10966"
},
{
"name": "JavaScript",
"bytes": "9580"
},
{
"name": "Python",
"bytes": "124496"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register('step-machine')
class StepFunction(QueryResourceManager):
"""AWS Step Functions State Machine"""
class resource_type(object):
service = 'stepfunctions'
enum_spec = ('list_state_machines', 'stateMachines', None)
id = 'stateMachineArn'
name = 'name'
date = 'creationDate'
dimension = None
detail_spec = (
"describe_state_machine", "stateMachineArn",
'stateMachineArn', None)
| {
"content_hash": "625908f45c350292d4644b17483ef68f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 82,
"avg_line_length": 32.25,
"alnum_prop": 0.6651162790697674,
"repo_name": "jdubs/cloud-custodian",
"id": "f637ca4c66443e3a13e6d5236d32dc48fc06cf0f",
"size": "1235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c7n/resources/sfn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1251"
},
{
"name": "Python",
"bytes": "1557818"
}
],
"symlink_target": ""
} |
import os
import sys
import argparse
import logging
logger = logging.getLogger()
from counterpartylib.lib import log
log.set_logger(logger)
from counterpartylib import server
from counterpartylib.lib import config
from counterpartycli.util import add_config_arguments, bootstrap
from counterpartycli.setup import generate_config_files
from counterpartycli import APP_VERSION
APP_NAME = 'counterparty-server'
CONFIG_ARGS = [
[('-v', '--verbose'), {'dest': 'verbose', 'action': 'store_true', 'default': False, 'help': 'sets log level to DEBUG instead of WARNING'}],
[('--testnet',), {'action': 'store_true', 'default': False, 'help': 'use {} testnet addresses and block numbers'.format(config.BTC_NAME)}],
[('--testcoin',), {'action': 'store_true', 'default': False, 'help': 'use the test {} network on every blockchain'.format(config.XCP_NAME)}],
[('--regtest',), {'action': 'store_true', 'default': False, 'help': 'use {} regtest addresses and block numbers'.format(config.BTC_NAME)}],
[('--customnet',), {'default': '', 'help': 'use a custom network (specify as UNSPENDABLE_ADDRESS|ADDRESSVERSION|P2SH_ADDRESSVERSION with version bytes in HH hex format)'}],
[('--api-limit-rows',), {'type': int, 'default': 1000, 'help': 'limit api calls to the set results (defaults to 1000). Setting to 0 removes the limit.'}],
[('--backend-name',), {'default': 'addrindex', 'help': 'the backend name to connect to'}],
[('--backend-connect',), {'default': 'localhost', 'help': 'the hostname or IP of the backend server'}],
[('--backend-port',), {'type': int, 'help': 'the backend port to connect to'}],
[('--backend-user',), {'default': 'bitcoinrpc', 'help': 'the username used to communicate with backend'}],
[('--backend-password',), {'help': 'the password used to communicate with backend'}],
[('--backend-ssl',), {'action': 'store_true', 'default': False, 'help': 'use SSL to connect to backend (default: false)'}],
[('--backend-ssl-no-verify',), {'action': 'store_true', 'default': False, 'help': 'verify SSL certificate of backend; disallow use of self‐signed certificates (default: true)'}],
[('--backend-poll-interval',), {'type': float, 'default': 0.5, 'help': 'poll interval, in seconds (default: 0.5)'}],
[('--no-check-asset-conservation',), {'action': 'store_true', 'default': False, 'help': 'Skip asset conservation checking (default: false)'}],
[('--p2sh-dust-return-pubkey',), {'help': 'pubkey to receive dust when multisig encoding is used for P2SH source (default: none)'}],
[('--indexd-connect',), {'default': 'localhost', 'help': 'the hostname or IP of the indexd server'}],
[('--indexd-port',), {'type': int, 'help': 'the indexd server port to connect to'}],
[('--rpc-host',), {'default': 'localhost', 'help': 'the IP of the interface to bind to for providing JSON-RPC API access (0.0.0.0 for all interfaces)'}],
[('--rpc-port',), {'type': int, 'help': 'port on which to provide the {} JSON-RPC API'.format(config.APP_NAME)}],
[('--rpc-user',), {'default': 'rpc', 'help': 'required username to use the {} JSON-RPC API (via HTTP basic auth)'.format(config.APP_NAME)}],
[('--rpc-password',), {'help': 'required password (for rpc-user) to use the {} JSON-RPC API (via HTTP basic auth)'.format(config.APP_NAME)}],
[('--rpc-no-allow-cors',), {'action': 'store_true', 'default': False, 'help': 'allow ajax cross domain request'}],
[('--rpc-batch-size',), {'type': int, 'default': config.DEFAULT_RPC_BATCH_SIZE, 'help': 'number of RPC queries by batch (default: {})'.format(config.DEFAULT_RPC_BATCH_SIZE)}],
[('--requests-timeout',), {'type': int, 'default': config.DEFAULT_REQUESTS_TIMEOUT, 'help': 'timeout value (in seconds) used for all HTTP requests (default: 5)'}],
[('--force',), {'action': 'store_true', 'default': False, 'help': 'skip backend check, version check, process lock (NOT FOR USE ON PRODUCTION SYSTEMS)'}],
[('--database-file',), {'default': None, 'help': 'the path to the SQLite3 database file'}],
[('--log-file',), {'nargs': '?', 'const': None, 'default': False, 'help': 'log to the specified file (specify option without filename to use the default location)'}],
[('--api-log-file',), {'nargs': '?', 'const': None, 'default': False, 'help': 'log API requests to the specified file (specify option without filename to use the default location)'}],
[('--utxo-locks-max-addresses',), {'type': int, 'default': config.DEFAULT_UTXO_LOCKS_MAX_ADDRESSES, 'help': 'max number of addresses for which to track UTXO locks'}],
[('--utxo-locks-max-age',), {'type': int, 'default': config.DEFAULT_UTXO_LOCKS_MAX_AGE, 'help': 'how long to keep a lock on a UTXO being tracked'}]
]
class VersionError(Exception):
pass
def main():
if os.name == 'nt':
from counterpartylib.lib import util_windows
#patch up cmd.exe's "challenged" (i.e. broken/non-existent) UTF-8 logging
util_windows.fix_win32_unicode()
# Post installation tasks
generate_config_files()
# Parse command-line arguments.
parser = argparse.ArgumentParser(prog=APP_NAME, description='Server for the {} protocol'.format(config.XCP_NAME), add_help=False)
parser.add_argument('-h', '--help', dest='help', action='store_true', help='show this help message and exit')
parser.add_argument('-V', '--version', action='version', version="{} v{}; {} v{}".format(APP_NAME, APP_VERSION, 'counterparty-lib', config.VERSION_STRING))
parser.add_argument('--config-file', help='the path to the configuration file')
add_config_arguments(parser, CONFIG_ARGS, 'server.conf')
subparsers = parser.add_subparsers(dest='action', help='the action to be taken')
parser_server = subparsers.add_parser('start', help='run the server')
parser_reparse = subparsers.add_parser('reparse', help='reparse all transactions in the database')
parser_vacuum = subparsers.add_parser('vacuum', help='VACUUM the database (to improve performance)')
parser_rollback = subparsers.add_parser('rollback', help='rollback database')
parser_rollback.add_argument('block_index', type=int, help='the index of the last known good block')
parser_kickstart = subparsers.add_parser('kickstart', help='rapidly build database by reading from Bitcoin Core blockchain')
parser_kickstart.add_argument('--bitcoind-dir', help='Bitcoin Core data directory')
parser_bootstrap = subparsers.add_parser('bootstrap', help='bootstrap database with hosted snapshot')
parser_bootstrap.add_argument('-q', '--quiet', dest='quiet', action='store_true', help='suppress progress bar')
#parser_bootstrap.add_argument('--branch', help='use a different branch for bootstrap db pulling')
args = parser.parse_args()
log.set_up(log.ROOT_LOGGER, verbose=args.verbose, console_logfilter=os.environ.get('COUNTERPARTY_LOGGING', None))
logger.info('Running v{} of {}.'.format(APP_VERSION, APP_NAME))
# Help message
if args.help:
parser.print_help()
sys.exit()
# Bootstrapping
if args.action == 'bootstrap':
bootstrap(testnet=args.testnet, quiet=args.quiet)
sys.exit()
def init_with_catch(fn, init_args):
try:
return fn(**init_args)
except TypeError as e:
if 'unexpected keyword argument' in str(e):
raise VersionError('Unsupported Server Parameter. CLI/Library Version Incompatibility.')
else:
raise e
# Configuration
COMMANDS_WITH_DB = ['reparse', 'rollback', 'kickstart', 'start', 'vacuum']
COMMANDS_WITH_CONFIG = ['debug_config']
if args.action in COMMANDS_WITH_DB or args.action in COMMANDS_WITH_CONFIG:
init_args = dict(database_file=args.database_file,
log_file=args.log_file, api_log_file=args.api_log_file,
testnet=args.testnet, testcoin=args.testcoin, regtest=args.regtest,
customnet=args.customnet,
api_limit_rows=args.api_limit_rows,
backend_name=args.backend_name,
backend_connect=args.backend_connect,
backend_port=args.backend_port,
backend_user=args.backend_user,
backend_password=args.backend_password,
backend_ssl=args.backend_ssl,
backend_ssl_no_verify=args.backend_ssl_no_verify,
backend_poll_interval=args.backend_poll_interval,
indexd_connect=args.indexd_connect, indexd_port=args.indexd_port,
rpc_host=args.rpc_host, rpc_port=args.rpc_port, rpc_user=args.rpc_user,
rpc_password=args.rpc_password, rpc_no_allow_cors=args.rpc_no_allow_cors,
requests_timeout=args.requests_timeout,
rpc_batch_size=args.rpc_batch_size,
check_asset_conservation=not args.no_check_asset_conservation,
force=args.force, verbose=args.verbose, console_logfilter=os.environ.get('COUNTERPARTY_LOGGING', None),
p2sh_dust_return_pubkey=args.p2sh_dust_return_pubkey,
utxo_locks_max_addresses=args.utxo_locks_max_addresses,
utxo_locks_max_age=args.utxo_locks_max_age)
#,broadcast_tx_mainnet=args.broadcast_tx_mainnet)
if args.action in COMMANDS_WITH_DB:
db = init_with_catch(server.initialise, init_args)
elif args.action in COMMANDS_WITH_CONFIG:
init_with_catch(server.initialise_config, init_args)
# PARSING
if args.action == 'reparse':
server.reparse(db)
elif args.action == 'rollback':
server.reparse(db, block_index=args.block_index)
elif args.action == 'kickstart':
server.kickstart(db, bitcoind_dir=args.bitcoind_dir)
elif args.action == 'start':
server.start_all(db)
elif args.action == 'debug_config':
server.debug_config()
elif args.action == 'vacuum':
server.vacuum(db)
else:
parser.print_help()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| {
"content_hash": "5a2d8ed298335ac3f692d0649ce90a8d",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 187,
"avg_line_length": 59.52571428571429,
"alnum_prop": 0.6304118268215417,
"repo_name": "CounterpartyXCP/counterparty-cli",
"id": "9248dacb4ec290f65fff157385a2b53cbe14ae71",
"size": "10444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "counterpartycli/server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91627"
}
],
"symlink_target": ""
} |
from lxml import etree
from xml.etree.ElementTree import iterparse
# coding: utf-8
from sqlalchemy import Table, Column,Integer,String,Float , DateTime, create_engine,MetaData,or_
from sqlalchemy.orm import mapper, sessionmaker
# import loadText as lt
import os
import pprint
import re
import os
import sys
import shutil
#from gensim import corpora,models,similarities
import itertools
import threading, time
from sets import Set
import cStringIO
from lxml import etree, html
from xml.etree.ElementTree import iterparse
import HTMLParser
import datetime
import pandas as pd
import numpy as np
import colorsys
import threading, Queue
# import xapian
# from xapian import SimpleStopper
import networkx as nx
from networkx.readwrite import json_graph
import json
singlelock = threading.Lock()
html_parser = HTMLParser.HTMLParser()
entities = [ ('&-',''), ('&|','')]
def create_text_db(text_folder):
engine = create_engine('sqlite:///%s/W3C.db'%(text_folder), echo=True)
metadata = MetaData(bind=engine)
documents_table = Table('Messages', metadata,
Column('Id', String(150), primary_key=True),
Column('Subject', String ),
Column('Text', String),
Column('AuthorID', String),
Column('AuthorName', String),
Column('AuthorEmail', String),
Column('ThreadID', String),
Column('Date', DateTime),
Column('Type', String),
Column('ResponseTo', String),
Column('URL', String),
Column('Level', String),
Column ('SubjectChanged',String),
Column('FromOtherList', String)
)
metadata.create_all()
class Message(object):
pass
def init():
folder=os.path.join('')
engine = create_engine('sqlite:///' + os.path.join(folder, 'w3c.db'), echo=False)
metadata = MetaData(bind=engine)
global Session
Session=sessionmaker(bind=engine)
global message_table
message_table= Table('Messages', metadata, autoload=True)
try:
mapper(Message, message_table)
except:
pass
base='http://lists.w3.org/Archives/Public/public-html/{0}/{1}.html'
def create_threads(size=10):
init()
session=Session()
messages=session.query(Message).all()
# messages=session.query(Message).all()
print len(messages)
G=nx.DiGraph()
for message in messages:
if message.ResponseTo !='':
G.add_node(message.ResponseTo, name=message.AuthorName[0:20])
G.add_node(message.Id, name=message.AuthorName[0:20])
G.add_edge(message.ResponseTo, message.Id)
threads=nx.weakly_connected_component_subgraphs(G)
threads=[t for t in threads if len(t)>size]
thread_info=[]
for thread in threads:
root=nx.topological_sort(thread)[0]
m=session.query(Message).filter(or_(Message.Id==str(root), Message.ResponseTo==str(root)) ).first()
thread_info.append((len(thread), m.Subject, root, thread ))
return thread_info
def json_thread(size=100):
threads=create_threads(size=size)
for thread_size, thread_subject, root, t_thread in threads:
names= [d['name'] for n, d in t_thread.nodes(data=True)]
unique_names=list(set(names))
colors=pick_color(len(unique_names))
col_pallette=dict(zip(unique_names, colors))
for n,d in t_thread.nodes(data=True):
d['color']=col_pallette[d['name']]
# root=nx.topological_sort(t_thread)[0]
file='%s_%s.json'%(thread_size, thread_subject)
file=re.sub('/',':', file )
data = json_graph.tree_data(t_thread,root=root)
with open(os.path.join('json', file), 'w') as outfile:
json.dump(data,outfile)
def pick_color(n=1):
h = np.random.random() # use random start value
golden_ratio_conjugate = 0.618033988749895
hexcolors=[]
for i in range(n):
h += golden_ratio_conjugate
h %= 1
rgb_tuple=colorsys.hsv_to_rgb(h, .5, .95)
rgb_tuple=tuple( map(lambda x: int(x*256), rgb_tuple))
hexcolors.append('#%02x%02x%02x' % rgb_tuple)
return hexcolors
# def hsv_to_rgb(h, s, v):
#
# h_i = int(h*6)
# f = h*6 - h_i
# p = v * (1 - s)
# q = v * (1 - f*s)
# t = v * (1 - (1 - f) * s)
#
# if h_i==0: r, g, b = v, t, p
# if h_i==1: r, g, b = q, v, p
# if h_i==2: r, g, b = p, v, t
# if h_i==3: r, g, b = p, q, v
# if h_i==4: r, g, b = t, p, v
# if h_i==5: r, g, b = v, p, q
# return int(r*256), int(g*256), int(b*256)
###################
#POPULATE
class updater(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
global files_being_proccessed
while True:
# gets the url from the queue
doci = self.queue.get()
period,id=doci.split('~')
# save doc
try:
self.process(period, id)
except:
print 'error @', period, id
# send a signal to the queue that the job is done
self.queue.task_done()
singlelock.acquire()
print period, id, ' Done..'
singlelock.release()
# rem_files= [file for file in files_being_proccessed if file!=doc]
# files_being_proccessed=rem_files
# update_remaining(files_being_proccessed)
def process(self, period, id):
murl=base.format(period,"%04d" % (int(id)))
print murl
# xmldata=re.sub(r' (\w+)=[A-Z0-9-]+>','>',xmldata)
#
# for before, after in entities:
# xmldata = xmldata.replace(before, after.encode('utf-8'))
# xmldatafile=cStringIO.StringIO(xmldata)
# context = etree.iterparse( xmldatafile, tag='DOC' )
try:
doc = html.parse(murl)
except:
print 'Missing page, ', id
return
comments=doc.getroot().xpath("//comment()")
comments_dict={}
for comment in comments:
comment=html_parser.unescape(unicode(comment))
tag=re.findall(r"(?<=\s).*?(?==)", comment)[0]
value=re.findall(r"(?<=\").*?(?=\")", comment)[0]
comments_dict[tag]= unicode(value)
#ID
m_id= comments_dict['id']
# m_id= doc.getroot().xpath("//span[@id='message-id']/text()")[0]
# try:
# m_id= re.findall(r"(?<=<).*?(?=>)",m_id)[0]
# except:
# m_id=doc.getroot().xpath("//span[@id='message-id']/text()")[0].strip()
#
#SUBJECT
subject= comments_dict['subject']
#subject=doc.getroot().xpath("//meta[@name='Subject']/@content")[0]
#AUTHOR
author_name=comments_dict.get('name', '')
author_email=comments_dict['email']
#author=doc.getroot().xpath("//meta[@name='Author']/@content")
#DATE
m_date=doc.getroot().xpath("//meta[@name='Date']/@content")[0]
m_date_array=map(int, m_date.split('-'))
m_date=datetime.date(m_date_array[0],m_date_array[1],m_date_array[2])
print m_date
#TYPE
type_='Message'
if '[BUG]' in subject:
type_='Bug'
#RESPONSETO
responseto=comments_dict.get('inreplyto', '')
#TEXT
o_text= ' '.join(doc.getroot().xpath("//pre[@id='body']/text()"))
text=re.sub(r'>+.*\n', '',o_text)
text=re.sub(r'On.*wrote:\n', '',text)
text=re.sub(r'\s+',' ', text)
#URL
i = message_table.insert()
q = i.execute({'Id':m_id,
'Text':text,
'AuthorEmail':author_email,
'AuthorName':author_name,
'Subject':subject,
'Date':m_date,
'Type': type_,
'ResponseTo':responseto,
'URL': murl})
del doc
if __name__ == '__main__':
# files=[]
file='htmlmessages.csv'
message_list=pd.read_csv(file)
queue = Queue.Queue()
for j in range(10):
t=updater(queue)
t.setDaemon(True)
t.start()
init()
print 'starting'
ids=range(0,20)
for i, r in message_list.iterrows():
for j in range(r['count']):
queue.put(r['period']+'~'+str(j))
# for i in ids:
#
# queue.put(str(ids[i])+'~'+str(ids[i]))
queue.join()
# db_pass.flush()
# db_doc.flush()
print 'Finished...', i
| {
"content_hash": "df11e9f573cd52e63fe9423116e8af53",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 107,
"avg_line_length": 24.947229551451187,
"alnum_prop": 0.5032258064516129,
"repo_name": "ygenc/ygenc.github.io",
"id": "9a3a8938fca807487c1380eaa5a0f1e8c834025e",
"size": "9455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/html5/html5.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "317848"
},
{
"name": "HTML",
"bytes": "10311204"
},
{
"name": "Hack",
"bytes": "2670"
},
{
"name": "JavaScript",
"bytes": "2135171"
},
{
"name": "PHP",
"bytes": "14402"
},
{
"name": "Python",
"bytes": "9455"
},
{
"name": "Ruby",
"bytes": "87"
},
{
"name": "SCSS",
"bytes": "4670"
}
],
"symlink_target": ""
} |
import os
import subprocess
import signal
import struct
import csv
try:
import pandas
except ImportError:
pandas = None
from wlauto import Instrument, Parameter, Executable
from wlauto.exceptions import InstrumentError, ConfigError
from wlauto.utils.types import list_of_numbers
class EnergyProbe(Instrument):
name = 'energy_probe'
description = """Collects power traces using the ARM energy probe.
This instrument requires ``caiman`` utility to be installed in the workload automation
host and be in the PATH. Caiman is part of DS-5 and should be in ``/path/to/DS-5/bin/`` .
Energy probe can simultaneously collect energy from up to 3 power rails.
To connect the energy probe on a rail, connect the white wire to the pin that is closer to the
Voltage source and the black wire to the pin that is closer to the load (the SoC or the device
you are probing). Between the pins there should be a shunt resistor of known resistance in the
range of 5 to 20 mOhm. The resistance of the shunt resistors is a mandatory parameter
``resistor_values``.
.. note:: This instrument can process results a lot faster if python pandas is installed.
"""
parameters = [
Parameter('resistor_values', kind=list_of_numbers, default=[],
description="""The value of shunt resistors. This is a mandatory parameter."""),
Parameter('labels', kind=list, default=[],
description="""Meaningful labels for each of the monitored rails."""),
Parameter('device_entry', kind=str, default='/dev/ttyACM0',
description="""Path to /dev entry for the energy probe (it should be /dev/ttyACMx)"""),
]
MAX_CHANNELS = 3
def __init__(self, device, **kwargs):
super(EnergyProbe, self).__init__(device, **kwargs)
self.attributes_per_sample = 3
self.bytes_per_sample = self.attributes_per_sample * 4
self.attributes = ['power', 'voltage', 'current']
for i, val in enumerate(self.resistor_values):
self.resistor_values[i] = int(1000 * float(val))
def validate(self):
if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
raise InstrumentError('caiman not in PATH. Cannot enable energy probe')
if not self.resistor_values:
raise ConfigError('At least one resistor value must be specified')
if len(self.resistor_values) > self.MAX_CHANNELS:
raise ConfigError('{} Channels where specified when Energy Probe supports up to {}'
.format(len(self.resistor_values), self.MAX_CHANNELS))
if pandas is None:
self.logger.warning("pandas package will significantly speed up this instrument")
self.logger.warning("to install it try: pip install pandas")
def setup(self, context):
if not self.labels:
self.labels = ["PORT_{}".format(channel) for channel, _ in enumerate(self.resistor_values)]
self.output_directory = os.path.join(context.output_directory, 'energy_probe')
rstring = ""
for i, rval in enumerate(self.resistor_values):
rstring += '-r {}:{} '.format(i, rval)
self.command = 'caiman -d {} -l {} {}'.format(self.device_entry, rstring, self.output_directory)
os.makedirs(self.output_directory)
def start(self, context):
self.logger.debug(self.command)
self.caiman = subprocess.Popen(self.command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=os.setpgrp,
shell=True)
def stop(self, context):
os.killpg(self.caiman.pid, signal.SIGTERM)
def update_result(self, context): # pylint: disable=too-many-locals
num_of_channels = len(self.resistor_values)
processed_data = [[] for _ in xrange(num_of_channels)]
filenames = [os.path.join(self.output_directory, '{}.csv'.format(label)) for label in self.labels]
struct_format = '{}I'.format(num_of_channels * self.attributes_per_sample)
not_a_full_row_seen = False
with open(os.path.join(self.output_directory, "0000000000"), "rb") as bfile:
while True:
data = bfile.read(num_of_channels * self.bytes_per_sample)
if data == '':
break
try:
unpacked_data = struct.unpack(struct_format, data)
except struct.error:
if not_a_full_row_seen:
self.logger.warn('possibly missaligned caiman raw data, row contained {} bytes'.format(len(data)))
continue
else:
not_a_full_row_seen = True
for i in xrange(num_of_channels):
index = i * self.attributes_per_sample
processed_data[i].append({attr: val for attr, val in
zip(self.attributes, unpacked_data[index:index + self.attributes_per_sample])})
for i, path in enumerate(filenames):
with open(path, 'w') as f:
if pandas is not None:
self._pandas_produce_csv(processed_data[i], f)
else:
self._slow_produce_csv(processed_data[i], f)
# pylint: disable=R0201
def _pandas_produce_csv(self, data, f):
dframe = pandas.DataFrame(data)
dframe = dframe / 1000.0
dframe.to_csv(f)
def _slow_produce_csv(self, data, f):
new_data = []
for entry in data:
new_data.append({key: val / 1000.0 for key, val in entry.items()})
writer = csv.DictWriter(f, self.attributes)
writer.writeheader()
writer.writerows(new_data)
| {
"content_hash": "619a6d4fcb9da9acb0957c171b5e81ea",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 125,
"avg_line_length": 47.689922480620154,
"alnum_prop": 0.5832249674902471,
"repo_name": "jimboatarm/workload-automation",
"id": "6fc229ee28cf13aa87655a5292521c1a536bc50f",
"size": "6832",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "wlauto/instrumentation/energy_probe/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40003"
},
{
"name": "HTML",
"bytes": "243720"
},
{
"name": "Java",
"bytes": "227178"
},
{
"name": "JavaScript",
"bytes": "6578"
},
{
"name": "Jupyter Notebook",
"bytes": "1322"
},
{
"name": "Makefile",
"bytes": "430"
},
{
"name": "Python",
"bytes": "1557762"
},
{
"name": "Shell",
"bytes": "39222"
},
{
"name": "Vim script",
"bytes": "901"
}
],
"symlink_target": ""
} |
from mongoengine.connection import get_db
class query_counter(object):
""" Query_counter contextmanager to get the number of queries. """
def __init__(self):
""" Construct the query_counter. """
self.counter = 0
self.db = get_db()
def __enter__(self):
""" On every with block we need to drop the profile collection. """
self.db.set_profiling_level(0)
self.db.system.profile.really_drop()
self.db.set_profiling_level(2)
return self
def __exit__(self, t, value, traceback):
""" Reset the profiling level. """
self.db.set_profiling_level(0)
def __eq__(self, value):
""" == Compare querycounter. """
return value == self._get_count()
def __ne__(self, value):
""" != Compare querycounter. """
return not self.__eq__(value)
def __lt__(self, value):
""" < Compare querycounter. """
return self._get_count() < value
def __le__(self, value):
""" <= Compare querycounter. """
return self._get_count() <= value
def __gt__(self, value):
""" > Compare querycounter. """
return self._get_count() > value
def __ge__(self, value):
""" >= Compare querycounter. """
return self._get_count() >= value
def __int__(self):
""" int representation. """
return self._get_count()
def __repr__(self):
""" repr query_counter as the number of queries. """
return u"%s" % self._get_count()
def _get_count(self):
""" Get the number of queries. """
count = self.db.system.profile.find().count() - self.counter
self.counter += 1
return count
| {
"content_hash": "4e01255d02684f578907cafc801a4215",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 29.06779661016949,
"alnum_prop": 0.5440233236151604,
"repo_name": "colinhowe/mongoengine",
"id": "20620eba3cdfce08087c91c01989ea0f2b045aa2",
"size": "1715",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mongoengine/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "516347"
}
],
"symlink_target": ""
} |
from django.test import SimpleTestCase
from corehq.apps.users.admin import CustomUserAdmin
class CustomUserAdminTest(SimpleTestCase):
def test_fieldsets(self):
"""
Test that the value of CustomUserAdmin.fieldsets,
dynamically calculated by removing fields from UserAdmin,
matches hard-coded value.
This will alert us of any changes to Django's own UserAdmin that affect this,
and allow us to make any changes necessitated by that.
This is probably over-careful, but might help us quickly avoid a surprise.
"""
self.assertEqual(CustomUserAdmin.fieldsets, (
(None, {'fields': ('username', 'password')}),
('Personal info', {'fields': ('first_name', 'last_name', 'email')}),
('Permissions', {'fields': ('is_active', 'groups', 'user_permissions')}),
('Important dates', {'fields': ('last_login', 'date_joined')}),
))
| {
"content_hash": "43237152143e14f111cd56187fea5b10",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 39.583333333333336,
"alnum_prop": 0.6378947368421053,
"repo_name": "dimagi/commcare-hq",
"id": "28063745a3868710d069fb87016a43a71318e315",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/users/tests/test_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0053_related_name'),
('tasks', '0031_related_name'),
]
operations = [
migrations.AddField(
model_name='task',
name='catalogs',
field=models.ManyToManyField(blank=True, help_text='The catalogs this task can be used with. An empty list implies that this task can be used with every catalog.', to='questions.Catalog', verbose_name='Catalogs'),
),
]
| {
"content_hash": "9e4acbb35a2a3645b0699fd2e902c7ad",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 225,
"avg_line_length": 32.11764705882353,
"alnum_prop": 0.6282051282051282,
"repo_name": "rdmorganiser/rdmo",
"id": "e72ba4213277b239a27eaf8a199d3b79b5e7a002",
"size": "596",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdmo/tasks/migrations/0032_task_catalogs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "426256"
},
{
"name": "JavaScript",
"bytes": "110821"
},
{
"name": "Python",
"bytes": "1265092"
},
{
"name": "SCSS",
"bytes": "20373"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow.keras.callbacks import TensorBoard
from tensorflow_hub import KerasLayer
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tfx_bsl.tfxio import dataset_options
from config import (
HUB_URL,
HUB_DIM,
N_NEURONS,
N_CLASSES,
LABEL_KEY,
TRAIN_BATCH_SIZE,
EVAL_BATCH_SIZE,
MODEL_NAME,
transformed_name
)
def _get_serve_tf_examples_fn(model, tf_transform_output):
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(LABEL_KEY)
parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
return serve_tf_examples_fn
def _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200):
return data_accessor.tf_dataset_factory(
file_pattern,
dataset_options.TensorFlowDatasetOptions(
batch_size=batch_size,
label_key=transformed_name(LABEL_KEY)),
tf_transform_output.transformed_metadata.schema
)
def _load_hub_module_layer():
hub_module = KerasLayer(
HUB_URL, output_shape=[HUB_DIM],
input_shape=[], dtype=tf.string, trainable=True)
return hub_module
def _build_keras_model():
hub_module = _load_hub_module_layer()
model = Sequential([
hub_module,
Dense(N_NEURONS, activation='relu'),
Dense(N_CLASSES, activation='softmax')
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
)
return model
def run_fn(fn_args):
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
tf_transform_output, TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
tf_transform_output, EVAL_BATCH_SIZE)
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = _build_keras_model()
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir, update_freq='batch')
model.fit(
train_dataset,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback])
signatures = {
'serving_default':
_get_serve_tf_examples_fn(model,
tf_transform_output).get_concrete_function(
tf.TensorSpec(
shape=[None],
dtype=tf.string,
name='examples')),
}
model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
| {
"content_hash": "df293312802fb2925bad0ec351da6f37",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 83,
"avg_line_length": 31.676190476190477,
"alnum_prop": 0.6268791340950091,
"repo_name": "GoogleCloudPlatform/mlops-on-gcp",
"id": "74d8daaaf435ce624ea1437b2700536f65ac59cb",
"size": "3326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "immersion/guided_projects/guided_project_3_nlp_starter/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "15195"
},
{
"name": "HCL",
"bytes": "8348"
},
{
"name": "JavaScript",
"bytes": "1143"
},
{
"name": "Jupyter Notebook",
"bytes": "6737030"
},
{
"name": "Mustache",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "1235643"
},
{
"name": "Shell",
"bytes": "30775"
}
],
"symlink_target": ""
} |
''' Bokeh Application Handler to look for Bokeh server lifecycle callbacks
in a specified Python module.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
from types import ModuleType
# Bokeh imports
from ...core.types import PathLike
from ...util.callback_manager import _check_callback
from .code_runner import CodeRunner
from .lifecycle import LifecycleHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ServerLifecycleHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class ServerLifecycleHandler(LifecycleHandler):
''' Load a script which contains server lifecycle callbacks.
.. autoclasstoc::
'''
def __init__(self, *, filename: PathLike, argv: list[str] = [], package: ModuleType | None = None) -> None:
'''
Keyword Args:
filename (str) : path to a module to load lifecycle callbacks from
argv (list[str], optional) : a list of string arguments to use as
``sys.argv`` when the callback code is executed. (default: [])
'''
super().__init__()
with open(filename, encoding='utf-8') as f:
source = f.read()
self._runner = CodeRunner(source, filename, argv, package=package)
if not self._runner.failed:
# unlike ScriptHandler, we only load the module one time
self._module = self._runner.new_module()
def extract_callbacks() -> None:
contents = self._module.__dict__
if 'on_server_loaded' in contents:
self._on_server_loaded = contents['on_server_loaded']
if 'on_server_unloaded' in contents:
self._on_server_unloaded = contents['on_server_unloaded']
if 'on_session_created' in contents:
self._on_session_created = contents['on_session_created']
if 'on_session_destroyed' in contents:
self._on_session_destroyed = contents['on_session_destroyed']
_check_callback(self._on_server_loaded, ('server_context',), what="on_server_loaded")
_check_callback(self._on_server_unloaded, ('server_context',), what="on_server_unloaded")
_check_callback(self._on_session_created, ('session_context',), what="on_session_created")
_check_callback(self._on_session_destroyed, ('session_context',), what="on_session_destroyed")
self._runner.run(self._module, extract_callbacks)
# Properties --------------------------------------------------------------
@property
def error(self) -> str | None:
''' If the handler fails, may contain a related error message.
'''
return self._runner.error
@property
def error_detail(self) -> str | None:
''' If the handler fails, may contain a traceback or other details.
'''
return self._runner.error_detail
@property
def failed(self) -> bool:
''' ``True`` if the lifecycle callbacks failed to execute
'''
return self._runner.failed
# Public methods ----------------------------------------------------------
def url_path(self) -> str | None:
''' The last path component for the basename of the path to the
callback module.
'''
if self.failed:
return None
else:
# TODO should fix invalid URL characters
return '/' + os.path.splitext(os.path.basename(self._runner.path))[0]
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "356b1222b245c7f6fddbfd01e822d34f",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 111,
"avg_line_length": 35.81060606060606,
"alnum_prop": 0.4482758620689655,
"repo_name": "bokeh/bokeh",
"id": "ed88b030654f25f378892451b7f0957f4b8bdf3e",
"size": "5058",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "src/bokeh/application/handlers/server_lifecycle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
} |
import cgi
import random
import shlex
import logging
import traceback
import oembed
import jinja2
from operator import attrgetter
from urlparse import urlparse, urlunparse
import pymongo
from pylons import tmpl_context as c, app_globals as g
from pylons import request
from paste.deploy.converters import asint
from BeautifulSoup import BeautifulSoup
from . import helpers as h
from . import security
log = logging.getLogger(__name__)
_macros = {}
class macro(object):
def __init__(self, context=None):
self._context = context
def __call__(self, func):
_macros[func.__name__] = (func, self._context)
return func
class parse(object):
def __init__(self, context):
self._context = context
def __call__(self, s):
try:
if s.startswith('quote '):
return '[[' + s[len('quote '):] + ']]'
try:
parts = [unicode(x, 'utf-8')
for x in shlex.split(s.encode('utf-8'))]
if not parts:
return '[[' + s + ']]'
macro = self._lookup_macro(parts[0])
if not macro:
return '[[' + s + ']]'
for t in parts[1:]:
if '=' not in t:
return '[-%s: missing =-]' % ' '.join(parts)
args = dict(t.split('=', 1) for t in parts[1:])
response = macro(**h.encode_keys(args))
return response
except (ValueError, TypeError) as ex:
log.warn('macro error. Upwards stack is %s',
''.join(traceback.format_stack()),
exc_info=True)
msg = cgi.escape(u'[[%s]] (%s)' % (s, repr(ex)))
return '\n<div class="error"><pre><code>%s</code></pre></div>' % msg
except Exception, ex:
raise
return '[[Error parsing %s: %s]]' % (s, ex)
def _lookup_macro(self, s):
macro, context = _macros.get(s, (None, None))
if context is None or context == self._context:
return macro
else:
return None
@macro('neighborhood-wiki')
def neighborhood_feeds(tool_name, max_number=5, sort='pubdate'):
from allura import model as M
from allura.lib.widgets.macros import NeighborhoodFeeds
feed = M.Feed.query.find(
dict(
tool_name=tool_name,
neighborhood_id=c.project.neighborhood._id))
feed = feed.sort(sort, pymongo.DESCENDING).limit(int(max_number)).all()
output = ((dict(
href=item.link,
title=item.title,
author=item.author_name,
ago=h.ago(item.pubdate),
description=g.markdown.cached_convert(item, 'description')))
for item in feed)
feeds = NeighborhoodFeeds(feeds=output)
g.resource_manager.register(feeds)
response = feeds.display(feeds=output)
return response
@macro('neighborhood-wiki')
def neighborhood_blog_posts(max_number=5, sort='timestamp', summary=False):
from forgeblog import model as BM
from allura.lib.widgets.macros import BlogPosts
posts = BM.BlogPost.query.find(dict(
neighborhood_id=c.project.neighborhood._id,
state='published'))
posts = posts.sort(sort, pymongo.DESCENDING).limit(int(max_number)).all()
output = ((dict(
href=post.url(),
title=post.title,
author=post.author().display_name,
ago=h.ago(post.timestamp),
description=summary and ' ' or g.markdown.cached_convert(post, 'text')))
for post in posts if post.app and
security.has_access(post, 'read', project=post.app.project)() and
security.has_access(post.app.project, 'read', project=post.app.project)())
posts = BlogPosts(posts=output)
g.resource_manager.register(posts)
response = posts.display(posts=output)
return response
@macro()
def project_blog_posts(max_number=5, sort='timestamp', summary=False, mount_point=None):
from forgeblog import model as BM
from allura.lib.widgets.macros import BlogPosts
app_config_ids = []
for conf in c.project.app_configs:
if conf.tool_name.lower() == 'blog' and (mount_point is None or conf.options.mount_point == mount_point):
app_config_ids.append(conf._id)
posts = BM.BlogPost.query.find({
'app_config_id': {'$in': app_config_ids},
'state': 'published',
})
posts = posts.sort(sort, pymongo.DESCENDING).limit(int(max_number)).all()
output = ((dict(
href=post.url(),
title=post.title,
author=post.author().display_name,
ago=h.ago(post.timestamp),
description=summary and ' ' or g.markdown.cached_convert(post, 'text')))
for post in posts if security.has_access(post, 'read', project=post.app.project)() and
security.has_access(post.app.project, 'read', project=post.app.project)())
posts = BlogPosts(posts=output)
g.resource_manager.register(posts)
response = posts.display(posts=output)
return response
def get_projects_for_macro(
category=None, sort='last_updated',
show_total=False, limit=100, labels='', award='', private=False,
columns=1, show_proj_icon=True, show_download_button=False, show_awards_banner=True,
initial_q={}):
from allura.lib.widgets.project_list import ProjectList
from allura.lib import utils
from allura import model as M
# 'trove' is internal substitution for 'category' filter in wiki macro
trove = category
limit = int(limit)
q = dict(
deleted=False,
is_nbhd_project=False)
q.update(initial_q)
if labels:
or_labels = labels.split('|')
q['$or'] = [{'labels': {'$all': l.split(',')}} for l in or_labels]
if trove is not None:
trove = M.TroveCategory.query.get(fullpath=trove)
if award:
aw = M.Award.query.find(dict(
created_by_neighborhood_id=c.project.neighborhood_id,
short=award)).first()
if aw:
ids = [grant.granted_to_project_id for grant in
M.AwardGrant.query.find(dict(
granted_by_neighborhood_id=c.project.neighborhood_id,
award_id=aw._id))]
if '_id' in q:
ids = list(set(q['_id']['$in']).intersection(ids))
q['_id'] = {'$in': ids}
if trove is not None:
q['trove_' + trove.type] = trove._id
sort_key, sort_dir = 'last_updated', pymongo.DESCENDING
if sort == 'alpha':
sort_key, sort_dir = 'name', pymongo.ASCENDING
elif sort == 'random':
sort_key, sort_dir = None, None
elif sort == 'last_registered':
sort_key, sort_dir = '_id', pymongo.DESCENDING
elif sort == '_id':
sort_key, sort_dir = '_id', pymongo.DESCENDING
projects = []
if private:
# Only return private projects.
# Can't filter these with a mongo query directly - have to iterate
# through and check the ACL of each project.
for chunk in utils.chunked_find(M.Project, q, sort_key=sort_key,
sort_dir=sort_dir):
projects.extend([p for p in chunk if p.private])
total = len(projects)
if sort == 'random':
projects = random.sample(projects, min(limit, total))
else:
projects = projects[:limit]
else:
total = None
if sort == 'random':
# MongoDB doesn't have a random sort built in, so...
# 1. Do a direct pymongo query (faster than ORM) to fetch just the
# _ids of objects that match our criteria
# 2. Choose a random sample of those _ids
# 3. Do an ORM query to fetch the objects with those _ids
# 4. Shuffle the results
from ming.orm import mapper
m = mapper(M.Project)
collection = M.main_doc_session.db[m.collection.m.collection_name]
docs = list(collection.find(q, {'_id': 1}))
if docs:
ids = [doc['_id'] for doc in
random.sample(docs, min(limit, len(docs)))]
if '_id' in q:
ids = list(set(q['_id']['$in']).intersection(ids))
q['_id'] = {'$in': ids}
projects = M.Project.query.find(q).all()
random.shuffle(projects)
else:
projects = M.Project.query.find(q).limit(limit).sort(sort_key,
sort_dir).all()
pl = ProjectList()
g.resource_manager.register(pl)
response = pl.display(projects=projects,
columns=columns, show_proj_icon=show_proj_icon,
show_download_button=show_download_button,
show_awards_banner=show_awards_banner,
)
if show_total:
if total is None:
total = 0
for p in M.Project.query.find(q):
if h.has_access(p, 'read')():
total = total + 1
response = '<p class="macro_projects_total">%s Projects</p>%s' % \
(total, response)
return response
@macro('neighborhood-wiki')
def projects(category=None, sort='last_updated',
show_total=False, limit=100, labels='', award='', private=False,
columns=1, show_proj_icon=True, show_download_button=False, show_awards_banner=True,
display_mode=None, grid_view_tools='', # old & unused now
):
initial_q = dict(neighborhood_id=c.project.neighborhood_id)
return get_projects_for_macro(
category=category, sort=sort,
show_total=show_total, limit=limit, labels=labels, award=award, private=private,
columns=columns, show_proj_icon=show_proj_icon, show_download_button=show_download_button,
show_awards_banner=show_awards_banner,
initial_q=initial_q)
@macro('userproject-wiki')
def my_projects(category=None, sort='last_updated',
show_total=False, limit=100, labels='', award='', private=False,
columns=1, show_proj_icon=True, show_download_button=False, show_awards_banner=True,
display_mode=None, grid_view_tools='', # old & unused now
):
myproj_user = c.project.user_project_of
if myproj_user is None:
myproj_user = c.user.anonymous()
ids = []
for p in myproj_user.my_projects():
ids.append(p._id)
initial_q = dict(_id={'$in': ids})
return get_projects_for_macro(
category=category, sort=sort,
show_total=show_total, limit=limit, labels=labels, award=award, private=private,
columns=columns, show_proj_icon=show_proj_icon, show_download_button=show_download_button,
show_awards_banner=show_awards_banner,
initial_q=initial_q)
@macro()
def project_screenshots():
from allura.lib.widgets.project_list import ProjectScreenshots
ps = ProjectScreenshots()
g.resource_manager.register(ps)
response = ps.display(project=c.project)
return response
@macro()
def gittip_button(username):
from allura.lib.widgets.macros import GittipButton
button = GittipButton(username=username)
g.resource_manager.register(button)
response = button.display(username=username)
return response
def parse_repo(repo):
if not repo:
return None
from allura import model as M
parts = repo.split(':')
project, app = c.project, None
nbhd = c.project.neighborhood if c.project else None
if len(parts) == 3:
nbhd = M.Neighborhood.query.get(url_prefix='/' + parts[0] + '/')
project = M.Project.query.get(
shortname=parts[1],
neighborhood_id=nbhd._id) if nbhd else None
app = project.app_instance(parts[2]) if project else None
if len(parts) == 2:
project = M.Project.query.get(
shortname=parts[0],
neighborhood_id=nbhd._id) if nbhd else None
app = project.app_instance(parts[1]) if project else None
elif len(parts) == 1:
app = project.app_instance(parts[0]) if project else None
return app
def include_file(repo, path=None, rev=None, **kw):
app = parse_repo(repo)
if not app:
return '[[include repo %s (not found)]]' % repo
if not h.has_access(app.repo, 'read')():
return "[[include: you don't have a read permission for repo %s]]" % repo
rev = app.repo.head if rev is None else rev
try:
file = app.repo.commit(rev).get_path(path)
except Exception:
return "[[include can't find file %s in revision %s]]" % (path, rev)
text = ''
if file.has_pypeline_view:
text = h.render_any_markup(file.name, file.text, code_mode=True)
elif file.has_html_view:
text = g.highlight(file.text, filename=file.name)
else:
return "[[include can't display file %s in revision %s]]" % (path, rev)
from allura.lib.widgets.macros import Include
sb = Include()
g.resource_manager.register(sb)
return sb.display(text=text, attrs=kw)
@macro()
def include(ref=None, repo=None, **kw):
from allura import model as M
from allura.lib.widgets.macros import Include
if repo is not None:
return include_file(repo, **kw)
if ref is None:
return '[-include-]'
link = M.Shortlink.lookup(ref)
if not link:
return '[[include %s (not found)]]' % ref
artifact = link.ref.artifact
if artifact is None:
return '[[include (artifact not found)]]' % ref
if not h.has_access(artifact, 'read')():
return "[[include: you don't have a read permission for %s]]" % ref
included = request.environ.setdefault('allura.macro.included', set())
if artifact in included:
return '[[include %s (already included)]' % ref
else:
included.add(artifact)
sb = Include()
g.resource_manager.register(sb)
response = sb.display(artifact=artifact, attrs=kw)
return response
@macro()
def img(src=None, **kw):
attrs = ('%s="%s"' % t for t in kw.iteritems())
included = request.environ.setdefault('allura.macro.att_embedded', set())
included.add(src)
if '://' in src:
return '<img src="%s" %s/>' % (src, ' '.join(attrs))
else:
return '<img src="./attachment/%s" %s/>' % (src, ' '.join(attrs))
@macro()
def project_admins():
admins = c.project.users_with_role('Admin')
from allura.lib.widgets.macros import ProjectAdmins
output = ((dict(
url=user.url(),
name=user.display_name))
for user in admins)
users = ProjectAdmins(users=output)
g.resource_manager.register(users)
response = users.display(users=output)
return response
@macro()
def members(limit=20):
from allura.lib.widgets.macros import Members
limit = asint(limit)
admins = set(c.project.users_with_role('Admin'))
members = sorted(c.project.users(), key=attrgetter('display_name'))
output = [dict(
url=user.url(),
name=user.display_name,
admin=' (admin)' if user in admins else '',
)
for user in members[:limit]]
over_limit = len(members) > limit
users = Members(users=output, over_limit=over_limit)
g.resource_manager.register(users)
response = users.display(users=output, over_limit=over_limit)
return response
@macro()
def embed(url=None):
consumer = oembed.OEmbedConsumer()
endpoint = oembed.OEmbedEndpoint(
'http://www.youtube.com/oembed', ['http://*.youtube.com/*', 'https://*.youtube.com/*'])
consumer.addEndpoint(endpoint)
try:
html = consumer.embed(url)['html']
except oembed.OEmbedNoEndpoint:
html = None
if html:
# youtube has a trailing ")" at the moment
html = html.rstrip(')')
# convert iframe src from http to https, to avoid mixed security blocking when used on an https page
html = BeautifulSoup(html)
embed_url = html.find('iframe').get('src')
if embed_url:
embed_url = urlparse(embed_url)
if embed_url.scheme == 'http':
embed_url = urlunparse(['https'] + list(embed_url[1:]))
else:
embed_url = embed_url.geturl()
html.find('iframe')['src'] = embed_url
return jinja2.Markup('<p>%s</p>' % html)
return '[[embed url=%s]]' % url
| {
"content_hash": "01626e35016df3dc27659345c9ac2633",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 113,
"avg_line_length": 36.11159737417943,
"alnum_prop": 0.5922559534630067,
"repo_name": "heiths/allura",
"id": "c65c504c0488bfb1d3c2e631468aa2cee9ccc7d5",
"size": "17373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Allura/allura/lib/macro.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "173671"
},
{
"name": "HTML",
"bytes": "751039"
},
{
"name": "JavaScript",
"bytes": "1136845"
},
{
"name": "Makefile",
"bytes": "7788"
},
{
"name": "Puppet",
"bytes": "6872"
},
{
"name": "Python",
"bytes": "4238265"
},
{
"name": "RAML",
"bytes": "26153"
},
{
"name": "Ruby",
"bytes": "7006"
},
{
"name": "Shell",
"bytes": "131827"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
"""
test_romannumeral
----------------------------------
Tests for `romannumeral` module.
"""
import unittest
from romannumeral import RomanNumeral
from romannumeral import ParseError
from romannumeral import OutOfRangeError
class TestRomannumeral(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_create_roman_from_int(self):
r = RomanNumeral(10)
self.assertEqual(r.value, 10)
self.assertEqual(r.string, 'X')
def test_create_roman_from_str(self):
r = RomanNumeral('X')
self.assertEqual(r.value, 10)
self.assertEqual(r.string, 'X')
def test_create_roman_exhaustive(self):
for n in range(10000):
if n == 0 or n >= 4000:
self.assertRaises(OutOfRangeError, RomanNumeral, n)
else:
r = RomanNumeral(n)
self.assertEqual(r.value, n)
def test_roman_from_badstring(self):
""" Roman from malformed string should through parse exception """
# no random sstring to roman
self.assertRaises(ParseError, RomanNumeral, 'dfadsfafaf')
# no string representation of number
self.assertRaises(ParseError, RomanNumeral, '101')
# no lower case
self.assertRaises(ParseError, RomanNumeral, 'xviii')
# no spaces
self.assertRaises(ParseError, RomanNumeral, 'X V I I I')
def test_roman_from_decimal(self):
""" Roman from malformed string should through parse exception """
self.assertRaises(ParseError, RomanNumeral, 3.14)
def test_roman_from_negative(self):
""" Roman below 0 throw an overflow exception """
self.assertRaises(OutOfRangeError, RomanNumeral, -1)
def test_roman_from_over_3999(self):
""" Roman over 3999 throw an overflow exception """
self.assertRaises(OutOfRangeError, RomanNumeral, 9001)
def test_roman_addition(self):
x = 2000
for y in range(1, 4000):
if 0 < x + y < 4000:
roman_math = RomanNumeral(x) + RomanNumeral(y)
self.assertEqual(roman_math, RomanNumeral(x + y))
else:
self.assertRaises(OutOfRangeError, RomanNumeral, x + y)
def test_roman_subtraction(self):
x = 2000
for y in range(1, 4000):
if 0 < x - y < 4000:
roman_math = RomanNumeral(x) - RomanNumeral(y)
self.assertEqual(roman_math, RomanNumeral(x - y))
else:
self.assertRaises(OutOfRangeError, RomanNumeral, x - y)
def test_roman_multiplication(self):
x = 10
for y in range(1, 4000):
if 0 < x * y < 4000:
roman_math = RomanNumeral(x) * RomanNumeral(y)
self.assertEqual(roman_math, RomanNumeral(x * y))
else:
self.assertRaises(OutOfRangeError, RomanNumeral, x * y)
def test_roman_division(self):
x = 3999
for y in range(1, 4000):
if 0 < x / y < 4000:
roman_math = RomanNumeral(x) / RomanNumeral(y)
self.assertEqual(roman_math, RomanNumeral(x // y))
else:
self.assertRaises(OutOfRangeError, RomanNumeral, x // y)
def test_roman_exponent(self):
x = 2
for y in range(1, 60):
if 0 < x ** y < 4000:
roman_math = RomanNumeral(x) ** RomanNumeral(y)
self.assertEqual(roman_math, RomanNumeral(x ** y))
else:
self.assertRaises(OutOfRangeError, RomanNumeral, x ** y)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "af2a6e9b24572b2cf2840a8da3eb3a97",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 74,
"avg_line_length": 28.765625,
"alnum_prop": 0.5760456273764258,
"repo_name": "jay3686/romannumeral",
"id": "c76802f2f79ee70c0d3a1d61fe181dd2b9dd40bb",
"size": "3729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_romannumeral.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17365"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
"""Tests boosted_trees prediction kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.kernels.boosted_trees import boosted_trees_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
class TrainingPredictionOpsTest(test_util.TensorFlowTestCase):
"""Tests prediction ops for training."""
def testCachedPredictionOnEmptyEnsemble(self):
"""Tests that prediction on a dummy ensemble does not fail."""
with self.test_session() as session:
# Create a dummy ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto='')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# No previous cached values.
cached_tree_ids = [0, 0]
cached_node_ids = [0, 0]
# We have two features: 0 and 1. Values don't matter here on a dummy
# ensemble.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Nothing changed.
self.assertAllClose(cached_tree_ids, new_tree_ids)
self.assertAllClose(cached_node_ids, new_node_ids)
self.assertAllClose([[0], [0]], logits_updates)
def testNoCachedPredictionButTreeExists(self):
"""Tests that predictions are updated once trees are added."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, none were cached before.
cached_tree_ids = [0, 0]
cached_node_ids = [0, 0]
feature_0_values = [67, 5]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the first tree.
self.assertAllClose([0, 0], new_tree_ids)
self.assertAllClose([2, 1], new_node_ids)
self.assertAllClose([[0.1 * 8.79], [0.1 * 1.14]], logits_updates)
def testCachedPredictionIsCurrent(self):
"""Tests that prediction based on previous node in the tree works."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
scalar: -2
}
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 0.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 2]
# We have two features: 0 and 1. Values don't matter because trees didn't
# change.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Nothing changed.
self.assertAllClose(cached_tree_ids, new_tree_ids)
self.assertAllClose(cached_node_ids, new_node_ids)
self.assertAllClose([[0], [0]], logits_updates)
def testCachedPredictionFromTheSameTree(self):
"""Tests that prediction based on previous node in the tree works."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
scalar: -2
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 7
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 7.14
}
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
scalar: -4.375
}
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
nodes {
leaf {
scalar: -5.875
}
}
nodes {
leaf {
scalar: -2.075
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 0.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 0]
# We have two features: 0 and 1.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are still in the same tree.
self.assertAllClose([0, 0], new_tree_ids)
# When using the full tree, the first example will end up in node 4,
# the second in node 5.
self.assertAllClose([4, 5], new_node_ids)
# Full predictions for each instance would be 8.79 and -5.875,
# so an update from the previous cached values lr*(7.14 and -2) would be
# 1.65 and -3.875, and then multiply them by 0.1 (lr)
self.assertAllClose([[0.1 * 1.65], [0.1 * -3.875]], logits_updates)
def testCachedPredictionFromPreviousTree(self):
"""Tests the predictions work when we have cache from previous trees."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
}
nodes {
leaf {
scalar: 7
}
}
nodes {
leaf {
scalar: 5
}
}
nodes {
leaf {
scalar: 6
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
scalar: -7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
}
tree_metadata {
is_finalized: true
}
tree_metadata {
is_finalized: true
}
tree_metadata {
is_finalized: false
}
tree_weights: 0.1
tree_weights: 0.1
tree_weights: 0.1
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 2.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 0]
# We have two features: 0 and 1.
feature_0_values = [36, 32]
feature_1_values = [11, 27]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Example 1 will get to node 3 in tree 1 and node 2 of tree 2
# Example 2 will get to node 2 in tree 1 and node 1 of tree 2
# We are in the last tree.
self.assertAllClose([2, 2], new_tree_ids)
# When using the full tree, the first example will end up in node 4,
# the second in node 5.
self.assertAllClose([2, 1], new_node_ids)
# Example 1: tree 0: 8.79, tree 1: 5.0, tree 2: 5.0 = >
# change = 0.1*(5.0+5.0)
# Example 2: tree 0: 1.14, tree 1: 7.0, tree 2: -7 = >
# change= 0.1(1.14+7.0-7.0)
self.assertAllClose([[1], [0.114]], logits_updates)
def testCachedPredictionFromTheSameTreeWithPostPrunedNodes(self):
"""Tests that prediction based on previous node in the tree works."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id:0
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0, 0, 0, 0, 0]
# Leaves 3,4, 7 and 8 got deleted during post-pruning, leaves 5 and 6
# changed the ids to 3 and 4 respectively.
cached_node_ids = [3, 4, 5, 6, 7, 8]
# We have two features: 0 and 1.
feature_0_values = [12, 17, 35, 36, 23, 11]
feature_1_values = [12, 12, 17, 18, 123, 24]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are still in the same tree.
self.assertAllClose([0, 0, 0, 0, 0, 0], new_tree_ids)
# Examples from leaves 3,4,7,8 should be in leaf 1, examples from leaf 5
# and 6 in leaf 3 and 4.
self.assertAllClose([1, 1, 3, 4, 1, 1], new_node_ids)
cached_values = [[0.08], [0.093], [0.0553], [0.0783], [0.15 + 0.08],
[0.5 + 0.08]]
self.assertAllClose([[0.01], [0.01], [0.0553], [0.0783], [0.01], [0.01]],
logits_updates + cached_values)
def testCachedPredictionFromThePreviousTreeWithPostPrunedNodes(self):
"""Tests that prediction based on previous node in the tree works."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id:0
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
trees {
nodes {
leaf {
scalar: 0.55
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 4
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0, 0, 0, 0, 0]
# Leaves 3,4, 7 and 8 got deleted during post-pruning, leaves 5 and 6
# changed the ids to 3 and 4 respectively.
cached_node_ids = [3, 4, 5, 6, 7, 8]
# We have two features: 0 and 1.
feature_0_values = [12, 17, 35, 36, 23, 11]
feature_1_values = [12, 12, 17, 18, 123, 24]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the last tree.
self.assertAllClose([1, 1, 1, 1, 1, 1], new_tree_ids)
# Examples from leaves 3,4,7,8 should be in leaf 1, examples from leaf 5
# and 6 in leaf 3 and 4 in tree 0. For tree 1, all of the examples are in
# the root node.
self.assertAllClose([0, 0, 0, 0, 0, 0], new_node_ids)
cached_values = [[0.08], [0.093], [0.0553], [0.0783], [0.15 + 0.08],
[0.5 + 0.08]]
root = 0.55
self.assertAllClose([[root + 0.01], [root + 0.01], [root + 0.0553],
[root + 0.0783], [root + 0.01], [root + 0.01]],
logits_updates + cached_values)
def testCachedPredictionTheWholeTreeWasPruned(self):
"""Tests that prediction based on previous node in the tree works."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
leaf {
scalar: 0.00
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -6.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 5.0
}
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [
0,
0,
]
# The predictions were cached in 1 and 2, both were pruned to the root.
cached_node_ids = [1, 2]
# We have two features: 0 and 1.These are not going to be used anywhere.
feature_0_values = [12, 17]
feature_1_values = [12, 12]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the last tree.
self.assertAllClose([0, 0], new_tree_ids)
self.assertAllClose([0, 0], new_node_ids)
self.assertAllClose([[-6.0], [5.0]], logits_updates)
class PredictionOpsTest(test_util.TensorFlowTestCase):
"""Tests prediction ops for inference."""
def testPredictionMultipleTree(self):
"""Tests the predictions work when we have multiple trees."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
}
nodes {
leaf {
scalar: 7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
nodes {
leaf {
scalar: 6.0
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
scalar: -7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
}
tree_weights: 0.1
tree_weights: 0.2
tree_weights: 1.0
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [36, 32]
feature_1_values = [11, 27]
# Example 1: tree 0: 1.14, tree 1: 5.0, tree 2: 5.0 = >
# logit = 0.1*5.0+0.2*5.0+1*5
# Example 2: tree 0: 1.14, tree 1: 7.0, tree 2: -7 = >
# logit= 0.1*1.14+0.2*7.0-1*7.0
expected_logits = [[6.114], [-5.486]]
# Do with parallelization, e.g. EVAL
predict_op = boosted_trees_ops.predict(
tree_ensemble_handle,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits = session.run(predict_op)
self.assertAllClose(expected_logits, logits)
# Do without parallelization, e.g. INFER - the result is the same
predict_op = boosted_trees_ops.predict(
tree_ensemble_handle,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits = session.run(predict_op)
self.assertAllClose(expected_logits, logits)
if __name__ == '__main__':
googletest.main()
| {
"content_hash": "3b9e11bc28e3c1217107be0b1f6f61f4",
"timestamp": "",
"source": "github",
"line_count": 902,
"max_line_length": 80,
"avg_line_length": 29.446784922394677,
"alnum_prop": 0.503708444712172,
"repo_name": "eaplatanios/tensorflow",
"id": "54f33f336015cc9cb50658941b8e157cc1b94df9",
"size": "27250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "163987"
},
{
"name": "C++",
"bytes": "34944901"
},
{
"name": "CMake",
"bytes": "5123"
},
{
"name": "CSS",
"bytes": "9206"
},
{
"name": "Go",
"bytes": "1047216"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "423531"
},
{
"name": "JavaScript",
"bytes": "3127"
},
{
"name": "Jupyter Notebook",
"bytes": "1833814"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "19718973"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Scala",
"bytes": "3606806"
},
{
"name": "Shell",
"bytes": "352897"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
"""Example code for
Service : ReportDefinitionService
Operation: mutate (ADD)
API Reference: https://github.com/yahoojp-marketing/sponsored-search-api-documents/blob/201901/docs/en/api_reference/services/ReportDefinitionService.md
Generated by 'api_reference_example_generator.py' using code template 'examples/sample_template.py.template'
"""
import logging
import json
from yahooads import promotionalads
logging.basicConfig(level=logging.INFO)
# logging.getLogger('suds.client').setLevel(logging.DEBUG)
# logging.getLogger('suds.transport').setLevel(logging.DEBUG)
SERVICE = 'ReportDefinitionService'
OPERATION = 'mutate (ADD)'
OPERAND = {
"operator": "ADD",
"accountId": "SAMPLE-ACCOUNT-ID",
"operand": {
"reportName": "Sample Report Definition",
"reportType": "ACCOUNT",
"dateRangeType": "CUSTOM_DATE",
"dateRange": {
"startDate": "19700101",
"endDate": "20371231"
},
"fields": [
"COST",
"IMPS",
"CLICKS",
"CLICK_RATE",
"AVG_CPC",
"AVG_DELIVER_RANK",
"TRACKING_URL",
"CONVERSIONS",
"CONV_RATE",
"CONV_VALUE",
"COST_PER_CONV",
"VALUE_PER_CONV",
"NETWORK",
"CLICK_TYPE",
"DEVICE",
"DAY",
"DAY_OF_WEEK",
"QUARTER",
"YEAR",
"MONTH",
"MONTH_OF_YEAR",
"WEEK",
"HOUR_OF_DAY"
],
"sortFields": {
"type": "ASC",
"field": "CLICKS"
},
"filters": {
"field": "COST",
"operator": "NOT_EQUALS",
"value": "100"
},
"isTemplate": "TRUE",
"intervalType": "SPECIFYDAY",
"specifyDay": "1",
"format": "CSV",
"encode": "UTF-8",
"language": "JA",
"compress": "NONE",
"includeZeroImpressions": "TRUE",
"includeDeleted": "TRUE"
}
}
"""
SAMPLE RESPONSE = {
"rval": {
"ListReturnValue.Type": "ReportDefinitionReturnValue",
"Operation.Type": "ADD",
"values": {
"operationSucceeded": "true",
"reportDefinition": {
"accountId": "SAMPLE-ACCOUNT-ID",
"reportId": "22222",
"reportName": "Sample Report Definition",
"reportType": "ACCOUNT",
"dateRangeType": "CUSTOM_DATE",
"dateRange": {
"startDate": "19700101",
"endDate": "20371231"
},
"fields": [
"COST",
"IMPS",
"CLICKS",
"CLICK_RATE",
"AVG_CPC",
"AVG_DELIVER_RANK",
"TRACKING_URL",
"CONVERSIONS",
"CONV_RATE",
"CONV_VALUE",
"COST_PER_CONV",
"VALUE_PER_CONV",
"NETWORK",
"CLICK_TYPE",
"DEVICE",
"DAY",
"DAY_OF_WEEK",
"QUARTER",
"YEAR",
"MONTH",
"MONTH_OF_YEAR",
"WEEK",
"HOUR_OF_DAY"
],
"sortFields": {
"type": "ASC",
"field": "CLICKS"
},
"filters": {
"field": "COST",
"operator": "NOT_EQUALS",
"value": "100"
},
"isTemplate": "TRUE",
"intervalType": "SPECIFYDAY",
"specifyDay": "1",
"format": "CSV",
"encode": "UTF-8",
"language": "JA",
"compress": "NONE",
"includeZeroImpressions": "TRUE",
"includeDeleted": "TRUE"
}
}
}
}
"""
def main():
client = promotionalads.PromotionalAdsClient.LoadFromConfiguration()
service = client.GetService(SERVICE)
print("REQUEST : {}.{}\n{}".format(SERVICE, OPERATION, json.dumps(OPERAND, indent=2)))
try:
if OPERATION == "get":
response = service.get(OPERAND)
elif OPERATION.startswith("get"):
get_method = getattr(service, OPERATION)
response = get_method(OPERAND)
elif OPERATION.startswith("mutate"):
response = service.mutate(OPERAND)
else:
raise("Unknown Operation '{}'".format(OPERATION))
print("RESPONSE :\n{}".format(response))
except Exception as e:
print("Exception at '{}' operations \n{}".format(SERVICE, e))
raise e
if __name__ == '__main__':
main()
| {
"content_hash": "dcc0790864a37113feba4e42144dd190",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 152,
"avg_line_length": 25.548192771084338,
"alnum_prop": 0.5201603395425607,
"repo_name": "becomejapan/yahooads-python-lib",
"id": "9c2114384b5217d56898f991e5f1c9cc14ceb96d",
"size": "4840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ReportDefinitionService/ReportDefinitionService_mutate_ADD.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30856"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import celery
import kombu
import os
import platform as _platform
from celery import datastructures
from celery import platforms
from celery.utils.text import pretty
from celery.utils.imports import qualname
from .defaults import find
SETTINGS_INFO = """%s %s"""
BUGREPORT_INFO = """
software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s
platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s
loader -> %(loader)s
settings -> transport:%(transport)s results:%(results)s
%(human_settings)s
"""
class Settings(datastructures.ConfigurationView):
@property
def CELERY_RESULT_BACKEND(self):
"""Resolves deprecated alias ``CELERY_BACKEND``."""
return self.first("CELERY_RESULT_BACKEND", "CELERY_BACKEND")
@property
def BROKER_TRANSPORT(self):
"""Resolves compat aliases :setting:`BROKER_BACKEND`
and :setting:`CARROT_BACKEND`."""
return self.first("BROKER_TRANSPORT",
"BROKER_BACKEND", "CARROT_BACKEND")
@property
def BROKER_BACKEND(self):
"""Deprecated compat alias to :attr:`BROKER_TRANSPORT`."""
return self.BROKER_TRANSPORT
@property
def BROKER_HOST(self):
return (os.environ.get("CELERY_BROKER_URL") or
self.first("BROKER_URL", "BROKER_HOST"))
def without_defaults(self):
# the last stash is the default settings, so just skip that
return Settings({}, self._order[:-1])
def find_value_for_key(self, name, namespace="celery"):
return self.get_by_parts(*self.find_option(name, namespace)[:-1])
def find_option(self, name, namespace="celery"):
return find(name, namespace)
def get_by_parts(self, *parts):
return self["_".join(filter(None, parts))]
def humanize(self):
return "\n".join(SETTINGS_INFO % (key + ':', pretty(value, width=50))
for key, value in self.without_defaults().iteritems())
class AppPickler(object):
"""Default application pickler/unpickler."""
def __call__(self, cls, *args):
kwargs = self.build_kwargs(*args)
app = self.construct(cls, **kwargs)
self.prepare(app, **kwargs)
return app
def prepare(self, app, **kwargs):
app.conf.update(kwargs["changes"])
def build_kwargs(self, *args):
return self.build_standard_kwargs(*args)
def build_standard_kwargs(self, main, changes, loader, backend, amqp,
events, log, control, accept_magic_kwargs):
return dict(main=main, loader=loader, backend=backend, amqp=amqp,
changes=changes, events=events, log=log, control=control,
set_as_current=False,
accept_magic_kwargs=accept_magic_kwargs)
def construct(self, cls, **kwargs):
return cls(**kwargs)
def _unpickle_app(cls, pickler, *args):
return pickler()(cls, *args)
def bugreport(app):
return BUGREPORT_INFO % {"system": _platform.system(),
"arch": _platform.architecture(),
"py_i": platforms.pyimplementation(),
"celery_v": celery.__version__,
"kombu_v": kombu.__version__,
"py_v": _platform.python_version(),
"transport": app.conf.BROKER_TRANSPORT,
"results": app.conf.CELERY_RESULT_BACKEND,
"human_settings": app.conf.humanize(),
"loader": qualname(app.loader.__class__)}
| {
"content_hash": "544a5574adb90c9955a6252d202f8b47",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 77,
"avg_line_length": 32.763636363636365,
"alnum_prop": 0.5979467258601554,
"repo_name": "couchbaselabs/celery",
"id": "00a97a82ff33929bd5be6c71c091aef6fd479d1a",
"size": "3604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "celery/app/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Wrapper for signalfd(2) system call.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import logging
import os
import ctypes
from ctypes import (
c_int,
c_void_p,
c_uint32,
c_uint64,
c_uint8,
c_int32,
)
from ctypes.util import find_library
import enum
from ._sigsetops import (
SigSet,
sigaddset,
sigfillset,
)
_LOGGER = logging.getLogger(__name__)
###############################################################################
# Map the C interface
_LIBC_PATH = find_library('c')
_LIBC = ctypes.CDLL(_LIBC_PATH, use_errno=True)
if getattr(_LIBC, 'signalfd', None) is None:
raise ImportError('Unsupported libc version found: %s' % _LIBC_PATH)
###############################################################################
# int signalfd(int fd, const sigset_t *mask, int flags);
_SIGNALFD_DECL = ctypes.CFUNCTYPE(c_int, c_int, c_void_p, c_int,
use_errno=True)
_SIGNALFD = _SIGNALFD_DECL(('signalfd', _LIBC))
def signalfd(sigset, flags=0, prev_fd=-1):
"""create/update a signal file descriptor.
"""
if isinstance(sigset, SigSet):
new_set = sigset
elif sigset == 'all':
new_set = SigSet()
sigfillset(new_set)
else:
new_set = SigSet()
for signum in sigset:
sigaddset(new_set, signum)
new_set_p = ctypes.pointer(new_set)
fileno = _SIGNALFD(prev_fd, new_set_p, flags)
if fileno < 0:
err = ctypes.get_errno()
raise OSError(err, os.strerror(err),
'signalfd(%r, %r, %r)' % (prev_fd, new_set, flags))
return fileno
###############################################################################
# Constants copied from sys/signalfd.h
#
# See man signalfd(2) for more details.
#
class SFDFlags(enum.IntEnum):
"""Flags supported by SignalFD.
"""
#: Set the O_NONBLOCK file status flag on the new open file description.
#: Using this flag saves extra calls to fcntl(2) to achieve the same
#: result.
NONBLOCK = 0o4000
#: Set the close-on-exec (FD_CLOEXEC) flag on the new file descriptor. See
#: the description of the O_CLOEXEC flag in open(2) for reasons why this
#: may be useful.
CLOEXEC = 0o2000000
#: Set the O_NONBLOCK file status flag on the new open file description. Using
#: this flag saves extra calls to fcntl(2) to achieve the same result.
#: (since Linux 2.6.27)
SFD_NONBLOCK = SFDFlags.NONBLOCK
#: Set the close-on-exec (FD_CLOEXEC) flag on the new file descriptor. See the
#: description of the O_CLOEXEC flag in open(2) for reasons why this may be
#: useful.
#: (since Linux 2.6.27)
SFD_CLOEXEC = SFDFlags.CLOEXEC
###############################################################################
# The signalfd_siginfo structure
#
class SFDSigInfo(ctypes.Structure):
"""The signalfd_siginfo structure.
The format of the signalfd_siginfo structure(s) returned by read(2)s from a
signalfd file descriptor is as follows:
struct signalfd_siginfo {
uint32_t ssi_signo; /* Signal number */
int32_t ssi_errno; /* Error number (unused) */
int32_t ssi_code; /* Signal code */
uint32_t ssi_pid; /* PID of sender */
uint32_t ssi_uid; /* Real UID of sender */
int32_t ssi_fd; /* File descriptor (SIGIO) */
uint32_t ssi_tid; /* Kernel timer ID (POSIX timers)
uint32_t ssi_band; /* Band event (SIGIO) */
uint32_t ssi_overrun; /* POSIX timer overrun count */
uint32_t ssi_trapno; /* Trap number that caused signal */
int32_t ssi_status; /* Exit status or signal (SIGCHLD) */
int32_t ssi_int; /* Integer sent by sigqueue(2) */
uint64_t ssi_ptr; /* Pointer sent by sigqueue(2) */
uint64_t ssi_utime; /* User CPU time consumed (SIGCHLD) */
uint64_t ssi_stime; /* System CPU time consumed (SIGCHLD) */
uint64_t ssi_addr; /* Address that generated signal
(for hardware-generated signals) */
uint8_t pad[X]; /* Pad size to 128 bytes (allow for
additional fields in the future) */
};
"""
# pylint: disable=bad-whitespace
_FIELDS = [
('ssi_signo', c_uint32), #: Signal number
('ssi_errno', c_int32), #: Error number (unused)
('ssi_code', c_int32), #: Signal code
('ssi_pid', c_uint32), #: PID of sender
('ssi_uid', c_uint32), #: Real UID of sender
('ssi_fd', c_int32), #: File descriptor (SIGIO)
('ssi_tid', c_uint32), #: Kernel timer ID (POSIX timers)
('ssi_band', c_uint32), #: Band event (SIGIO)
('ssi_overrun', c_uint32), #: POSIX timer overrun count
('ssi_trapno', c_uint32), #: Trap number that caused signal
('ssi_status', c_int32), #: Exit status or signal (SIGCHLD)
('ssi_int', c_int32), #: Integer sent by sigqueue(2)
('ssi_ptr', c_uint64), #: Pointer sent by sigqueue(2)
('ssi_utime', c_uint64), #: User CPU time consumed (SIGCHLD)
('ssi_stime', c_uint64), #: System CPU time consumed (SIGCHLD)
('ssi_addr', c_uint64), #: Address that generated signal
]
__PADWORDS = 128 - sum([ctypes.sizeof(field[1]) for
field in _FIELDS])
_fields_ = _FIELDS + [
('_pad', c_uint8 * __PADWORDS), # Pad size to 128 bytes (allow for
# additional fields in the future)
]
def signalfd_read(sfd):
"""Read signalfd_siginfo data from a signalfd filedescriptor.
"""
try:
data = os.read(sfd, ctypes.sizeof(SFDSigInfo))
except OSError as err:
# Ignore signal interruptions
if err.errno != errno.EINTR:
raise
return None
return SFDSigInfo.from_buffer_copy(data)
###############################################################################
__all__ = [
'SFD_NONBLOCK',
'SFD_CLOEXEC',
'signalfd',
'signalfd_read',
]
| {
"content_hash": "0f8d592dd0b474d31488104bd9a7a9f3",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 79,
"avg_line_length": 33.62765957446808,
"alnum_prop": 0.5472951597595698,
"repo_name": "Morgan-Stanley/treadmill",
"id": "bb5d65dda49a3b80cacdeb8777c33f059b2d2a10",
"size": "6322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/syscall/signalfd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3372983"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
} |
"""
Samsara
~~~~~~~
Samsara service package
"""
from setuptools import setup, find_packages
setup(
name='samsara_sdk',
version='',
url='https://github.com/samsara/samsara',
author='Samsara Developers',
author_email='samsara.systems+info@gmail.com',
description="A Python Client for Samsaza's Ingestion-API",
long_description="""
# Install and use
to install use:
pip install samsara_sdk
to use please refer to the documentation at this website:
http://samsara-analytics.io/docs/clients/python-client/
""",
packages=find_packages(),
include_package_data=True,
platforms='any',
keywords=['analytics', 'client', 'samsara'],
tests_require=find_packages(include=['*-dev'])
)
| {
"content_hash": "3445bee51c93edd788f4b9accd20a5f3",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 62,
"avg_line_length": 20.243243243243242,
"alnum_prop": 0.6662216288384513,
"repo_name": "samsara/samsara",
"id": "5b7dddedc2a604043b20fc11c9cff3a1bf056c01",
"size": "773",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "clients/python/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "342475"
},
{
"name": "Go",
"bytes": "58894"
},
{
"name": "HCL",
"bytes": "39101"
},
{
"name": "Java",
"bytes": "24749"
},
{
"name": "Lua",
"bytes": "16152"
},
{
"name": "Objective-C",
"bytes": "104"
},
{
"name": "Python",
"bytes": "13974"
},
{
"name": "Ruby",
"bytes": "52737"
},
{
"name": "Shell",
"bytes": "75841"
},
{
"name": "Swift",
"bytes": "31918"
}
],
"symlink_target": ""
} |
import pandas as pd
import json
import numpy as np
class NpEncoder(json.JSONEncoder):
def default(self, obj):
"""
Converts the dictionary's values into a JSON serializable data type
"""
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
class ParsePsortExperimental:
def __init__(self, max_entries):
self.max_entries = max_entries
def parse_psortdb(self):
"""
To parse database psortdb Experimental-PSORTdb-v4.00.tsv file
and create JSON files conforming to datanator_pattern/observation_compiled.json
Args:
max_entries: int
number of rows to parse.
A JSON file will be created for each of the tsv file's first <max_entries> rows
Return:
()
"""
data=pd.read_csv('Experimental-PSORTdb-v4.00.tsv',delimiter="\t")
data = data.where(pd.notnull(data), None)
for i in range(self.max_entries):
d={}
#entity
d["entity"]={}
d["entity"]["type"]="protein"
d["entity"]["name"]=str(data.iloc[i,6]).replace(".","")
if data.iloc[i,7] != None:
d["entity"]["synonyms"]=str(data.iloc[i,7]).split(",")
else:
d["entity"]["synonyms"]=[]
#identifiers
d["entity"]["identifiers"]=[]
uniprot={}
uniprot["name_space"]="uniprot_id"
uniprot["value"]=data.iloc[i,0]
ref_seq = {}
ref_seq["name_space"]="Refseq_Accession"
ref_seq["value"]=data.iloc[i,1]
other_accession = {}
other_accession["name_space"]="Other_Accession"
other_accession["value"]=data.iloc[i,2]
d["entity"]["identifiers"].append(uniprot)
d["entity"]["identifiers"].append(ref_seq)
d["entity"]["identifiers"].append(other_accession)
#localizations
d["value"]={}
if data.iloc[i,3] != None:
d["value"]["experimental_localization"] = str(data.iloc[i,3]).split(",")
else:
d["value"]["experimental_localization"] = []
if data.iloc[i,4] != None:
d["value"]["secondary_localizaton"] = str(data.iloc[i,4]).split(",")
else:
d["value"]["secondary_localizaton"] = []
#genotype
d["genotype"]={}
d["genotype"]["taxon"]={}
d["genotype"]["taxon"]["ncbi_taxonomy_id"]=data.iloc[i,9]
d["genotype"]["taxon"]["name"]=data.iloc[i,10]
#environment
d["environment"]={}
d["environment"]["GramStain"]=data.iloc[i,13]
#source
d["source"]={}
d["source"]["namespace"]="ePSORTdb"
d["source"]["value"]="Version "+str(data.iloc[i,17])
#name is the JSON file's name
if (data.iloc[i,0]!=None):
name = data.iloc[i,0] #SwissProt_ID
else:
name = data.iloc[i,2] #Other_Accession
with open("Experimental_PSortdb/"+name+".json","w+") as f:
json.dump(d,f,cls=NpEncoder,indent=4)
p1=ParsePsortExperimental(10)
p1.parse_psortdb()
| {
"content_hash": "89a864012915a7994109dfd36592b5eb",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 95,
"avg_line_length": 36.73737373737374,
"alnum_prop": 0.49546329392356336,
"repo_name": "KarrLab/kinetic_datanator",
"id": "1de0421c470a38956bfbd2a47bd89371544e0123",
"size": "3637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datanator/data_source/protein_localization/parse_psortdb_experimental.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1217"
},
{
"name": "Dockerfile",
"bytes": "171"
},
{
"name": "HTML",
"bytes": "50579"
},
{
"name": "Python",
"bytes": "980025"
}
],
"symlink_target": ""
} |
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the catchcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Catchcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Catchcoin")
return os.path.expanduser("~/.catchcoin")
def read_catchcoin_config(dbdir):
"""Read the catchcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "catchcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a catchcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 18332 if testnet else 8332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the catchcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(catchcoind):
info = catchcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
catchcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = catchcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(catchcoind):
address_summary = dict()
address_to_account = dict()
for info in catchcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = catchcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = catchcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-catchcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(catchcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(catchcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to catchcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = catchcoind.createrawtransaction(inputs, outputs)
signed_rawtx = catchcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(catchcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = catchcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(catchcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = catchcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(catchcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get catchcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send catchcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of catchcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_catchcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
catchcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(catchcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(catchcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(catchcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(catchcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = catchcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| {
"content_hash": "2ccfd6d13baa62be6f0d5dee5272be71",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.69444444444444,
"alnum_prop": 0.6185006665982976,
"repo_name": "tinybike/catchcoin",
"id": "cb2f2dd506a75607d0b38bfe97c9259e77600b79",
"size": "10135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "301826"
},
{
"name": "C++",
"bytes": "2879017"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Objective-C++",
"bytes": "6266"
},
{
"name": "Python",
"bytes": "95592"
},
{
"name": "Shell",
"bytes": "40370"
},
{
"name": "TypeScript",
"bytes": "10322619"
}
],
"symlink_target": ""
} |
import abc
import logging
from django.db import models
from django.utils import timezone
from framework import sentry
from osf.exceptions import ValidationValueError, ValidationTypeError
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import NonNaiveDateTimeField
from osf.utils import akismet, oopspam
from website import settings
logger = logging.getLogger(__name__)
def _get_akismet_client():
"""
AKISMET_APIKEY should be `None` for local testing.
:return:
"""
return akismet.AkismetClient(
apikey=settings.AKISMET_APIKEY,
website=settings.DOMAIN,
verify=bool(settings.AKISMET_APIKEY)
)
def _get_oopspam_client():
"""
OOPSPAM_APIKEY should be `None` for local testing.
:return:
"""
return oopspam.OOPSpamClient()
def _validate_reports(value, *args, **kwargs):
from osf.models import OSFUser
for key, val in value.items():
if not OSFUser.load(key):
raise ValidationValueError('Keys must be user IDs')
if not isinstance(val, dict):
raise ValidationTypeError('Values must be dictionaries')
if ('category' not in val or 'text' not in val or 'date' not in val or 'retracted' not in val):
raise ValidationValueError(
('Values must include `date`, `category`, ',
'`text`, `retracted` keys')
)
class SpamStatus(object):
UNKNOWN = None
FLAGGED = 1
SPAM = 2
HAM = 4
class SpamMixin(models.Model):
"""Mixin to add to objects that can be marked as spam.
"""
class Meta:
abstract = True
# # Node fields that trigger an update to search on save
# SPAM_UPDATE_FIELDS = {
# 'spam_status',
# }
spam_status = models.IntegerField(default=SpamStatus.UNKNOWN, null=True, blank=True, db_index=True)
spam_pro_tip = models.CharField(default=None, null=True, blank=True, max_length=200)
# Data representing the original spam indication
# - author: author name
# - author_email: email of the author
# - content: data flagged
# - headers: request headers
# - Remote-Addr: ip address from request
# - User-Agent: user agent from request
# - Referer: referrer header from request (typo +1, rtd)
spam_data = DateTimeAwareJSONField(default=dict, blank=True)
date_last_reported = NonNaiveDateTimeField(default=None, null=True, blank=True, db_index=True)
# Reports is a dict of reports keyed on reporting user
# Each report is a dictionary including:
# - date: date reported
# - retracted: if a report has been retracted
# - category: What type of spam does the reporter believe this is
# - text: Comment on the comment
reports = DateTimeAwareJSONField(
default=dict, blank=True, validators=[_validate_reports]
)
def flag_spam(self):
# If ham and unedited then tell user that they should read it again
if self.spam_status == SpamStatus.UNKNOWN:
self.spam_status = SpamStatus.FLAGGED
def remove_flag(self, save=False):
if self.spam_status != SpamStatus.FLAGGED:
return
for report in self.reports.values():
if not report.get('retracted', True):
return
self.spam_status = SpamStatus.UNKNOWN
if save:
self.save()
@property
def is_spam(self):
return self.spam_status == SpamStatus.SPAM
@property
def is_spammy(self):
return self.spam_status in [SpamStatus.FLAGGED, SpamStatus.SPAM]
@property
def is_ham(self):
return self.spam_status == SpamStatus.HAM
@property
def is_hammy(self):
return self.is_ham or (
self.spam_status == SpamStatus.UNKNOWN and self.is_assumed_ham
)
@property
def is_assumed_ham(self):
"""If True, will automatically skip spam checks.
Override to set criteria for assumed ham.
"""
return False
def report_abuse(self, user, save=False, **kwargs):
"""Report object is spam or other abuse of OSF
:param user: User submitting report
:param save: Save changes
:param kwargs: Should include category and message
:raises ValueError: if user is reporting self
"""
if user == self.user:
raise ValueError('User cannot report self.')
self.flag_spam()
date = timezone.now()
report = {'date': date, 'retracted': False}
report.update(kwargs)
if 'text' not in report:
report['text'] = None
self.reports[user._id] = report
self.date_last_reported = report['date']
if save:
self.save()
def retract_report(self, user, save=False):
"""Retract last report by user
Only marks the last report as retracted because there could be
history in how the object is edited that requires a user
to flag or retract even if object is marked as HAM.
:param user: User retracting
:param save: Save changes
"""
if user._id in self.reports:
if not self.reports[user._id]['retracted']:
self.reports[user._id]['retracted'] = True
self.remove_flag()
else:
raise ValueError('User has not reported this content')
if save:
self.save()
def confirm_ham(self, save=False, train_akismet=True):
# not all mixins will implement check spam pre-req, only submit ham when it was incorrectly flagged
if (
settings.SPAM_CHECK_ENABLED and
self.spam_data and self.spam_status in [SpamStatus.FLAGGED, SpamStatus.SPAM] and
train_akismet
):
client = _get_akismet_client()
client.submit_ham(
user_ip=self.spam_data['headers']['Remote-Addr'],
user_agent=self.spam_data['headers'].get('User-Agent'),
referrer=self.spam_data['headers'].get('Referer'),
comment_content=self.spam_data['content'],
comment_author=self.spam_data['author'],
comment_author_email=self.spam_data['author_email'],
)
logger.info('confirm_ham update sent')
self.spam_status = SpamStatus.HAM
if save:
self.save()
def confirm_spam(self, save=False, train_akismet=True):
# not all mixins will implement check spam pre-req, only submit spam when it was incorrectly flagged
if (
settings.SPAM_CHECK_ENABLED and
self.spam_data and self.spam_status in [SpamStatus.UNKNOWN, SpamStatus.HAM] and
train_akismet
):
client = _get_akismet_client()
client.submit_spam(
user_ip=self.spam_data['headers']['Remote-Addr'],
user_agent=self.spam_data['headers'].get('User-Agent'),
referrer=self.spam_data['headers'].get('Referer'),
comment_content=self.spam_data['content'],
comment_author=self.spam_data['author'],
comment_author_email=self.spam_data['author_email'],
)
logger.info('confirm_spam update sent')
self.spam_status = SpamStatus.SPAM
if save:
self.save()
@abc.abstractmethod
def check_spam(self, user, saved_fields, request_headers, save=False):
"""Must return is_spam"""
pass
def do_check_spam(self, author, author_email, content, request_headers, update=True):
if self.is_hammy:
return False
if self.is_spammy:
return True
akismet_client = _get_akismet_client()
oopspam_client = _get_oopspam_client()
remote_addr = request_headers['Remote-Addr']
user_agent = request_headers.get('User-Agent')
referer = request_headers.get('Referer')
akismet_is_spam, pro_tip = akismet_client.check_comment(
user_ip=remote_addr,
user_agent=user_agent,
referrer=referer,
comment_content=content,
comment_author=author,
comment_author_email=author_email
)
try:
oopspam_is_spam, oopspam_details = oopspam_client.check_content(
user_ip=remote_addr,
content=content
)
except oopspam.OOPSpamClientError:
sentry.log_exception()
oopspam_is_spam = False
if update:
self.spam_pro_tip = pro_tip
self.spam_data['headers'] = {
'Remote-Addr': remote_addr,
'User-Agent': user_agent,
'Referer': referer,
}
self.spam_data['content'] = content
self.spam_data['author'] = author
self.spam_data['author_email'] = author_email
if akismet_is_spam and oopspam_is_spam:
self.flag_spam()
self.spam_data['who_flagged'] = 'both'
self.spam_data['oopspam_data'] = oopspam_details
elif akismet_is_spam:
self.flag_spam()
self.spam_data['who_flagged'] = 'akismet'
elif oopspam_is_spam:
self.flag_spam()
self.spam_data['who_flagged'] = 'oopspam'
self.spam_data['oopspam_data'] = oopspam_details
return akismet_is_spam or oopspam_is_spam
| {
"content_hash": "d0b20b72246401db2a53ada3600c0110",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 108,
"avg_line_length": 35.48134328358209,
"alnum_prop": 0.5985908087075402,
"repo_name": "Johnetordoff/osf.io",
"id": "b650878fbb48cc28a6362eabc232d47daaf5a403",
"size": "9509",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "osf/models/spam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373738"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11587197"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from time import time
from sentry.exceptions import InvalidConfiguration
from sentry.ratelimits.base import RateLimiter
from sentry.utils.hashlib import md5_text
from sentry.utils.redis import get_cluster_from_options
class RedisRateLimiter(RateLimiter):
window = 60
def __init__(self, **options):
self.cluster, options = get_cluster_from_options('SENTRY_RATELIMITER_OPTIONS', options)
def validate(self):
try:
with self.cluster.all() as client:
client.ping()
except Exception as e:
raise InvalidConfiguration(six.text_type(e))
def is_limited(self, key, limit, project=None, window=None):
if window is None:
window = self.window
key_hex = md5_text(key).hexdigest()
bucket = int(time() / window)
if project:
key = 'rl:%s:%s:%s' % (key_hex, project.id, bucket)
else:
key = 'rl:%s:%s' % (key_hex, bucket)
with self.cluster.map() as client:
result = client.incr(key)
client.expire(key, window)
return result.value > limit
| {
"content_hash": "9b4e11ee48e82deb03b0a9abbee9f821",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 95,
"avg_line_length": 28,
"alnum_prop": 0.6215986394557823,
"repo_name": "JackDanger/sentry",
"id": "e7aaeea90eec21e56476ac45b90befd1de7c5dc5",
"size": "1176",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "src/sentry/ratelimits/redis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
} |
"""Constants for Owlet component."""
SENSOR_OXYGEN_LEVEL = "oxygen_level"
SENSOR_HEART_RATE = "heart_rate"
SENSOR_BASE_STATION = "base_station_on"
SENSOR_MOVEMENT = "movement"
| {
"content_hash": "be807533f3c309c0d18281824c54e7ef",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 29.5,
"alnum_prop": 0.7344632768361582,
"repo_name": "Cinntax/home-assistant",
"id": "f145100dbc41bd43aa9a140f178ee61c8e75f09a",
"size": "177",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/owlet/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
} |
import sys
import wmi
import ctypes
import time
#For Development
DEBUG = True
FAIL_SILENTLY = False
#For Code
ONE_TIME_ALERT_ONLY = True
ShouldStartCharging = True
ShouldStopCharging = True
connect_charger = """Battery Level < 40%
Charging State : Disconnected
*** CONNECT CHARGER ***"""
disconnect_charger = """Battery Level > 80%
Charging State : Connected
*** DISCONNECT CHARGER ***"""
def Mbox(title, text, style):
ctypes.windll.user32.MessageBoxA(0, text, title, style)
def get_batstats():
#c = wmi.WMI()
t = wmi.WMI(moniker = "//./root/wmi")
batts = t.ExecQuery('Select * from BatteryFullChargedCapacity')
for i, b in enumerate(batts):
full_charge = b.FullChargedCapacity * 1.0
batts = t.ExecQuery('Select * from BatteryStatus where Voltage > 0')
for i, b in enumerate(batts):
return (full_charge, b.RemainingCapacity, b.Charging)
def do_batlogic():
global ShouldStartCharging, ShouldStopCharging
charge_capacity, charge_current, charge_status = get_batstats()
charge_percentage = charge_current / charge_capacity * 100
if DEBUG:
print str(charge_percentage), str(charge_status)
IS_CHARGING = charge_status
IS_DISCHARGING = not IS_CHARGING
if IS_DISCHARGING:
#Reset Stop Charging Alter Toggle
ShouldStopCharging = True
if DEBUG:
print "Should Stop Charging == True"
elif IS_CHARGING:
#Reset Start Charging Alert Toggle
ShouldStartCharging = True
if DEBUG:
print "Should Start Charging == True"
#Special case when the laptop stops charging at 100%
if charge_percentage == 100 and ShouldStopCharging:
Mbox('Battery Level Alert', disconnect_charger, 0)
if ONE_TIME_ALERT_ONLY:
ShouldStopCharging = False
else:
pass
if DEBUG:
print "100% Charge"
elif charge_percentage > 80 and IS_CHARGING and ShouldStopCharging:
Mbox('Battery Level Alert', disconnect_charger, 0)
if ONE_TIME_ALERT_ONLY:
ShouldStopCharging = False
else:
pass
if DEBUG:
print "> 80 Charge"
elif charge_percentage < 40 and IS_DISCHARGING and ShouldStartCharging:
Mbox('Battery Level Alert', connect_charger, 0)
if ONE_TIME_ALERT_ONLY:
ShouldStartCharging = False
else:
pass
if DEBUG:
print "< 40% Charge"
while True:
time.sleep(5)
if FAIL_SILENTLY:
try:
if DEBUG:
print "Cycling..."
do_batlogic()
except:
if DEBUG:
print "Error... Stopping Cycles"
break
else:
if DEBUG:
print "Cycling..."
do_batlogic()
sys.exit()
| {
"content_hash": "2c28a6257d0678e6271dd916dc8303ef",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 75,
"avg_line_length": 26.70754716981132,
"alnum_prop": 0.6114447191805016,
"repo_name": "nitred/batterylevelalert",
"id": "44d10fc344a1e59c4ab43be50eed0eb2d500251b",
"size": "2831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batlvl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2831"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.shortcuts import render_to_response
from ac_example.forms import ExampleForm
def example(request):
valid = False
if request.GET:
form = ExampleForm(request.GET)
if form.is_valid():
valid = True
else:
form = ExampleForm()
return render_to_response("autocomplete.html", {'form':form,'valid':valid})
| {
"content_hash": "03cbd94bf25e55e24dc12f527161c7b1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 26.733333333333334,
"alnum_prop": 0.6708229426433915,
"repo_name": "henriquebastos/django-autocomplete",
"id": "a7e9e20eaeb3744c0c3f60c4213a24319cd3362a",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ac_example/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9182"
}
],
"symlink_target": ""
} |
"""
Installs and configures nova
"""
import os
import uuid
import logging
import platform
import socket
from packstack.installer import basedefs, processors, utils, validators
from packstack.installer.exceptions import ScriptRuntimeError
from packstack.modules.shortcuts import get_mq
from packstack.modules.ospluginutils import (NovaConfig, getManifestTemplate,
appendManifestFile, manifestfiles)
#------------------ oVirt installer initialization ------------------
PLUGIN_NAME = "OS-Nova"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
if platform.linux_distribution()[0] == "Fedora":
primary_netif = "em1"
secondary_netif = "em2"
else:
primary_netif = "eth0"
secondary_netif = "eth1"
nova_params = {
"NOVA": [
{"CMD_OPTION": "nova-db-passwd",
"USAGE": "The password to use for the Nova to access DB",
"PROMPT": "Enter the password for the Nova DB access",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": uuid.uuid4().hex[:16],
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NOVA_DB_PW",
"USE_DEFAULT": True,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CMD_OPTION": "nova-ks-passwd",
"USAGE": ("The password to use for the Nova to authenticate "
"with Keystone"),
"PROMPT": "Enter the password for the Nova Keystone access",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": uuid.uuid4().hex[:16],
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NOVA_KS_PW",
"USE_DEFAULT": True,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CMD_OPTION": "novasched-cpu-allocation-ratio",
"USAGE": ("The overcommitment ratio for virtual to physical CPUs."
" Set to 1.0 to disable CPU overcommitment"),
"PROMPT": "Enter the CPU overcommitment ratio. Set to 1.0 to "
"disable CPU overcommitment",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_float],
"DEFAULT_VALUE": 16.0,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novasched-ram-allocation-ratio",
"USAGE": ("The overcommitment ratio for virtual to physical RAM. "
"Set to 1.0 to disable RAM overcommitment"),
"PROMPT": ("Enter the RAM overcommitment ratio. Set to 1.0 to "
"disable RAM overcommitment"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_float],
"DEFAULT_VALUE": 1.5,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novacompute-migrate-protocol",
"USAGE": ("Protocol used for instance migration. Allowed values "
"are tcp and ssh. Note that by defaul nova user is "
"created with /sbin/nologin shell so that ssh protocol "
"won't be working. To make ssh protocol work you have "
"to fix nova user on compute hosts manually."),
"PROMPT": ("Enter protocol which will be used for instance "
"migration"),
"OPTION_LIST": ['tcp', 'ssh'],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": 'tcp',
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
],
"NOVA_NETWORK": [
{"CMD_OPTION": "novacompute-privif",
"USAGE": ("Private interface for Flat DHCP on the Nova compute "
"servers"),
"PROMPT": ("Enter the Private interface for Flat DHCP on the Nova"
" compute servers"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": secondary_netif,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_COMPUTE_PRIVIF",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-manager",
"USAGE": "Nova network manager",
"PROMPT": "Enter the Nova network manager",
"OPTION_LIST": [r'^nova\.network\.manager\.\w+Manager$'],
"VALIDATORS": [validators.validate_regexp],
"DEFAULT_VALUE": "nova.network.manager.FlatDHCPManager",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_MANAGER",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-pubif",
"USAGE": "Public interface on the Nova network server",
"PROMPT": "Enter the Public interface on the Nova network server",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": primary_netif,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_PUBIF",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-privif",
"USAGE": ("Private interface for network manager on the Nova "
"network server"),
"PROMPT": ("Enter the Private interface for network manager on "
"the Nova network server"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": secondary_netif,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_PRIVIF",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-fixed-range",
"USAGE": "IP Range for network manager",
"PROMPT": "Enter the IP Range for network manager",
"OPTION_LIST": ["^[\:\.\da-fA-f]+(\/\d+){0,1}$"],
"PROCESSORS": [processors.process_cidr],
"VALIDATORS": [validators.validate_regexp],
"DEFAULT_VALUE": "192.168.32.0/22",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_FIXEDRANGE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-floating-range",
"USAGE": "IP Range for Floating IP's",
"PROMPT": "Enter the IP Range for Floating IP's",
"OPTION_LIST": ["^[\:\.\da-fA-f]+(\/\d+){0,1}$"],
"PROCESSORS": [processors.process_cidr],
"VALIDATORS": [validators.validate_regexp],
"DEFAULT_VALUE": "10.3.4.0/22",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_FLOATRANGE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-default-floating-pool",
"USAGE": ("Name of the default floating pool to which the "
"specified floating ranges are added to"),
"PROMPT": "What should the default floating pool be called?",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "nova",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-auto-assign-floating-ip",
"USAGE": "Automatically assign a floating IP to new instances",
"PROMPT": ("Should new instances automatically have a floating "
"IP assigned?"),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
],
"NOVA_NETWORK_VLAN": [
{"CMD_OPTION": "novanetwork-vlan-start",
"USAGE": "First VLAN for private networks",
"PROMPT": "Enter first VLAN for private networks",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": 100,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_VLAN_START",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-num-networks",
"USAGE": "Number of networks to support",
"PROMPT": "How many networks should be supported",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": 1,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_NUMBER",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-network-size",
"USAGE": "Number of addresses in each private subnet",
"PROMPT": "How many addresses should be in each private subnet",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": 255,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_SIZE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
],
}
def use_nova_network(config):
return (config['CONFIG_NOVA_INSTALL'] == 'y' and
config['CONFIG_NEUTRON_INSTALL'] != 'y')
def use_nova_network_vlan(config):
manager = 'nova.network.manager.VlanManager'
return (config['CONFIG_NOVA_INSTALL'] == 'y' and
config['CONFIG_NEUTRON_INSTALL'] != 'y' and
config['CONFIG_NOVA_NETWORK_MANAGER'] == manager)
nova_groups = [
{"GROUP_NAME": "NOVA",
"DESCRIPTION": "Nova Options",
"PRE_CONDITION": "CONFIG_NOVA_INSTALL",
"PRE_CONDITION_MATCH": "y",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
{"GROUP_NAME": "NOVA_NETWORK",
"DESCRIPTION": "Nova Network Options",
"PRE_CONDITION": use_nova_network,
"PRE_CONDITION_MATCH": True,
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
{"GROUP_NAME": "NOVA_NETWORK_VLAN",
"DESCRIPTION": "Nova Network VLAN Options",
"PRE_CONDITION": use_nova_network_vlan,
"PRE_CONDITION_MATCH": True,
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
]
for group in nova_groups:
params = nova_params[group["GROUP_NAME"]]
controller.addGroup(group, params)
def initSequences(controller):
if controller.CONF['CONFIG_NOVA_INSTALL'] != 'y':
return
if controller.CONF['CONFIG_NEUTRON_INSTALL'] == 'y':
network_title = ('Adding Openstack Network-related '
'Nova manifest entries')
network_function = create_neutron_manifest
else:
network_title = 'Adding Nova Network manifest entries'
network_function = create_network_manifest
novaapisteps = [
{'title': 'Adding Nova API manifest entries',
'functions': [create_api_manifest]},
{'title': 'Adding Nova Keystone manifest entries',
'functions': [create_keystone_manifest]},
{'title': 'Adding Nova Cert manifest entries',
'functions': [create_cert_manifest]},
{'title': 'Adding Nova Conductor manifest entries',
'functions': [create_conductor_manifest]},
{'title': 'Creating ssh keys for Nova migration',
'functions': [create_ssh_keys]},
{'title': 'Gathering ssh host keys for Nova migration',
'functions': [gather_host_keys]},
{'title': 'Adding Nova Compute manifest entries',
'functions': [create_compute_manifest]},
{'title': 'Adding Nova Scheduler manifest entries',
'functions': [create_sched_manifest]},
{'title': 'Adding Nova VNC Proxy manifest entries',
'functions': [create_vncproxy_manifest]},
{'title': network_title,
'functions': [network_function]},
{'title': 'Adding Nova Common manifest entries',
'functions': [create_common_manifest]},
{'title': 'Adding Ceph manifest entries',
'functions': [create_ceph_manifest]},
]
controller.addSequence("Installing OpenStack Nova API", [], [],
novaapisteps)
#------------------------- helper functions -------------------------
def check_ifcfg(host, device):
"""
Raises ScriptRuntimeError if given host does not have give device.
"""
server = utils.ScriptRunner(host)
cmd = "ip addr show dev %s || ( echo Device %s does not exist && exit 1 )"
server.append(cmd % (device, device))
server.execute()
def bring_up_ifcfg(host, device):
"""
Brings given device up if it's down. Raises ScriptRuntimeError in case
of failure.
"""
server = utils.ScriptRunner(host)
server.append('ip link show up | grep "%s"' % device)
try:
server.execute()
except ScriptRuntimeError:
server.clear()
cmd = 'ip link set dev %s up'
server.append(cmd % device)
try:
server.execute()
except ScriptRuntimeError:
msg = ('Failed to bring up network interface %s on host %s.'
' Interface should be up so Openstack can work'
' properly.' % (device, host))
raise ScriptRuntimeError(msg)
#-------------------------- step functions --------------------------
def create_ssh_keys(config, messages):
migration_key = os.path.join(basedefs.VAR_DIR, 'nova_migration_key')
# Generate key
local = utils.ScriptRunner()
local.append('ssh-keygen -t rsa -b 2048 -f "%s" -N ""' % migration_key)
local.execute()
with open(migration_key) as fp:
secret = fp.read().strip()
with open('%s.pub' % migration_key) as fp:
public = fp.read().strip()
config['NOVA_MIGRATION_KEY_TYPE'] = 'ssh-rsa'
config['NOVA_MIGRATION_KEY_PUBLIC'] = public.split()[1]
config['NOVA_MIGRATION_KEY_SECRET'] = secret
def gather_host_keys(config, messages):
global compute_hosts
for host in compute_hosts:
local = utils.ScriptRunner()
local.append('ssh-keyscan %s' % host)
retcode, hostkey = local.execute()
config['HOST_KEYS_%s' % host] = hostkey
def create_api_manifest(config, messages):
# Since this step is running first, let's create necesary variables here
# and make them global
global compute_hosts, network_hosts
com_var = config.get("CONFIG_COMPUTE_HOSTS", "")
compute_hosts = set([i.strip() for i in com_var.split(",") if i.strip()])
net_var = config.get("CONFIG_NETWORK_HOSTS", "")
network_hosts = set([i.strip() for i in net_var.split(",") if i.strip()])
# This is a hack around us needing to generate the neutron metadata
# password, but the nova puppet plugin uses the existence of that
# password to determine whether or not to configure neutron metadata
# proxy support. So the nova_api.pp template needs unquoted 'undef'
# to disable metadata support if neutron is not being installed.
if config['CONFIG_NEUTRON_INSTALL'] != 'y':
config['CONFIG_NEUTRON_METADATA_PW_UNQUOTED'] = 'undef'
else:
config['CONFIG_NEUTRON_METADATA_PW_UNQUOTED'] = \
"'%s'" % config['CONFIG_NEUTRON_METADATA_PW']
manifestfile = "%s_api_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_api.pp")
config['FIREWALL_SERVICE_NAME'] = "nova api"
config['FIREWALL_PORTS'] = "['8773', '8774', '8775']"
config['FIREWALL_CHAIN'] = "INPUT"
config['FIREWALL_PROTOCOL'] = 'tcp'
config['FIREWALL_ALLOWED'] = "'ALL'"
config['FIREWALL_SERVICE_ID'] = "nova_api"
manifestdata += getManifestTemplate("firewall.pp")
appendManifestFile(manifestfile, manifestdata, 'novaapi')
def create_keystone_manifest(config, messages):
manifestfile = "%s_keystone.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("keystone_nova.pp")
appendManifestFile(manifestfile, manifestdata)
def create_cert_manifest(config, messages):
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_cert.pp")
appendManifestFile(manifestfile, manifestdata)
def create_conductor_manifest(config, messages):
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_conductor.pp")
appendManifestFile(manifestfile, manifestdata)
def create_compute_manifest(config, messages):
global compute_hosts, network_hosts
migrate_protocol = config['CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL']
if migrate_protocol == 'ssh':
config['CONFIG_NOVA_COMPUTE_MIGRATE_URL'] = (
'qemu+ssh://nova@%s/system?no_verify=1&'
'keyfile=/etc/nova/ssh/nova_migration_key'
)
else:
config['CONFIG_NOVA_COMPUTE_MIGRATE_URL'] = (
'qemu+tcp://nova@%s/system'
)
ssh_hostkeys = ''
for host in compute_hosts:
try:
hostname, aliases, addrs = socket.gethostbyaddr(host)
except socket.herror:
hostname, aliases, addrs = (host, [], [])
for hostkey in config['HOST_KEYS_%s' % host].split('\n'):
hostkey = hostkey.strip()
if not hostkey:
continue
_, host_key_type, host_key_data = hostkey.split()
config['SSH_HOST_NAME'] = hostname
config['SSH_HOST_ALIASES'] = ','.join(
'"%s"' % addr for addr in aliases + addrs
)
config['SSH_HOST_KEY'] = host_key_data
config['SSH_HOST_KEY_TYPE'] = host_key_type
ssh_hostkeys += getManifestTemplate("sshkey.pp")
for host in compute_hosts:
config["CONFIG_NOVA_COMPUTE_HOST"] = host
manifestdata = getManifestTemplate("nova_compute.pp")
if migrate_protocol == 'ssh' or migrate_protocol == 'tcp':
for c_host in compute_hosts:
config['FIREWALL_SERVICE_NAME'] = "nova qemu migration"
config['FIREWALL_PORTS'] = ['16509']
config['FIREWALL_PORTS'].append('49152-49215')
config['FIREWALL_CHAIN'] = "INPUT"
config['FIREWALL_PROTOCOL'] = 'tcp'
config['FIREWALL_ALLOWED'] = "'%s'" % c_host
config['FIREWALL_SERVICE_ID'] = ("nova_qemu_migration_%s_%s"
% (host, c_host))
manifestdata += getManifestTemplate("firewall.pp")
if config['CONFIG_VMWARE_BACKEND'] == 'y':
manifestdata += getManifestTemplate("nova_compute_vmware.pp")
else:
manifestdata += getManifestTemplate("nova_compute_libvirt.pp")
if (config['CONFIG_VMWARE_BACKEND'] != 'y' and
config['CONFIG_CINDER_INSTALL'] == 'y' and
config['CONFIG_CINDER_BACKEND'] == 'gluster'):
manifestdata += getManifestTemplate("nova_gluster.pp")
if (config['CONFIG_VMWARE_BACKEND'] != 'y' and
config['CONFIG_CINDER_INSTALL'] == 'y' and
config['CONFIG_CINDER_BACKEND'] == 'nfs'):
manifestdata += getManifestTemplate("nova_nfs.pp")
manifestfile = "%s_nova.pp" % host
nova_config_options = NovaConfig()
if config['CONFIG_NEUTRON_INSTALL'] != 'y':
if host not in network_hosts:
nova_config_options.addOption(
"DEFAULT/flat_interface",
config['CONFIG_NOVA_COMPUTE_PRIVIF']
)
check_ifcfg(host, config['CONFIG_NOVA_COMPUTE_PRIVIF'])
try:
bring_up_ifcfg(host, config['CONFIG_NOVA_COMPUTE_PRIVIF'])
except ScriptRuntimeError as ex:
# just warn user to do it by himself
messages.append(str(ex))
if config['CONFIG_CEILOMETER_INSTALL'] == 'y':
mq_template = get_mq(config, "nova_ceilometer")
manifestdata += getManifestTemplate(mq_template)
manifestdata += getManifestTemplate("nova_ceilometer.pp")
config['FIREWALL_PORTS'] = ['5900-5999']
if migrate_protocol == 'tcp':
config['FIREWALL_PORTS'].append('16509')
config['FIREWALL_ALLOWED'] = "'%s'" % config['CONFIG_CONTROLLER_HOST']
config['FIREWALL_SERVICE_NAME'] = "nova compute"
config['FIREWALL_SERVICE_ID'] = "nova_compute"
config['FIREWALL_CHAIN'] = "INPUT"
config['FIREWALL_PROTOCOL'] = 'tcp'
manifestdata += getManifestTemplate("firewall.pp")
manifestdata += "\n" + nova_config_options.getManifestEntry()
manifestdata += "\n" + ssh_hostkeys
appendManifestFile(manifestfile, manifestdata)
def create_network_manifest(config, messages):
global compute_hosts, network_hosts
if config['CONFIG_NEUTRON_INSTALL'] == "y":
return
# set default values for VlanManager in case this values are not in config
for key, value in [('CONFIG_NOVA_NETWORK_VLAN_START', 100),
('CONFIG_NOVA_NETWORK_SIZE', 255),
('CONFIG_NOVA_NETWORK_NUMBER', 1)]:
config[key] = config.get(key, value)
api_host = config['CONFIG_CONTROLLER_HOST']
multihost = len(network_hosts) > 1
config['CONFIG_NOVA_NETWORK_MULTIHOST'] = multihost and 'true' or 'false'
for host in network_hosts:
for i in ('CONFIG_NOVA_NETWORK_PRIVIF', 'CONFIG_NOVA_NETWORK_PUBIF'):
check_ifcfg(host, config[i])
try:
bring_up_ifcfg(host, config[i])
except ScriptRuntimeError as ex:
# just warn user to do it by himself
messages.append(str(ex))
key = 'CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'
config[key] = config[key] == "y"
# We need to explicitly set the network size
routing_prefix = config['CONFIG_NOVA_NETWORK_FIXEDRANGE'].split('/')[1]
net_size = 2 ** (32 - int(routing_prefix))
config['CONFIG_NOVA_NETWORK_FIXEDSIZE'] = str(net_size)
manifestfile = "%s_nova.pp" % host
manifestdata = getManifestTemplate("nova_network.pp")
# Restart libvirt if we deploy nova network on compute
if host in compute_hosts:
manifestdata += getManifestTemplate("nova_network_libvirt.pp")
# in multihost mode each compute host runs nova-api-metadata
if multihost and host != api_host and host in compute_hosts:
manifestdata += getManifestTemplate("nova_metadata.pp")
appendManifestFile(manifestfile, manifestdata)
def create_sched_manifest(config, messages):
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_sched.pp")
appendManifestFile(manifestfile, manifestdata)
def create_vncproxy_manifest(config, messages):
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_vncproxy.pp")
appendManifestFile(manifestfile, manifestdata)
def create_common_manifest(config, messages):
global compute_hosts, network_hosts
network_type = (config['CONFIG_NEUTRON_INSTALL'] == "y" and
'neutron' or 'nova')
network_multi = len(network_hosts) > 1
dbacces_hosts = set([config.get('CONFIG_CONTROLLER_HOST')])
dbacces_hosts |= network_hosts
for manifestfile, marker in manifestfiles.getFiles():
if manifestfile.endswith("_nova.pp"):
host, manifest = manifestfile.split('_', 1)
host = host.strip()
if host in compute_hosts and host not in dbacces_hosts:
# we should omit password in case we are installing only
# nova-compute to the host
perms = "nova"
else:
perms = "nova:%(CONFIG_NOVA_DB_PW)s"
sqlconn = "mysql://%s@%%(CONFIG_MARIADB_HOST)s/nova" % perms
config['CONFIG_NOVA_SQL_CONN'] = sqlconn % config
# for nova-network in multihost mode each compute host is metadata
# host otherwise we use api host
if (network_type == 'nova' and network_multi and
host in compute_hosts):
metadata = host
else:
metadata = config['CONFIG_CONTROLLER_HOST']
config['CONFIG_NOVA_METADATA_HOST'] = metadata
data = getManifestTemplate(get_mq(config, "nova_common"))
data += getManifestTemplate("nova_common.pp")
appendManifestFile(os.path.split(manifestfile)[1], data)
def create_neutron_manifest(config, messages):
if config['CONFIG_NEUTRON_INSTALL'] != "y":
return
virt_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'
config['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = virt_driver
for manifestfile, marker in manifestfiles.getFiles():
if manifestfile.endswith("_nova.pp"):
data = getManifestTemplate("nova_neutron.pp")
appendManifestFile(os.path.split(manifestfile)[1], data)
def create_ceph_manifest(config, messages):
global compute_hosts
if (config['CONFIG_VMWARE_BACKEND'] != 'y' and
config['CONFIG_CINDER_INSTALL'] == 'y' and
config['CONFIG_CINDER_BACKEND'] == 'ceph'):
manifestdata = getManifestTemplate("nova_ceph.pp")
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
appendManifestFile(manifestfile, manifestdata)
for host in compute_hosts:
manifestdata = getManifestTemplate("nova_ceph_compute.pp")
manifestfile = "%s_nova_ceph.pp" % host
appendManifestFile(manifestfile, manifestdata)
| {
"content_hash": "1fc020145fa12fe0a2fc4e96840c0614",
"timestamp": "",
"source": "github",
"line_count": 674,
"max_line_length": 79,
"avg_line_length": 41.253709198813056,
"alnum_prop": 0.5701852184858839,
"repo_name": "yuw726/openstack-packstack",
"id": "04ad0c16a3af3da74f36625fa240877513886068",
"size": "27830",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packstack/plugins/nova_300.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "187904"
},
{
"name": "Diff",
"bytes": "21134"
},
{
"name": "HTML",
"bytes": "164"
},
{
"name": "Pascal",
"bytes": "923"
},
{
"name": "Puppet",
"bytes": "102034"
},
{
"name": "Python",
"bytes": "393275"
},
{
"name": "Ruby",
"bytes": "16469"
},
{
"name": "Shell",
"bytes": "3016"
}
],
"symlink_target": ""
} |
"""Public forms."""
from urllib.parse import urljoin, urlparse
from flask import redirect, request, url_for
from flask_wtf import FlaskForm
from wtforms import HiddenField, PasswordField, StringField
from wtforms.validators import DataRequired
from personal_website.user.models import User
def is_safe_url(target):
"""Test for safe url."""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def get_redirect_target():
"""Redirect function."""
for target in request.args.get('next'), request.referrer:
if not target:
continue
if is_safe_url(target):
return target
class RedirectForm(FlaskForm):
"""Form with safe redirect method."""
next = HiddenField()
def __init__(self, *args, **kwargs):
"""Initialize RedirectForm."""
FlaskForm.__init__(self, *args, **kwargs)
if not self.next.data:
self.next.data = get_redirect_target() or ''
def redirect(self, endpoint='user.members', **values):
"""Safe redirect method."""
if is_safe_url(self.next.data):
return redirect(self.next.data)
target = get_redirect_target()
return redirect(target or url_for(endpoint, **values))
class LoginForm(RedirectForm):
"""Login form."""
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
def __init__(self, *args, **kwargs):
"""Create instance."""
super().__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super().validate()
if not initial_validation:
return False
self.user = User.query.filter_by(username=self.username.data).first()
if not self.user:
self.username.errors.append('Unknown username')
return False
if not self.user.check_password(self.password.data):
self.password.errors.append('Invalid password')
return False
if not self.user.active:
self.username.errors.append('User not activated')
return False
return True
| {
"content_hash": "5b2397d3719700cb8b77bc6314b6334c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 85,
"avg_line_length": 30.350649350649352,
"alnum_prop": 0.6281557552417629,
"repo_name": "arewellborn/Personal-Website",
"id": "55ed90217c93f48bd30723d03934ad019b9a925a",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "personal_website/public/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4414"
},
{
"name": "HTML",
"bytes": "12401"
},
{
"name": "JavaScript",
"bytes": "181413"
},
{
"name": "Python",
"bytes": "37533"
}
],
"symlink_target": ""
} |
from itertools import product
from distutils.version import LooseVersion
import operator
import pytest
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, Categorical, DataFrame, isna, notna,
bdate_range, date_range, _np_version_under1p10,
CategoricalIndex)
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import lrange, range, PY35
from pandas import compat
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from .common import TestData
class TestSeriesAnalytics(TestData):
@pytest.mark.parametrize("use_bottleneck", [True, False])
@pytest.mark.parametrize("method, unit", [
("sum", 0.0),
("prod", 1.0)
])
def test_empty(self, method, unit, use_bottleneck):
with pd.option_context("use_bottleneck", use_bottleneck):
# GH 9422 / 18921
# Entirely empty
s = Series([])
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
result == unit
# Skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert isna(result)
# All-NA
s = Series([np.nan])
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
result == unit
# skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert isna(result)
# Mix of valid, empty
s = Series([np.nan, 1])
# Default
result = getattr(s, method)()
assert result == 1.0
# Explicit
result = getattr(s, method)(min_count=0)
assert result == 1.0
result = getattr(s, method)(min_count=1)
assert result == 1.0
# Skipna
result = getattr(s, method)(skipna=True)
assert result == 1.0
result = getattr(s, method)(skipna=True, min_count=0)
assert result == 1.0
result = getattr(s, method)(skipna=True, min_count=1)
assert result == 1.0
# GH #844 (changed in 9422)
df = DataFrame(np.empty((10, 0)))
assert (getattr(df, method)(1) == unit).all()
s = pd.Series([1])
result = getattr(s, method)(min_count=2)
assert isna(result)
s = pd.Series([np.nan])
result = getattr(s, method)(min_count=2)
assert isna(result)
s = pd.Series([np.nan, 1])
result = getattr(s, method)(min_count=2)
assert isna(result)
@pytest.mark.parametrize('method, unit', [
('sum', 0.0),
('prod', 1.0),
])
def test_empty_multi(self, method, unit):
s = pd.Series([1, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product([('a', 'b'), (0, 1)]))
# 1 / 0 by default
result = getattr(s, method)(level=0)
expected = pd.Series([1, unit], index=['a', 'b'])
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(s, method)(level=0, min_count=0)
expected = pd.Series([1, unit], index=['a', 'b'])
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(s, method)(level=0, min_count=1)
expected = pd.Series([1, np.nan], index=['a', 'b'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method", ['mean', 'median', 'std', 'var'])
def test_ops_consistency_on_empty(self, method):
# GH 7869
# consistency on empty
# float
result = getattr(Series(dtype=float), method)()
assert isna(result)
# timedelta64[ns]
result = getattr(Series(dtype='m8[ns]'), method)()
assert result is pd.NaT
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(s)
assert_almost_equal(result, 1)
@pytest.mark.parametrize("use_bottleneck", [True, False])
def test_sum_overflow(self, use_bottleneck):
with pd.option_context('use_bottleneck', use_bottleneck):
# GH 6915
# overflowing on the smaller int dtypes
for dtype in ['int32', 'int64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert int(result) == v.sum(dtype='int64')
result = s.min(skipna=False)
assert int(result) == 0
result = s.max(skipna=False)
assert int(result) == v[-1]
for dtype in ['float32', 'float64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert result == v.sum(dtype=dtype)
result = s.min(skipna=False)
assert np.allclose(float(result), 0.0)
result = s.max(skipna=False)
assert np.allclose(float(result), v[-1])
def test_sum(self):
self._check_stat_op('sum', np.sum, check_allna=False)
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert np.isinf(s.sum())
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
with pd.option_context("mode.use_inf_as_na", True):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
assert np.isinf(res).all()
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
tm.assert_almost_equal(np.median(int_ts), int_ts.median())
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_var_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
result = self.ts.std(ddof=4)
expected = np.std(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
result = self.ts.var(ddof=4)
expected = np.var(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.var(ddof=1)
assert isna(result)
result = s.std(ddof=1)
assert isna(result)
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.ts.sem(ddof=4)
expected = np.std(self.ts.values,
ddof=4) / np.sqrt(len(self.ts.values))
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.sem(ddof=1)
assert isna(result)
@td.skip_if_no_scipy
def test_skew(self):
from scipy.stats import skew
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
# test corner cases, skew() returns NaN unless there's at least 3
# values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.skew())
assert np.isnan(df.skew()).all()
else:
assert 0 == s.skew()
assert (df.skew() == 0).all()
@td.skip_if_no_scipy
def test_kurt(self):
from scipy.stats import kurtosis
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
tm.assert_almost_equal(s.kurt(), s.kurt(level=0)['bar'])
# test corner cases, kurt() returns NaN unless there's at least 4
# values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.kurt())
assert np.isnan(df.kurt()).all()
else:
assert 0 == s.kurt()
assert (df.kurt() == 0).all()
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name='int_data')
result = s.describe()
expected = Series([5, 2, s.std(), 0, 1, 2, 3, 4],
name='int_data',
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_series_equal(result, expected)
s = Series([True, True, False, False, False], name='bool_data')
result = s.describe()
expected = Series([5, 2, False, 3], name='bool_data',
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
s = Series(['a', 'a', 'b', 'c', 'd'], name='str_data')
result = s.describe()
expected = Series([5, 4, 'a', 2], name='str_data',
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
def test_argsort(self):
self._check_accum_op('argsort', check_dtype=False)
argsorted = self.ts.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
assert s.dtype == 'datetime64[ns]'
shifted = s.shift(-1)
assert shifted.dtype == 'datetime64[ns]'
assert isna(shifted[4])
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(lrange(4) + [-1], dtype='int64')
assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind='mergesort')
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind='mergesort')
qexpected = np.argsort(s.values, kind='quicksort')
tm.assert_series_equal(mindexer, Series(mexpected),
check_dtype=False)
tm.assert_series_equal(qindexer, Series(qexpected),
check_dtype=False)
pytest.raises(AssertionError, tm.assert_numpy_array_equal,
qindexer, mindexer)
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def test_cummin(self):
tm.assert_numpy_array_equal(self.ts.cummin().values,
np.minimum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummax(self):
tm.assert_numpy_array_equal(self.ts.cummax().values,
np.maximum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-1', 'NaT', '2000-1-1']))
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1'
]))
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-2', 'NaT', '2000-1-3']))
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3'
]))
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'1 min', ]))
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'1 min',
'1 min',
'1 min', ]))
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'2 min',
'NaT',
'3 min', ]))
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'2 min',
'2 min',
'3 min', ]))
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_npdiff(self):
pytest.skip("skipping due to Series no longer being an "
"ndarray")
# no longer works as the return type of np.diff is now nd.array
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_stat_op(self, name, alternate, check_objects=False,
check_allna=False):
with pd.option_context('use_bottleneck', False):
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
if name not in ['max', 'min']:
ds = Series(date_range('1/1/2001', periods=10))
pytest.raises(TypeError, f, ds)
# skipna or no
assert notna(f(self.series))
assert isna(f(self.series, skipna=False))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona.values))
assert_almost_equal(f(self.series), alternate(nona.values))
allna = self.series * nan
if check_allna:
assert np.isnan(f(allna))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# 2888
l = [0]
l.extend(lrange(2 ** 40, 2 ** 40 + 1000))
s = Series(l, dtype='int64')
assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
assert res == exp
# check on string data
if name not in ['sum', 'min', 'max']:
pytest.raises(TypeError, f, Series(list('abc')))
# Invalid axis.
pytest.raises(ValueError, f, self.series, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in compat.signature(f).args:
tm.assert_raises_regex(NotImplementedError, name, f,
self.series, numeric_only=True)
def _check_accum_op(self, name, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(func(self.ts).values,
func(np.array(self.ts)),
check_dtype=check_dtype)
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected,
check_dtype=False)
def test_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
tm.assert_series_equal(s.compress(cond), expected)
def test_numpy_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
tm.assert_series_equal(np.compress(cond, s), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.compress,
cond, s, axis=1)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.compress,
cond, s, out=s)
def test_round(self):
self.ts.index.name = "index_name"
result = self.ts.round(2)
expected = Series(np.round(self.ts.values, 2),
index=self.ts.index, name='ts')
assert_series_equal(result, expected)
assert result.name == self.ts.name
def test_numpy_round(self):
# See gh-12600
s = Series([1.53, 1.36, 0.06])
out = np.round(s, decimals=0)
expected = Series([2., 1., 0.])
assert_series_equal(out, expected)
msg = "the 'out' parameter is not supported"
with tm.assert_raises_regex(ValueError, msg):
np.round(s, decimals=0, out=s)
def test_built_in_round(self):
if not compat.PY3:
pytest.skip(
'build in round cannot be overridden prior to Python 3')
s = Series([1.123, 2.123, 3.123], index=lrange(3))
result = round(s)
expected_rounded0 = Series([1., 2., 3.], index=lrange(3))
tm.assert_series_equal(result, expected_rounded0)
decimals = 2
expected_rounded = Series([1.12, 2.12, 3.12], index=lrange(3))
result = round(s, decimals)
tm.assert_series_equal(result, expected_rounded)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
result = s.prod()
assert not isinstance(result, Series)
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
assert not bool_series.all()
assert bool_series.any()
# Alternative types, with implicit 'object' dtype.
s = Series(['abc', True])
assert 'abc' == s.any() # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
assert s1.all(skipna=False) # nan && True => True
assert s1.all(skipna=True)
assert np.isnan(s2.any(skipna=False)) # nan || False => nan
assert not s2.any(skipna=True)
# Check level.
s = pd.Series([False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2])
assert_series_equal(s.all(level=0), Series([False, True, False]))
assert_series_equal(s.any(level=0), Series([False, True, True]))
# bool_only is not implemented with level option.
pytest.raises(NotImplementedError, s.any, bool_only=True, level=0)
pytest.raises(NotImplementedError, s.all, bool_only=True, level=0)
# bool_only is not implemented alone.
pytest.raises(NotImplementedError, s.any, bool_only=True)
pytest.raises(NotImplementedError, s.all, bool_only=True)
def test_modulo(self):
with np.errstate(all='ignore'):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.nan
assert_series_equal(result, expected)
result = p['first'] % 0
expected = Series(np.nan, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
result2 = p['second'] % p['first']
assert not result.equals(result2)
# GH 9144
s = Series([0, 1])
result = s % 0
expected = Series([nan, nan])
assert_series_equal(result, expected)
result = 0 % s
expected = Series([nan, 0.0])
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corr(self):
import scipy.stats as stats
# full overlap
tm.assert_almost_equal(self.ts.corr(self.ts), 1)
# partial overlap
tm.assert_almost_equal(self.ts[:15].corr(self.ts[5:]), 1)
assert isna(self.ts[:15].corr(self.ts[5:], min_periods=12))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
assert isna(ts1.corr(ts2, min_periods=12))
# No overlap
assert np.isnan(self.ts[::2].corr(self.ts[1::2]))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
assert isna(cp.corr(cp))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
tm.assert_almost_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_rank(self):
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
tm.assert_almost_equal(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
tm.assert_almost_equal(result, expected)
# these methods got rewritten in 0.8
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
pytest.skip("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
# results from R
A = Series(
[-0.89926396, 0.94209606, -1.03289164, -0.95445587, 0.76910310, -
0.06430576, -2.09704447, 0.40660407, -0.89926396, 0.94209606])
B = Series(
[-1.01270225, -0.62210117, -1.56895827, 0.59592943, -0.01680292,
1.17258718, -1.06009347, -0.10222060, -0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
tm.assert_almost_equal(A.corr(B, method='kendall'), kexp)
tm.assert_almost_equal(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
tm.assert_almost_equal(self.ts.cov(self.ts), self.ts.std() ** 2)
# partial overlap
tm.assert_almost_equal(self.ts[:15].cov(self.ts[5:]),
self.ts[5:15].std() ** 2)
# No overlap
assert np.isnan(self.ts[::2].cov(self.ts[1::2]))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
assert isna(cp.cov(cp))
# min_periods
assert isna(self.ts[:15].cov(self.ts[5:], min_periods=12))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
assert isna(ts1.cov(ts2, min_periods=12))
def test_count(self):
assert self.ts.count() == len(self.ts)
self.ts[::2] = np.NaN
assert self.ts.count() == np.isfinite(self.ts).sum()
mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
right = Series([2, 3, 1], index=[1, 2, nan])
assert_series_equal(left, right)
ts.iloc[[0, 3, 5]] = nan
assert_series_equal(ts.count(level=1), right - 1)
def test_dot(self):
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
result = a.dot(b)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
assert np.all(result == expected.values)
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
assert_almost_equal(a.dot(b['1']), expected['1'])
assert_almost_equal(a.dot(b2['1']), expected['1'])
pytest.raises(Exception, a.dot, a.values[:3])
pytest.raises(ValueError, a.dot, b.T)
@pytest.mark.skipif(not PY35,
reason='matmul supported for Python>=3.5')
def test_matmul(self):
# matmul test is for GH #10259
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
# Series @ DataFrame
result = operator.matmul(a, b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
# DataFrame @ Series
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# Series @ Series
result = operator.matmul(a, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# np.array @ Series (__rmatmul__)
result = operator.matmul(a.values, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# mixed dtype DataFrame @ Series
a['p'] = int(a.p)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# different dtypes DataFrame @ Series
a = a.astype(int)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
pytest.raises(Exception, a.dot, a.values[:3])
pytest.raises(ValueError, a.dot, b.T)
def test_value_counts_nunique(self):
# basics.rst doc example
series = Series(np.random.randn(500))
series[20:500] = np.nan
series[10:20] = 5000
result = series.nunique()
assert result == 11
# GH 18051
s = pd.Series(pd.Categorical([]))
assert s.nunique() == 0
s = pd.Series(pd.Categorical([np.nan]))
assert s.nunique() == 0
def test_unique(self):
# 714 also, dtype=float
s = Series([1.2345] * 100)
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
s = Series([1.2345] * 100, dtype='f4')
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
# NAs in object arrays #714
s = Series(['foo'] * 100, dtype='O')
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
# decision about None
s = Series([1, 2, 3, None, None, None], dtype=object)
result = s.unique()
expected = np.array([1, 2, 3, None], dtype=object)
tm.assert_numpy_array_equal(result, expected)
# GH 18051
s = pd.Series(pd.Categorical([]))
tm.assert_categorical_equal(s.unique(), pd.Categorical([]),
check_dtype=False)
s = pd.Series(pd.Categorical([np.nan]))
tm.assert_categorical_equal(s.unique(), pd.Categorical([np.nan]),
check_dtype=False)
@pytest.mark.parametrize(
"tc1, tc2",
[
(
Series([1, 2, 3, 3], dtype=np.dtype('int_')),
Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('int_'))
),
(
Series([1, 2, 3, 3], dtype=np.dtype('uint')),
Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('uint'))
),
(
Series([1, 2, 3, 3], dtype=np.dtype('float_')),
Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('float_'))
),
(
Series([1, 2, 3, 3], dtype=np.dtype('unicode_')),
Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('unicode_'))
)
]
)
def test_drop_duplicates_non_bool(self, tc1, tc2):
# Test case 1
expected = Series([False, False, False, True])
assert_series_equal(tc1.duplicated(), expected)
assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, False])
assert_series_equal(tc1.duplicated(keep='last'), expected)
assert_series_equal(tc1.drop_duplicates(keep='last'), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, True])
assert_series_equal(tc1.duplicated(keep=False), expected)
assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, tc1[~expected])
# Test case 2
expected = Series([False, False, False, False, True, True, False])
assert_series_equal(tc2.duplicated(), expected)
assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, False, False, False])
assert_series_equal(tc2.duplicated(keep='last'), expected)
assert_series_equal(tc2.drop_duplicates(keep='last'), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, True, True, False])
assert_series_equal(tc2.duplicated(keep=False), expected)
assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, tc2[~expected])
def test_drop_duplicates_bool(self):
tc = Series([True, False, True, False])
expected = Series([False, False, True, True])
assert_series_equal(tc.duplicated(), expected)
assert_series_equal(tc.drop_duplicates(), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, tc[~expected])
expected = Series([True, True, False, False])
assert_series_equal(tc.duplicated(keep='last'), expected)
assert_series_equal(tc.drop_duplicates(keep='last'), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, tc[~expected])
expected = Series([True, True, True, True])
assert_series_equal(tc.duplicated(keep=False), expected)
assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, tc[~expected])
def test_clip(self):
val = self.ts.median()
assert self.ts.clip_lower(val).min() == val
assert self.ts.clip_upper(val).max() == val
assert self.ts.clip(lower=val).min() == val
assert self.ts.clip(upper=val).max() == val
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
assert isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]), Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime(
[np.nan, 1, 2, 3], unit='D'))]
for s in sers:
thresh = s[2]
l = s.clip_lower(thresh)
u = s.clip_upper(thresh)
assert l[notna(l)].min() == thresh
assert u[notna(u)].max() == thresh
assert list(isna(s)) == list(isna(l))
assert list(isna(s)) == list(isna(u))
def test_clip_with_na_args(self):
"""Should process np.nan argument as None """
# GH # 17276
s = Series([1, 2, 3])
assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
assert_series_equal(s.clip(upper=[1, 1, np.nan]), Series([1, 2, 3]))
assert_series_equal(s.clip(lower=[1, np.nan, 1]), Series([1, 2, 3]))
assert_series_equal(s.clip(upper=np.nan, lower=np.nan),
Series([1, 2, 3]))
def test_clip_against_series(self):
# GH #6966
s = Series([1.0, 1.0, 4.0])
threshold = Series([1.0, 2.0, 3.0])
assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))
assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
def test_clip_against_list_like(self, inplace, upper):
# GH #15390
original = pd.Series([5, 6, 7])
result = original.clip(upper=upper, inplace=inplace)
expected = pd.Series([1, 2, 3])
if inplace:
result = original
tm.assert_series_equal(result, expected, check_exact=True)
def test_clip_with_datetimes(self):
# GH 11838
# naive and tz-aware datetimes
t = Timestamp('2015-12-01 09:30:30')
s = Series([Timestamp('2015-12-01 09:30:00'),
Timestamp('2015-12-01 09:31:00')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00'),
Timestamp('2015-12-01 09:30:30')])
assert_series_equal(result, expected)
t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern')
s = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:31:00', tz='US/Eastern')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:30:30', tz='US/Eastern')])
assert_series_equal(result, expected)
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
def cummin(x):
return np.minimum.accumulate(x)
def cummax(x):
return np.maximum.accumulate(x)
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {'cumsum': np.cumsum,
'cumprod': np.cumprod,
'cummin': cummin,
'cummax': cummax}
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse,
'cumprod': cpe,
'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
# GH: 16012
# This specific issue has to have a series over 1e6 in len, but the
# comparison array (in_list) must be large enough so that numpy doesn't
# do a manual masking trick that will avoid this issue altogether
s = Series(list('abcdefghijk' * 10 ** 5))
# If numpy doesn't do the manual comparison/mask, these
# unorderable mixed types are what cause the exception in numpy
in_list = [-1, 'a', 'b', 'G', 'Y', 'Z', 'E',
'K', 'E', 'S', 'I', 'R', 'R'] * 6
assert s.isin(in_list).sum() == 200000
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
with pytest.raises(TypeError):
s.isin('a')
with pytest.raises(TypeError):
s = Series(['aaa', 'b', 'c'])
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range('jan-01-2013', 'jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
result = s.isin(set(s[0:2]))
assert_series_equal(result, expected)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5), unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
s = Series(["a", "b"])
expected = Series([False, False])
result = s.isin(empty)
tm.assert_series_equal(expected, result)
def test_timedelta64_analytics(self):
from pandas import date_range
# index min/max
td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
Timestamp('20120101')
result = td.idxmin()
assert result == 0
result = td.idxmax()
assert result == 2
# GH 2982
# with NaT
td[0] = np.nan
result = td.idxmin()
assert result == 1
result = td.idxmax()
assert result == 2
# abs
s1 = Series(date_range('20120101', periods=3))
s2 = Series(date_range('20120102', periods=3))
expected = Series(s2 - s1)
# this fails as numpy returns timedelta64[us]
# result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta('2 days')
assert result == expected
result = td.min()
expected = Timedelta('1 days')
assert result == expected
def test_idxmin(self):
# test idxmin
# _check_stat_op approach can not be used here because of isna check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
assert self.series[self.series.idxmin()] == self.series.min()
assert isna(self.series.idxmin(skipna=False))
# no NaNs
nona = self.series.dropna()
assert nona[nona.idxmin()] == nona.min()
assert (nona.index.values.tolist().index(nona.idxmin()) ==
nona.values.argmin())
# all NaNs
allna = self.series * nan
assert isna(allna.idxmin())
# datetime64[ns]
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmin()
assert result == 0
s[0] = np.nan
result = s.idxmin()
assert result == 1
def test_numpy_argmin_deprecated(self):
# See gh-16830
data = np.arange(1, 11)
s = Series(data, index=data)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# The deprecation of Series.argmin also causes a deprecation
# warning when calling np.argmin. This behavior is temporary
# until the implementation of Series.argmin is corrected.
result = np.argmin(s)
assert result == 1
with tm.assert_produces_warning(FutureWarning):
# argmin is aliased to idxmin
result = s.argmin()
assert result == 1
if not _np_version_under1p10:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argmin,
s, out=data)
def test_idxmax(self):
# test idxmax
# _check_stat_op approach can not be used here because of isna check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
assert self.series[self.series.idxmax()] == self.series.max()
assert isna(self.series.idxmax(skipna=False))
# no NaNs
nona = self.series.dropna()
assert nona[nona.idxmax()] == nona.max()
assert (nona.index.values.tolist().index(nona.idxmax()) ==
nona.values.argmax())
# all NaNs
allna = self.series * nan
assert isna(allna.idxmax())
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmax()
assert result == 5
s[5] = np.nan
result = s.idxmax()
assert result == 4
# Float64Index
# GH 5914
s = pd.Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
s = pd.Series(s.index, s.index)
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
def test_numpy_argmax_deprecated(self):
# See gh-16830
data = np.arange(1, 11)
s = Series(data, index=data)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# The deprecation of Series.argmax also causes a deprecation
# warning when calling np.argmax. This behavior is temporary
# until the implementation of Series.argmax is corrected.
result = np.argmax(s)
assert result == 10
with tm.assert_produces_warning(FutureWarning):
# argmax is aliased to idxmax
result = s.argmax()
assert result == 10
if not _np_version_under1p10:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argmax,
s, out=data)
def test_ptp(self):
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
assert np.ptp(ser) == np.ptp(arr)
# GH11163
s = Series([3, 5, np.nan, -3, 10])
assert s.ptp() == 13
assert pd.isna(s.ptp(skipna=False))
mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
tm.assert_series_equal(s.ptp(level=0), expected)
expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
tm.assert_series_equal(s.ptp(level=0, skipna=False), expected)
with pytest.raises(ValueError):
s.ptp(axis=1)
s = pd.Series(['a', 'b', 'c', 'd', 'e'])
with pytest.raises(TypeError):
s.ptp()
with pytest.raises(NotImplementedError):
s.ptp(numeric_only=True)
def test_empty_timeseries_redections_return_nat(self):
# covers #11245
for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):
assert Series([], dtype=dtype).min() is pd.NaT
assert Series([], dtype=dtype).max() is pd.NaT
def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_repeat(self):
s = Series(np.random.randn(3), index=['a', 'b', 'c'])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
with tm.assert_produces_warning(FutureWarning):
result = s.repeat(reps=5)
assert_series_equal(result, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep),
index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_numpy_repeat(self):
s = Series(np.arange(3), name='x')
expected = Series(s.values.repeat(2), name='x',
index=s.index.values.repeat(2))
assert_series_equal(np.repeat(s, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.repeat, s, 2, axis=0)
def test_searchsorted(self):
s = Series([1, 2, 3])
idx = s.searchsorted(1, side='left')
tm.assert_numpy_array_equal(idx, np.array([0], dtype=np.intp))
idx = s.searchsorted(1, side='right')
tm.assert_numpy_array_equal(idx, np.array([1], dtype=np.intp))
with tm.assert_produces_warning(FutureWarning):
idx = s.searchsorted(v=1, side='left')
tm.assert_numpy_array_equal(idx, np.array([0], dtype=np.intp))
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
e = 2
assert r == e
r = s.searchsorted([30])
e = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = pd.Timestamp('20120102')
r = s.searchsorted(v)
e = 1
assert r == e
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_is_unique(self):
# GH11946
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_unique
s = Series(np.arange(1000))
assert s.is_unique
def test_is_unique_class_ne(self, capsys):
# GH 20661
class Foo(object):
def __init__(self, val):
self._value = val
def __ne__(self, other):
raise Exception("NEQ not supported")
li = [Foo(i) for i in range(5)]
s = pd.Series(li, index=[i for i in range(5)])
_, err = capsys.readouterr()
s.is_unique
_, err = capsys.readouterr()
assert len(err) == 0
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_monotonic
s = Series(np.arange(1000))
assert s.is_monotonic
assert s.is_monotonic_increasing
s = Series(np.arange(1000, 0, -1))
assert s.is_monotonic_decreasing
s = Series(pd.date_range('20130101', periods=10))
assert s.is_monotonic
assert s.is_monotonic_increasing
s = Series(list(reversed(s.tolist())))
assert not s.is_monotonic
assert s.is_monotonic_decreasing
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level='A')
assert_series_equal(backwards, res)
res = s.sort_index(level=['A', 'B'])
assert_series_equal(backwards, res)
res = s.sort_index(level='A', sort_remaining=False)
assert_series_equal(s, res)
res = s.sort_index(level=['A', 'B'], sort_remaining=False)
assert_series_equal(s, res)
def test_apply_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories are
# the same
values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = s.apply(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
def test_shift_int(self):
ts = self.ts.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_shift_categorical(self):
# GH 9416
s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')
assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
sp1 = s.shift(1)
assert_index_equal(s.index, sp1.index)
assert np.all(sp1.values.codes[:1] == -1)
assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
sn2 = s.shift(-2)
assert_index_equal(s.index, sn2.index)
assert np.all(sn2.values.codes[-2:] == -1)
assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
assert_index_equal(s.values.categories, sp1.values.categories)
assert_index_equal(s.values.categories, sn2.values.categories)
def test_unstack(self):
from numpy import nan
index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
labels=[[1, 1, 0, 0], [0, 1, 0, 2]])
s = Series(np.arange(4.), index=index)
unstacked = s.unstack()
expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
index=['bar', 'foo'],
columns=['one', 'three', 'two'])
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values},
index=exp_index).sort_index(level=0)
unstacked = s.unstack(0).sort_index()
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1, 2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'
], ['a', 'a', 'b', 'a', 'b'],
[1, 2, 1, 1, np.nan]])
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
columns=['cat', 'dog'])
tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_value_counts_datetime(self):
# most dtypes are tested in test_base.py
values = [pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 11:00')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'])
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_datetime_tz(self):
values = [pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'], tz='US/Eastern')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_period(self):
values = [pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-03', freq='M')]
exp_idx = pd.PeriodIndex(['2011-01', '2011-03', '2011-02'], freq='M')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.PeriodIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_ordered(self):
# most dtypes are tested in test_base.py
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=True)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_not_ordered(self):
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=False)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@pytest.fixture
def s_main_dtypes():
df = pd.DataFrame(
{'datetime': pd.to_datetime(['2003', '2002',
'2001', '2002',
'2005']),
'datetimetz': pd.to_datetime(
['2003', '2002',
'2001', '2002',
'2005']).tz_localize('US/Eastern'),
'timedelta': pd.to_timedelta(['3d', '2d', '1d',
'2d', '5d'])})
for dtype in ['int8', 'int16', 'int32', 'int64',
'float32', 'float64',
'uint8', 'uint16', 'uint32', 'uint64']:
df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
return df
class TestMode(object):
@pytest.mark.parametrize('dropna, expected', [
(True, Series([], dtype=np.float64)),
(False, Series([], dtype=np.float64))
])
def test_mode_empty(self, dropna, expected):
s = Series([], dtype=np.float64)
result = s.mode(dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dropna, data, expected', [
(True, [1, 1, 1, 2], [1]),
(True, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
(False, [1, 1, 1, 2], [1]),
(False, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
])
@pytest.mark.parametrize(
'dt',
list(np.typecodes['AllInteger'] + np.typecodes['Float'])
)
def test_mode_numerical(self, dropna, data, expected, dt):
s = Series(data, dtype=dt)
result = s.mode(dropna)
expected = Series(expected, dtype=dt)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dropna, expected', [
(True, [1.0]),
(False, [1, np.nan]),
])
def test_mode_numerical_nan(self, dropna, expected):
s = Series([1, 1, 2, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dropna, expected1, expected2, expected3', [
(True, ['b'], ['bar'], ['nan']),
(False, ['b'], [np.nan], ['nan'])
])
def test_mode_str_obj(self, dropna, expected1, expected2, expected3):
# Test string and object types.
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
result = s.mode(dropna)
expected1 = Series(expected1, dtype='c')
tm.assert_series_equal(result, expected1)
data = ['foo', 'bar', 'bar', np.nan, np.nan, np.nan]
s = Series(data, dtype=object)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected2)
data = ['foo', 'bar', 'bar', np.nan, np.nan, np.nan]
s = Series(data, dtype=object).astype(str)
result = s.mode(dropna)
expected3 = Series(expected3, dtype=str)
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize('dropna, expected1, expected2', [
(True, ['foo'], ['foo']),
(False, ['foo'], [np.nan])
])
def test_mode_mixeddtype(self, dropna, expected1, expected2):
s = Series([1, 'foo', 'foo'])
result = s.mode(dropna)
expected = Series(expected1)
tm.assert_series_equal(result, expected)
s = Series([1, 'foo', 'foo', np.nan, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dropna, expected1, expected2', [
(True, ['1900-05-03', '2011-01-03', '2013-01-02'],
['2011-01-03', '2013-01-02']),
(False, [np.nan], [np.nan, '2011-01-03', '2013-01-02']),
])
def test_mode_datetime(self, dropna, expected1, expected2):
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03', 'nan', 'nan'], dtype='M8[ns]')
result = s.mode(dropna)
expected1 = Series(expected1, dtype='M8[ns]')
tm.assert_series_equal(result, expected1)
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02', 'nan', 'nan'],
dtype='M8[ns]')
result = s.mode(dropna)
expected2 = Series(expected2, dtype='M8[ns]')
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize('dropna, expected1, expected2', [
(True, ['-1 days', '0 days', '1 days'], ['2 min', '1 day']),
(False, [np.nan], [np.nan, '2 min', '1 day']),
])
def test_mode_timedelta(self, dropna, expected1, expected2):
# gh-5986: Test timedelta types.
s = Series(['1 days', '-1 days', '0 days', 'nan', 'nan'],
dtype='timedelta64[ns]')
result = s.mode(dropna)
expected1 = Series(expected1, dtype='timedelta64[ns]')
tm.assert_series_equal(result, expected1)
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min', 'nan', 'nan'],
dtype='timedelta64[ns]')
result = s.mode(dropna)
expected2 = Series(expected2, dtype='timedelta64[ns]')
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize('dropna, expected1, expected2, expected3', [
(True, Categorical([1, 2], categories=[1, 2]),
Categorical(['a'], categories=[1, 'a']),
Categorical([3, 1], categories=[3, 2, 1], ordered=True)),
(False, Categorical([np.nan], categories=[1, 2]),
Categorical([np.nan, 'a'], categories=[1, 'a']),
Categorical([np.nan, 3, 1], categories=[3, 2, 1], ordered=True)),
])
def test_mode_category(self, dropna, expected1, expected2, expected3):
s = Series(Categorical([1, 2, np.nan, np.nan]))
result = s.mode(dropna)
expected1 = Series(expected1, dtype='category')
tm.assert_series_equal(result, expected1)
s = Series(Categorical([1, 'a', 'a', np.nan, np.nan]))
result = s.mode(dropna)
expected2 = Series(expected2, dtype='category')
tm.assert_series_equal(result, expected2)
s = Series(Categorical([1, 1, 2, 3, 3, np.nan, np.nan],
categories=[3, 2, 1], ordered=True))
result = s.mode(dropna)
expected3 = Series(expected3, dtype='category')
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize('dropna, expected1, expected2', [
(True, [2**63], [1, 2**63]),
(False, [2**63], [1, 2**63])
])
def test_mode_intoverflow(self, dropna, expected1, expected2):
# Test for uint64 overflow.
s = Series([1, 2**63, 2**63], dtype=np.uint64)
result = s.mode(dropna)
expected1 = Series(expected1, dtype=np.uint64)
tm.assert_series_equal(result, expected1)
s = Series([1, 2**63], dtype=np.uint64)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=np.uint64)
tm.assert_series_equal(result, expected2)
@pytest.mark.skipif(not compat.PY3, reason="only PY3")
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
expected = Series(['foo', np.nan])
s = Series([1, 'foo', 'foo', np.nan, np.nan])
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = s.mode(dropna=False)
result = result.sort_values().reset_index(drop=True)
tm.assert_series_equal(result, expected)
def assert_check_nselect_boundary(vals, dtype, method):
# helper function for 'test_boundary_{dtype}' tests
s = Series(vals, dtype=dtype)
result = getattr(s, method)(3)
expected_idxr = [0, 1, 2] if method == 'nsmallest' else [3, 2, 1]
expected = s.loc[expected_idxr]
tm.assert_series_equal(result, expected)
class TestNLargestNSmallest(object):
@pytest.mark.parametrize(
"r", [Series([3., 2, 1, 2, '5'], dtype='object'),
Series([3., 2, 1, 2, 5], dtype='object'),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3., 2, 1, 2, 5], dtype='complex128'),
Series(list('abcde')),
Series(list('abcde'), dtype='category')])
def test_error(self, r):
dt = r.dtype
msg = ("Cannot use method 'n(larg|small)est' with "
"dtype {dt}".format(dt=dt))
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with tm.assert_raises_regex(TypeError, msg):
method(arg)
@pytest.mark.parametrize(
"s",
[v for k, v in s_main_dtypes().iteritems()])
def test_nsmallest_nlargest(self, s):
# float, int, datetime64 (use i8), timedelts64 (same),
# object that are numbers, object that are strings
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.sort_values())
assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
def test_misc(self):
s = Series([3., np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
msg = 'keep must be either "first", "last"'
with tm.assert_raises_regex(ValueError, msg):
s.nsmallest(keep='invalid')
with tm.assert_raises_regex(ValueError, msg):
s.nlargest(keep='invalid')
# GH 15297
s = Series([1] * 5, index=[1, 2, 3, 4, 5])
expected_first = Series([1] * 3, index=[1, 2, 3])
expected_last = Series([1] * 3, index=[5, 4, 3])
result = s.nsmallest(3)
assert_series_equal(result, expected_first)
result = s.nsmallest(3, keep='last')
assert_series_equal(result, expected_last)
result = s.nlargest(3)
assert_series_equal(result, expected_first)
result = s.nlargest(3, keep='last')
assert_series_equal(result, expected_last)
@pytest.mark.parametrize('n', range(1, 5))
def test_n(self, n):
# GH 13412
s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
result = s.nlargest(n)
expected = s.sort_values(ascending=False).head(n)
assert_series_equal(result, expected)
result = s.nsmallest(n)
expected = s.sort_values().head(n)
assert_series_equal(result, expected)
def test_boundary_integer(self, nselect_method, any_int_dtype):
# GH 21426
dtype_info = np.iinfo(any_int_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val, min_val + 1, max_val - 1, max_val]
assert_check_nselect_boundary(vals, any_int_dtype, nselect_method)
def test_boundary_float(self, nselect_method, float_dtype):
# GH 21426
dtype_info = np.finfo(float_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
min_2nd, max_2nd = np.nextafter(
[min_val, max_val], 0, dtype=float_dtype)
vals = [min_val, min_2nd, max_2nd, max_val]
assert_check_nselect_boundary(vals, float_dtype, nselect_method)
@pytest.mark.parametrize('dtype', ['datetime64[ns]', 'timedelta64[ns]'])
def test_boundary_datetimelike(self, nselect_method, dtype):
# GH 21426
# use int64 bounds and +1 to min_val since true minimum is NaT
# (include min_val/NaT at end to maintain same expected_idxr)
dtype_info = np.iinfo('int64')
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]
assert_check_nselect_boundary(vals, dtype, nselect_method)
class TestCategoricalSeriesAnalytics(object):
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
assert result == 2
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
pytest.raises(TypeError, lambda: cat.min())
pytest.raises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == "b"
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == 1
def test_value_counts(self):
# GH 12835
cats = Categorical(list('abcccb'), categories=list('cabd'))
s = Series(cats, name='xxx')
res = s.value_counts(sort=False)
exp_index = CategoricalIndex(list('cabd'), categories=cats.categories)
exp = Series([3, 1, 2, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp_index = CategoricalIndex(list('cbad'), categories=cats.categories)
exp = Series([3, 2, 1, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
# check object dtype handles the Series.name as the same
# (tested in test_base.py)
s = Series(["a", "b", "c", "c", "c", "b"], name='xxx')
res = s.value_counts()
exp = Series([3, 2, 1], name='xxx', index=["c", "b", "a"])
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# see gh-9443
# sanity check
s = Series(["a", "b", "a"], dtype="category")
exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# same Series via two different constructions --> same behaviour
series = [
Series(["a", "b", None, "a", None, None], dtype="category"),
Series(Categorical(["a", "b", None, "a", None, None],
categories=["a", "b"]))
]
for s in series:
# None is a NaN value, so we exclude its count here
exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# we don't exclude the count of None and sort by counts
exp = Series([3, 2, 1], index=CategoricalIndex([np.nan, "a", "b"]))
res = s.value_counts(dropna=False)
tm.assert_series_equal(res, exp)
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan]))
res = s.value_counts(dropna=False, sort=False)
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]",
pytest.param("datetime64[D]",
marks=pytest.mark.xfail(reason="issue7996"))]
)
@pytest.mark.parametrize("is_ordered", [True, False])
def test_drop_duplicates_categorical_non_bool(self, dtype, is_ordered):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
tc1 = Series(Categorical(input1, categories=cat_array,
ordered=is_ordered))
expected = Series([False, False, False, True])
tm.assert_series_equal(tc1.duplicated(), expected)
tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, False])
tm.assert_series_equal(tc1.duplicated(keep='last'), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep='last'),
tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, True])
tm.assert_series_equal(tc1.duplicated(keep=False), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
tc2 = Series(Categorical(
input2, categories=cat_array, ordered=is_ordered)
)
expected = Series([False, False, False, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(), expected)
tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, False, False, False])
tm.assert_series_equal(tc2.duplicated(keep='last'), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep='last'),
tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(keep=False), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
@pytest.mark.parametrize("is_ordered", [True, False])
def test_drop_duplicates_categorical_bool(self, is_ordered):
tc = Series(Categorical([True, False, True, False],
categories=[True, False], ordered=is_ordered))
expected = Series([False, False, True, True])
tm.assert_series_equal(tc.duplicated(), expected)
tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, False, False])
tm.assert_series_equal(tc.duplicated(keep='last'), expected)
tm.assert_series_equal(tc.drop_duplicates(keep='last'), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, True, True])
tm.assert_series_equal(tc.duplicated(keep=False), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
| {
"content_hash": "97756c3ba4f78df10cb1939cfe5e3d4b",
"timestamp": "",
"source": "github",
"line_count": 2256,
"max_line_length": 79,
"avg_line_length": 36.82225177304964,
"alnum_prop": 0.5316416077812955,
"repo_name": "louispotok/pandas",
"id": "b9c7b837b8b817e8bfc1dd84e18c99ee0a88d4c5",
"size": "83121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/series/test_analytics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "432930"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "563"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "13452425"
},
{
"name": "Shell",
"bytes": "25056"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
'''This tutorial does NOT require trigonometry, but if you know some,
you can automatically generate some fancier shapes like regular polygons and
stars, as illustrated here, after importing some trig from the math module.
(And you can copy in the part before main and use the functions.)
'''
from graphics import *
from math import pi, sin, cos
toRad = pi/180 # convert degrees to radians
def atAngle(x, y, distance, angleDegrees):
'''return the Point reached by going out from Point(x,y),
the given distance, at the given angle counterclockwise from horizontal.
'''
return Point(x + distance*cos(angleDegrees*toRad),
y + distance*sin(angleDegrees*toRad) )
def regularPolygon(center, radius, sides, rotationDegrees, win):
'''Draw and return a regular polygon in GraphWin win, given the
center Point, the radius from center to vertices,
the number of sides, and degrees of rotation (0 means a flat top).
'''
vertices = []
angle = 90 + 180/sides + rotationDegrees # flat side up with 0 rotation
for i in range(sides):
vertices.append(atAngle(center.getX(), center.getY(), radius, angle))
angle = angle + 360/sides
poly = Polygon(vertices)
poly.draw(win)
return poly
def star(center, radius, points, win):
'''Draw and return a regular points-pointed star in GraphWin win, given the
center Point, the radius from center to points.
'''
vertices = []
x = center.getX()
y = center.getY()
angle = 90 # start up
angleDiff = 180/points # radial angle between points
angPoint = (2 - (points % 2))*angleDiff/2 # 2 vert apart for even, one odd
innerRadius = (radius*sin(.5*angPoint*toRad)/ # using trig law of sines
sin(.5*(angleDiff+angPoint)*toRad) )
for i in range(points):
vertices.append(atAngle(x, y, radius, angle))
angle = angle + angleDiff
vertices.append(atAngle(x, y, innerRadius, angle))
angle = angle + angleDiff
poly = Polygon(vertices)
poly.draw(win)
return poly
def main():
win = GraphWin('Regular Polygons', 400, 300)
win.yUp() # make right side up coordinates!
regularPolygon(Point(40, 240), 30, 4, 45, win) #square rotated to diamond.
c8 = Point(250, 170)
p = regularPolygon(c8, 100, 8, 0, win)
p.setWidth(5)
p = star(c8, 80, 8, win)
p.setOutline('red')
c5 = Point(100, 80)
p = regularPolygon(c5, 70, 5, 36, win) # 36: vertex up
p.setFill('blue')
p = regularPolygon(c5, 50, 5, 0, win)
p.setFill('green')
p = star(c5, 40, 5, win)
p.setFill('yellow')
win.promptClose(240, 40)
main()
| {
"content_hash": "dbbf01ca04a6ecdcb6d00324df211400",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 35.01298701298701,
"alnum_prop": 0.6450296735905044,
"repo_name": "hwheeler01/comp150",
"id": "6fce9b37e3985952abb3be5acaca7285572e9581",
"size": "2696",
"binary": false,
"copies": "2",
"ref": "refs/heads/gh-pages",
"path": "examples/polygons.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "11466"
},
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "CSS",
"bytes": "121532"
},
{
"name": "HTML",
"bytes": "5858311"
},
{
"name": "JavaScript",
"bytes": "524"
},
{
"name": "Jupyter Notebook",
"bytes": "6422478"
},
{
"name": "Python",
"bytes": "365319"
}
],
"symlink_target": ""
} |
"""This pip smoke test verifies dependency files exist in the pip package.
This script runs bazel queries to see what python files are required by the
tests and ensures they are in the pip package superset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")))
PIP_PACKAGE_QUERY_EXPRESSION = (
"deps(//tensorflow/tools/pip_package:build_pip_package)")
def GetBuild(dir_base):
"""Get the list of BUILD file all targets recursively startind at dir_base."""
items = []
for root, _, files in os.walk(dir_base):
for name in files:
if (name == "BUILD" and
root.find("tensorflow/contrib/lite/examples/android") == -1):
items.append("//" + root + ":all")
return items
def BuildPyTestDependencies():
python_targets = GetBuild("tensorflow/python")
contrib_targets = GetBuild("tensorflow/contrib")
tensorboard_targets = GetBuild("tensorflow/contrib/tensorboard")
tensorflow_targets = GetBuild("tensorflow")
# Build list of test targets,
# python + contrib - tensorboard - attr(manual|pno_pip)
targets = " + ".join(python_targets)
for t in contrib_targets:
targets += " + " + t
for t in tensorboard_targets:
targets += " - " + t
targets += ' - attr(tags, "manual|no_pip", %s)' % " + ".join(
tensorflow_targets)
query_kind = "kind(py_test, %s)" % targets
# Skip benchmarks etc.
query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind
# Get the dependencies
query_deps = "deps(%s, 1)" % query_filter
return python_targets, query_deps
PYTHON_TARGETS, PY_TEST_QUERY_EXPRESSION = BuildPyTestDependencies()
# Hard-coded blacklist of files if not included in pip package
# TODO(amitpatankar): Clean up blacklist.
BLACKLIST = [
"//tensorflow/python:extra_py_tests_deps",
"//tensorflow/cc/saved_model:saved_model_half_plus_two",
"//tensorflow:no_tensorflow_py_deps",
"//tensorflow/tools/pip_package:win_pip_package_marker",
"//tensorflow/python:test_ops_2",
"//tensorflow/python:tf_optimizer",
"//tensorflow/python:compare_test_proto_py",
"//tensorflow/core:image_testdata",
"//tensorflow/core:lmdb_testdata",
"//tensorflow/core/kernels/cloud:bigquery_reader_ops",
"//tensorflow/python/feature_column:vocabulary_testdata",
"//tensorflow/python:framework/test_file_system.so",
# contrib
"//tensorflow/contrib/session_bundle:session_bundle_half_plus_two",
"//tensorflow/contrib/keras:testing_utils",
"//tensorflow/contrib/lite/experimental/examples/lstm:tflite_lstm",
"//tensorflow/contrib/lite/experimental/examples/lstm:tflite_lstm.py",
"//tensorflow/contrib/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test", # pylint:disable=line-too-long
"//tensorflow/contrib/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test.py", # pylint:disable=line-too-long
"//tensorflow/contrib/lite/python:interpreter",
"//tensorflow/contrib/lite/python:interpreter_test",
"//tensorflow/contrib/lite/python:interpreter.py",
"//tensorflow/contrib/lite/python:interpreter_test.py",
"//tensorflow/contrib/ffmpeg:test_data",
"//tensorflow/contrib/fused_conv:fused_conv2d_bias_activation_op_test_base",
"//tensorflow/contrib/hadoop:test_data",
"//tensorflow/contrib/factorization/examples:mnist",
"//tensorflow/contrib/factorization/examples:mnist.py",
"//tensorflow/contrib/factorization:factorization_py_CYCLIC_DEPENDENCIES_THAT_NEED_TO_GO", # pylint:disable=line-too-long
"//tensorflow/contrib/framework:checkpoint_ops_testdata",
"//tensorflow/contrib/bayesflow:reinforce_simple_example",
"//tensorflow/contrib/bayesflow:examples/reinforce_simple/reinforce_simple_example.py", # pylint:disable=line-too-long
"//tensorflow/contrib/timeseries/examples:predict",
"//tensorflow/contrib/timeseries/examples:multivariate",
"//tensorflow/contrib/timeseries/examples:known_anomaly",
"//tensorflow/contrib/timeseries/examples:data/period_trend.csv", # pylint:disable=line-too-long
"//tensorflow/contrib/timeseries/python/timeseries:test_utils",
"//tensorflow/contrib/timeseries/python/timeseries/state_space_models:test_utils", # pylint:disable=line-too-long
"//tensorflow/contrib/image:sparse_image_warp_test_data",
]
def main():
"""This script runs the pip smoke test.
Raises:
RuntimeError: If any dependencies for py_tests exist in subSet
Prerequisites:
1. Bazel is installed.
2. Running in github repo of tensorflow.
3. Configure has been run.
"""
# pip_package_dependencies_list is the list of included files in pip packages
pip_package_dependencies = subprocess.check_output(
["bazel", "cquery", PIP_PACKAGE_QUERY_EXPRESSION])
pip_package_dependencies_list = pip_package_dependencies.strip().split("\n")
pip_package_dependencies_list = [
x.split()[0] for x in pip_package_dependencies_list
]
print("Pip package superset size: %d" % len(pip_package_dependencies_list))
# tf_py_test_dependencies is the list of dependencies for all python
# tests in tensorflow
tf_py_test_dependencies = subprocess.check_output(
["bazel", "cquery", PY_TEST_QUERY_EXPRESSION])
tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split("\n")
tf_py_test_dependencies_list = [
x.split()[0] for x in tf_py_test_dependencies.strip().split("\n")
]
print("Pytest dependency subset size: %d" % len(tf_py_test_dependencies_list))
missing_dependencies = []
# File extensions and endings to ignore
ignore_extensions = ["_test", "_test.py"]
ignored_files = 0
blacklisted_files = len(BLACKLIST)
# Compare dependencies
for dependency in tf_py_test_dependencies_list:
if dependency and dependency.startswith("//tensorflow"):
ignore = False
# Ignore extensions
if any(dependency.endswith(ext) for ext in ignore_extensions):
ignore = True
ignored_files += 1
# Check if the dependency is in the pip package, the blacklist, or
# should be ignored because of its file extension
if not (ignore or dependency in pip_package_dependencies_list or
dependency in BLACKLIST):
missing_dependencies.append(dependency)
print("Ignored files: %d" % ignored_files)
print("Blacklisted files: %d" % blacklisted_files)
if missing_dependencies:
print("Missing the following dependencies from pip_packages:")
for missing_dependency in missing_dependencies:
print("\nMissing dependency: %s " % missing_dependency)
print("Affected Tests:")
rdep_query = ("rdeps(kind(py_test, %s), %s)" %
(" + ".join(PYTHON_TARGETS), missing_dependency))
affected_tests = subprocess.check_output(["bazel", "cquery", rdep_query])
affected_tests_list = affected_tests.split("\n")[:-2]
print("\n".join(affected_tests_list))
raise RuntimeError("""
One or more added test dependencies are not in the pip package.
If these test dependencies need to be in TensorFlow pip package, please add them to //tensorflow/tools/pip_package/BUILD.
Else either blacklist the dependencies in //tensorflow/tools/pip_package/pip_smoke_test.py
or add no_pip tag to the test.""")
else:
print("TEST PASSED")
if __name__ == "__main__":
main()
| {
"content_hash": "e470f7567a6d76167d2f4f1c37b8a91e",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 128,
"avg_line_length": 41.80898876404494,
"alnum_prop": 0.7037086804622413,
"repo_name": "girving/tensorflow",
"id": "45106b35fc2e2c5bb981b178785ebf3089582c4d",
"size": "8131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/tools/pip_package/pip_smoke_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
_male_start_hi = ['https://media.giphy.com/media/dzaUX7CAG0Ihi/giphy-downsized.gif',
'https://media.giphy.com/media/oJiCqvIqPZE3u/giphy.gif']
_female_start_hi = ['https://media.giphy.com/media/a1QLZUUtCcgyA/giphy-downsized.gif',
'https://media.giphy.com/media/EPJZhOrStSpz2/giphy-downsized.gif']
import random
def get_start_hi(gender):
if gender == "male":
#return random.choice(_male_start_hi)
return _male_start_hi[1]
elif gender == "female":
#return random.choice(_female_start_hi)
return _female_start_hi[1]
else:
return random.choice(_male_start_hi) | {
"content_hash": "7f56036ef04ee2e2b9bdf3cf8b44eddc",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 86,
"avg_line_length": 40.375,
"alnum_prop": 0.6455108359133127,
"repo_name": "mayukh18/BlindChat",
"id": "c8688fee36421aa99493fb5277e4da3899d3b2bc",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/gifs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41"
},
{
"name": "HTML",
"bytes": "3075"
},
{
"name": "Python",
"bytes": "59076"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.wrapper.entry as ecwe
def get_last_entry(user_id, time_query, config_key):
user_ts = esta.TimeSeries.get_time_series(user_id)
# get the list of overrides for this time range. This should be non zero
# only if there has been an override since the last run, which needs to be
# saved back into the cache.
config_overrides = list(user_ts.find_entries([config_key], time_query))
logging.debug("Found %d user overrides for user %s" % (len(config_overrides), user_id))
if len(config_overrides) == 0:
logging.warning("No user defined overrides for key %s and user %s, early return" % (config_key, user_id))
return (None, None)
else:
# entries are sorted by the write_ts, we can take the last value
coe = ecwe.Entry(config_overrides[-1])
logging.debug("last entry is %s" % coe)
return (coe.data, coe.metadata.write_ts)
| {
"content_hash": "74dda49e463f37253f5a0f0c08f2ac0c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 113,
"avg_line_length": 44.285714285714285,
"alnum_prop": 0.7064516129032258,
"repo_name": "shankari/e-mission-server",
"id": "3eb81c7ace07d472f74736aef826474837546f73",
"size": "1240",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "emission/analysis/configs/config_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "97039"
},
{
"name": "Dockerfile",
"bytes": "1306"
},
{
"name": "HTML",
"bytes": "64875"
},
{
"name": "JavaScript",
"bytes": "116761"
},
{
"name": "Jupyter Notebook",
"bytes": "4656584"
},
{
"name": "Python",
"bytes": "2209414"
},
{
"name": "SCSS",
"bytes": "41755"
},
{
"name": "Shell",
"bytes": "11419"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2016, Jose Dolz .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVIA Department, ETS, Montreal.
"""
import pdb
import numpy as np
# ----- Dice Score -----
def computeDice(autoSeg, groundTruth):
""" Returns
-------
DiceArray : floats array
Dice coefficient as a float on range [0,1].
Maximum similarity = 1
No similarity = 0 """
n_classes = int( np.max(groundTruth) + 1)
DiceArray = []
for c_i in xrange(1,n_classes):
idx_Auto = np.where(autoSeg.flatten() == c_i)[0]
idx_GT = np.where(groundTruth.flatten() == c_i)[0]
autoArray = np.zeros(autoSeg.size,dtype=np.bool)
autoArray[idx_Auto] = 1
gtArray = np.zeros(autoSeg.size,dtype=np.bool)
gtArray[idx_GT] = 1
dsc = dice(autoArray, gtArray)
#dice = np.sum(autoSeg[groundTruth==c_i])*2.0 / (np.sum(autoSeg) + np.sum(groundTruth))
DiceArray.append(dsc)
return DiceArray
def dice(im1, im2):
"""
Computes the Dice coefficient
----------
im1 : boolean array
im2 : boolean array
If they are not boolean, they will be converted.
-------
It returns the Dice coefficient as a float on the range [0,1].
1: Perfect overlapping
0: Not overlapping
"""
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.size != im2.size:
raise ValueError("Size mismatch between input arrays!!!")
im_sum = im1.sum() + im2.sum()
if im_sum == 0:
return 1.0
# Compute Dice
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / im_sum
| {
"content_hash": "c97d891361794d5270f996c6fe1d1645",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 95,
"avg_line_length": 30.822222222222223,
"alnum_prop": 0.6398702235039654,
"repo_name": "josedolz/LiviaNET",
"id": "2bbf33c46f6886279972559af098cd9be5284764",
"size": "2774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/LiviaNet/Modules/General/Evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159013"
}
],
"symlink_target": ""
} |
"""File comparison methods for different file formats.
This module provides a main file comparison method, :meth:`file_cmp()`, that compares two files and returns
True if they are equivalent, False otherwise. The comparison is made differently depending on the file format.
For instance, two Newick files are considered equal if one tree is the result of a permutation of the other.
Typical usage examples::
file_cmp('a/homo_sapiens.fa', 'b/homo_sapiens.fa')
from pathlib import Path
file_cmp(Path('a', 'tree1.nw'), Path('b', 'tree2.nw'))
"""
__all__ = ['NEWICK_EXT', 'file_cmp']
import filecmp
from pathlib import Path
from Bio import Phylo
from .dircmp import PathLike
# File extensions that should be interpreted as the same file format:
NEWICK_EXT = {'.nw', '.nwk', '.newick', '.nh'}
def file_cmp(fpath1: PathLike, fpath2: PathLike) -> bool:
"""Returns True if files `fpath1` and `fpath2` are equivalent, False otherwise.
Args:
fpath1: First file path.
fpath2: Second file path.
"""
fext1 = Path(fpath1).suffix
fext2 = Path(fpath2).suffix
if (fext1 in NEWICK_EXT) and (fext2 in NEWICK_EXT):
return _tree_cmp(fpath1, fpath2)
# Resort to a shallow binary file comparison (files with identical os.stat() signatures are taken to be
# equal)
return filecmp.cmp(str(fpath1), str(fpath2))
def _tree_cmp(fpath1: PathLike, fpath2: PathLike, tree_format: str = 'newick') -> bool:
"""Returns True if trees stored in `fpath1` and `fpath2` are equivalent, False otherwise.
Args:
fpath1: First tree file path.
fpath2: Second tree file path.
tree_format: Tree format, i.e. ``newick``, ``nexus``, ``phyloxml`` or ``nexml``.
"""
ref_tree = Phylo.read(fpath1, tree_format)
target_tree = Phylo.read(fpath2, tree_format)
# Both trees are considered equal if they have the same leaves and the same distance from each to the root
ref_dists = {leaf.name: ref_tree.distance(leaf) for leaf in ref_tree.get_terminals()}
target_dists = {leaf.name: target_tree.distance(leaf) for leaf in target_tree.get_terminals()}
return ref_dists == target_dists
| {
"content_hash": "f792fe411e79d1fe8960b19c2d823144",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 110,
"avg_line_length": 35.68852459016394,
"alnum_prop": 0.6871841984382178,
"repo_name": "Ensembl/ensembl-compara",
"id": "bd758c35d56d3ad2144e28a64caf0e86d2fe9f7a",
"size": "2832",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/108",
"path": "src/python/lib/ensembl/compara/filesys/filecmp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AngelScript",
"bytes": "854"
},
{
"name": "C",
"bytes": "1732"
},
{
"name": "HTML",
"bytes": "3156"
},
{
"name": "Java",
"bytes": "42160"
},
{
"name": "JavaScript",
"bytes": "14023"
},
{
"name": "Nextflow",
"bytes": "5551"
},
{
"name": "Perl",
"bytes": "7428049"
},
{
"name": "Python",
"bytes": "137625"
},
{
"name": "R",
"bytes": "18440"
},
{
"name": "Shell",
"bytes": "16970"
},
{
"name": "Visual Basic 6.0",
"bytes": "2722"
},
{
"name": "XS",
"bytes": "16045"
}
],
"symlink_target": ""
} |
from django.views.generic import ListView
from haystack.query import SearchQuerySet
from haystack.utils.geo import Point, D
from ..models import Store
from ..utils import caching_geo_lookup
class DistanceSearchView(ListView):
template_name = 'stores/store_search.html'
distance = 25
def get_location(self):
# TODO: geopy the location based on kwargs
location = self.request.GET.get('location')
lat = self.request.GET.get('lat')
lng = self.request.GET.get('lng')
if location:
name, geo = caching_geo_lookup(location)
elif lat and lng:
geo = (float(lat), float(lng))
else:
geo = None
self.location_geo = geo
return Point(geo[1], geo[0])
def get_distance(self):
return D(km=self.request.GET.get('distance', self.distance))
def get_queryset(self):
location = self.get_location()
if not location:
return SearchQuerySet.none
distance = self.get_distance()
print location, distance
return SearchQuerySet().dwithin('location', location, distance)\
.distance('location', location).order_by('-distance')
def get_context_data(self, **kwargs):
ctx = super(DistanceSearchView, self).get_context_data(**kwargs)
ctx.update({
'location': self.request.GET.get('location'),
'location_geo': self.location_geo,
})
return ctx | {
"content_hash": "35505e1cbcce03ff6f105bc98edea843",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 72,
"avg_line_length": 32,
"alnum_prop": 0.6195652173913043,
"repo_name": "nikdoof/vapemap",
"id": "2de0a70ebdf4b2a83dbc12d5638b55b6c5761098",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/stores/views/search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "107"
},
{
"name": "JavaScript",
"bytes": "3216"
},
{
"name": "Puppet",
"bytes": "7746"
},
{
"name": "Python",
"bytes": "204060"
}
],
"symlink_target": ""
} |
import T2
import sys
from prompt_toolkit import prompt
from prompt_toolkit.contrib.completers import WordCompleter
bird_person = T2.Hand("Bird Person", "bp@gmail.com")
rick = T2.Hand("Rick Sanches", "rs@gmail.com")
morty = T2.Hand("Morty", "mr@gmail.com")
b1 = T2.Book("Coders at Work: Reflections on the Craft of Programming",
"Peter Seibel", "Apress", 2009)
b2 = T2.Book("Code Complete: A Practical Handbook of Software Construction",
"Steve McConnell", " Microsoft Press", "2009")
b3 = T2.Book("The Mythical Man Month", "Frederick P. Brooks, Jr. Page",
"Addison-Wesley Professional", "1995")
b4 = T2.Book("Don’t Make Me Think, Revisited: A Common Sense Approach to Web Usability",
"Steve Krug", "New Riders", "2014")
b5 = T2.Book("The Pragmatic Programmer: From Journeyman to Master",
"Andrew Hunt", "Addison-Wesley Professional", "1999")
np1 = T2.Newspaper("The New York Times", "01-23-2017")
np2 = T2.Newspaper("The Wall Street Journal", "05-17-2017")
np3 = T2.Newspaper("Los Angeles Times", "03-10-2017")
np4 = T2.Newspaper("New York Post", "05-05-2017")
np5 = T2.Newspaper("Chicago Tribune", "06-07-2017")
my_archive = T2.Archive()
my_archive.add([bird_person, rick, morty])
my_archive.add([b1, b2, b3, b4, b5])
my_archive.add([np1, np2, np3, np4, np5])
def give():
print("==============================")
print("Choice customer from the list:")
print("==============================")
customers = {c[1].name: c[0] for c in enumerate(my_archive.get_items('customer'))}
customers_completer = WordCompleter(customers.keys())
for item in customers.keys():
print("%s" % item)
print()
customer = prompt('customer ~> ', completer=customers_completer)
customer_id = customers[customer]
print("===========================")
print("Choice which book you give:")
print("===========================")
books = {b.title: b for b in my_archive.get_items('book')}
books_completer = WordCompleter(books.keys())
for item in books.keys():
print("%s" % item)
print()
book = prompt('book ~> ', completer=books_completer)
try:
my_archive.give_item(books[book], customer_id)
except Exception as msg:
print("====>> %s" % msg)
def take():
pass
def stats():
print(my_archive)
def customers():
pass
commands = {"stats": stats, "customers": customers, "give": give, "take": take}
commands_completer = WordCompleter(commands.keys())
def help():
print("Commands available:")
output = ""
for c in commands.keys():
output += str(c) + " "
print(output)
while True:
try:
command = prompt('archive ~> ', completer=commands_completer)
commands[command]()
except (KeyError):
help()
except (KeyboardInterrupt, EOFError):
print("\n See ya!")
sys.exit(0)
| {
"content_hash": "1088ee814b4f6e6d5b9a6125c92a2dc0",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 88,
"avg_line_length": 29.19607843137255,
"alnum_prop": 0.5936870382807253,
"repo_name": "kvantos/intro_to_python_class",
"id": "bed1bbaf730db271792e28e3ef5c7256236e9265",
"size": "3028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Library.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "97807"
}
],
"symlink_target": ""
} |
"""
QUOTE函数返回列所对应的原生SQL表达式。
Ref: https://www.sqlite.org/lang_corefunc.html
"""
from sqlite4dummy.tests.basetest import BaseUnittest
from datetime import datetime, date
import unittest
class Unittest(BaseUnittest):
def setUp(self):
self.connect_database()
def test_all(self):
cursor = self.cursor
# insert some data
create_sql = \
"""
CREATE TABLE test
(
_id INTEGER PRIMARY KEY,
_str TEXT,
_bytes BLOB,
_date DATE,
_datetime TIMESTAMP
)
"""
insert_sql = "INSERT INTO test VALUES (?,?,?,?,?)"
cursor.execute(create_sql)
cursor.execute(insert_sql,
(
1,
r"""abc`~!@#$%^&*()_+-={}[]|\:;'"<>,.?/""",
"Hello World".encode("utf-8"),
date.today(),
datetime.now(),
)
)
select_sql = \
"""
SELECT
quote(_str), quote(_bytes), quote(_date), quote(_datetime)
FROM
test
"""
print(cursor.execute(select_sql).fetchone())
def test_usage(self):
"""QUOTE可以用来获得原生SQL表达式。
"""
cursor = self.cursor
print(cursor.execute("SELECT QUOTE(?)",
(r"""abc`~!@#$%^&*()_+-={}[]|\:;'"<>,.?/""", )).fetchone())
print(cursor.execute("SELECT QUOTE(?)",
("Hello World".encode("utf-8"), )).fetchone())
print(cursor.execute("SELECT QUOTE(?)",
(date.today(), )).fetchone())
print(cursor.execute("SELECT QUOTE(?)",
(datetime.now(), )).fetchone())
if __name__ == "__main__":
unittest.main() | {
"content_hash": "0e8b94c5d3cb16e88a3d6b80ced552f7",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 71,
"avg_line_length": 27.03076923076923,
"alnum_prop": 0.4632896983494593,
"repo_name": "MacHu-GWU/sqlite4dummy-project",
"id": "6a0a9698cbab91a25dcb4564f2fd0d4a756554ee",
"size": "1858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlite4dummy/tests/sqlite3_in_python/func/test_quote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "8001"
},
{
"name": "Makefile",
"bytes": "7442"
},
{
"name": "Python",
"bytes": "269635"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import copy
import numpy
import theano
from theano import Variable, Constant
from theano import tensor
from theano.compile import SharedVariable
from theano.sandbox.cuda.type import CudaNdarrayType
try:
# We must do those import to be able to create the full doc when nvcc
# is not available
from theano.sandbox.cuda import filter as type_support_filter
from theano.sandbox.cuda.basic_ops import HostFromGpu, GpuFromHost
except ImportError:
pass
class _operators(tensor.basic._tensor_py_operators):
"""
Define a few properties and conversion methods for CudaNdarray Variables.
The default implementation of arithemetic operators is to build graphs of
TensorType variables.
The optimization pass (specialization) will insert pure GPU implementations.
This approach relieves the Cuda-Ops of having to deal with input argument
checking and gradients.
"""
def _as_TensorVariable(self):
return HostFromGpu()(self)
def _as_CudaNdarrayVariable(self):
return self
dtype = property(lambda s: 'float32')
broadcastable = property(lambda s: s.type.broadcastable)
ndim = property(lambda s: s.type.ndim)
class CudaNdarrayVariable(_operators, Variable):
pass
CudaNdarrayType.Variable = CudaNdarrayVariable
class CudaNdarrayConstantSignature(tensor.TensorConstantSignature):
pass
class CudaNdarrayConstant(_operators, Constant):
def signature(self):
return CudaNdarrayConstantSignature((self.type, numpy.asarray(self.data)))
def __str__(self):
if self.name is not None:
return self.name
try:
data = str(numpy.asarray(self.data))
except Exception as e:
data = "error while transferring the value: " + str(e)
return "CudaNdarrayConstant{"+data+"}"
CudaNdarrayType.Constant = CudaNdarrayConstant
class CudaNdarraySharedVariable(_operators, SharedVariable):
"""
Shared Variable interface to CUDA-allocated arrays.
"""
get_value_return_ndarray = True
def get_value(self, borrow=False, return_internal_type=False):
"""
Return the value of this SharedVariable's internal array.
Parameters
----------
borrow
Permit the return of internal storage, when used in conjunction with
``return_internal_type=True``.
return_internal_type
True to return the internal ``cuda_ndarray`` instance rather than a
``numpy.ndarray`` (Default False).
By default ``get_value()`` copies from the GPU to a ``numpy.ndarray``
and returns that host-allocated array.
``get_value(False,True)`` will return a GPU-allocated copy of the
original GPU array.
``get_value(True,True)`` will return the original GPU-allocated array
without any copying.
"""
if return_internal_type or not self.get_value_return_ndarray:
# return a cuda_ndarray
if borrow:
return self.container.value
else:
return copy.deepcopy(self.container.value)
else: # return an ndarray
return numpy.asarray(self.container.value)
def set_value(self, value, borrow=False):
"""
Assign `value` to the GPU-allocated array.
Parameters
----------
borrow : bool
``True`` permits reusing `value` itself, ``False`` requires that
this function copies `value` into internal storage.
Notes
-----
Prior to Theano 0.3.1, set_value did not work in-place on the GPU. This
meant that sometimes, GPU memory for the new value would be allocated
before the old memory was released. If you're running near the limits of
GPU memory, this could cause you to run out of GPU memory.
Beginning with Theano 0.3.1, set_value will work in-place on the GPU, if
the following conditions are met:
* The destination on the GPU must be c_contiguous.
* The source is on the CPU.
* The old value must have the same dtype as the new value
(which is a given for now, since only float32 is
supported).
* The old and new value must have the same shape.
* The old value is being completely replaced by the new
value (not partially modified, e.g. by replacing some
subtensor of it).
* You change the value of the shared variable via
set_value, not via the .value accessors. You should not
use the .value accessors anyway, since they will soon be
deprecated and removed.
It is also worth mentioning that, for efficient transfer to the GPU,
Theano will make the new data ``c_contiguous``. This can require an
extra copy of the data on the host.
The inplace on gpu memory work when borrow is either True or False.
"""
if not borrow:
# TODO: check for cuda_ndarray type
if not isinstance(value, numpy.ndarray):
# in case this is a cuda_ndarray, we copy it
value = copy.deepcopy(value)
self.container.value = value # this will copy a numpy ndarray
def __getitem__(self, *args):
# Defined to explicitly use the implementation from `_operators`, since
# the definition in `SharedVariable` is only meant to raise an error.
return _operators.__getitem__(self, *args)
CudaNdarrayType.SharedVariable = CudaNdarraySharedVariable
def cuda_shared_constructor(value, name=None, strict=False,
allow_downcast=None, borrow=False,
broadcastable=None, target='gpu'):
"""
SharedVariable Constructor for CudaNdarrayType.
"""
if target != 'gpu':
raise TypeError('not for gpu')
# THIS CONSTRUCTOR TRIES TO CAST VALUE TO A FLOAT32, WHICH THEN GOES ONTO THE CARD
# SO INT shared vars, float64 shared vars, etc. all end up on the card.
# THIS IS NOT THE DEFAULT BEHAVIOUR THAT WE WANT.
# SEE float32_shared_constructor
# TODO: what should strict mean in this context, since we always have to make a copy?
if strict:
_value = value
else:
_value = theano._asarray(value, dtype='float32')
if not isinstance(_value, numpy.ndarray):
raise TypeError('ndarray required')
if _value.dtype.num != CudaNdarrayType.typenum:
raise TypeError('float32 ndarray required')
if broadcastable is None:
broadcastable = (False,) * len(value.shape)
type = CudaNdarrayType(broadcastable=broadcastable)
print("trying to return?")
try:
rval = CudaNdarraySharedVariable(type=type, value=_value, name=name, strict=strict)
except Exception as e:
print("ERROR", e)
raise
return rval
def float32_shared_constructor(value, name=None, strict=False,
allow_downcast=None, borrow=False,
broadcastable=None, target='gpu'):
"""
SharedVariable Constructor for CudaNdarrayType from numpy.ndarray or
CudaNdarray.
"""
if target != 'gpu':
raise TypeError('not for gpu')
if theano.sandbox.cuda.use.device_number is None:
theano.sandbox.cuda.use("gpu",
force=True,
default_to_move_computation_to_gpu=False,
move_shared_float32_to_gpu=False,
enable_cuda=False)
# if value isn't a float32 ndarray, or a CudaNdarray then raise
if not isinstance(value, (numpy.ndarray, theano.sandbox.cuda.CudaNdarray)):
raise TypeError('ndarray or CudaNdarray required')
if isinstance(value, numpy.ndarray) and value.dtype.num != CudaNdarrayType.typenum:
raise TypeError('float32 ndarray required')
if broadcastable is None:
broadcastable = (False,) * len(value.shape)
type = CudaNdarrayType(broadcastable=broadcastable)
get_value_return_ndarray = True
if isinstance(value, theano.sandbox.cuda.CudaNdarray):
get_value_return_ndarray = False
if borrow:
deviceval = value
else:
deviceval = value.copy()
else:
# type.broadcastable is guaranteed to be a tuple, which this next
# function requires
deviceval = type_support_filter(value, type.broadcastable, False, None)
try:
rval = CudaNdarraySharedVariable(type=type, value=deviceval, name=name, strict=strict)
except Exception as e:
print("ERROR", e)
raise
rval.get_value_return_ndarray = get_value_return_ndarray
return rval
| {
"content_hash": "603753ba55c4496d11edc4141950ffce",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 94,
"avg_line_length": 35.344,
"alnum_prop": 0.6420325939339068,
"repo_name": "rizar/attention-lvcsr",
"id": "17501cc6cb50fa24ce0fe81b81ee059fb889188e",
"size": "8836",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "libs/Theano/theano/sandbox/cuda/var.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "156742"
},
{
"name": "C++",
"bytes": "209135"
},
{
"name": "CSS",
"bytes": "3500"
},
{
"name": "Cuda",
"bytes": "231732"
},
{
"name": "Gnuplot",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "33356"
},
{
"name": "Jupyter Notebook",
"bytes": "191071"
},
{
"name": "Makefile",
"bytes": "973"
},
{
"name": "Python",
"bytes": "9313243"
},
{
"name": "Shell",
"bytes": "34454"
},
{
"name": "TeX",
"bytes": "102624"
}
],
"symlink_target": ""
} |
import os
import pwd
from st2common import log as logging
from st2common.models.system.action import RemoteAction
from st2common.util.shell import quote_unix
__all__ = [
'ParamikoRemoteCommandAction',
]
LOG = logging.getLogger(__name__)
LOGGED_USER_USERNAME = pwd.getpwuid(os.getuid())[0]
class ParamikoRemoteCommandAction(RemoteAction):
def get_full_command_string(self):
# Note: We pass -E to sudo because we want to preserve user provided
# environment variables
env_str = self._get_env_vars_export_string()
cwd = self.get_cwd()
if self.sudo:
if env_str:
command = quote_unix('%s && cd %s && %s' % (env_str, cwd, self.command))
else:
command = quote_unix('cd %s && %s' % (cwd, self.command))
command = 'sudo -E -- bash -c %s' % (command)
else:
if env_str:
command = '%s && cd %s && %s' % (env_str, cwd,
self.command)
else:
command = 'cd %s && %s' % (cwd, self.command)
LOG.debug('Command to run on remote host will be: %s', command)
return command
| {
"content_hash": "5177133ee4bf0ab194f82c228231821f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 30.15,
"alnum_prop": 0.5530679933665008,
"repo_name": "punalpatel/st2",
"id": "b3bda0251e992cf8de9771896d7a17663975ad49",
"size": "1986",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "st2common/st2common/models/system/paramiko_command_action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "41838"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3729615"
},
{
"name": "Shell",
"bytes": "39063"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
from cdefs.heap import Heap
##
# @brief Gets a list of historical/logged file sources, ExternalDataListener will know the first timestamp for the events
# it's processing, this class will ask them to read the data and depending on whether there is any data of interest
# ( for instance it could have data but of securities that are not of interest ),
# compute the next_event_timestamp
#
# Once the sources have next_event_timestamp two choices here :
# (i) sort the events based on time and based on the event time call the corresponding ExternalDataListener to process that event
# and fetch the next_event_timestamp and resort if the next_event_timestamp is != 0
# (ii) all the sources that have some data in the channels, sequentially call them to process all data they have
# and call any listeners as and when they feel a need to
# downsides of method (ii) :
# (a) if the time we take in processing events is very high then events from other sources could be ignored.
# (b) events are sure to not be chronological in live and hence different in hist and live
#
# At the end HistoricalDispatcher collects all sources in prev_external_data_listener_vec_ and calls delete on them
#
class HistoricalDispatcher( object ):
def __init__( self ):
self.external_data_listener_list = []
self.prev_external_data_listener_list = []
self.first_event_enqueued = False
##
# @brief If needed this can seek the historical sources to skip processing event until a given time, i.e start of trading hours etc
#
# @param end_time time till which we want to seek forward the historical file sources
#
def seek_hist_file_sources_to( self, seek_time ):
external_data_listener_list_copy = self.external_data_listener_list[ : ] # Will iterate over copy and delete from original
if not self.first_event_enqueued:
for listener in external_data_listener_list_copy: # Iterate over copy
has_events = listener.seek_to_first_event_after( seek_time )
if not has_events:
self.prev_external_data_listener_list.append( listener )
self.external_data_listener_list.remove( listener ) # Delete from original # TODO check would this work
self.first_event_enqueued = True
##
# @brief Adds a listener for the data events, the listner also deals with its own data
# All it needs from historical dispatcher is a callback which notifies when to process the events and upto
# what time the events can be processed
#
# @param new_data_listener A base class pointer through which various dereived listener/filesources will be added
#
def add_external_data_listener( self, new_data_listener ):
self.external_data_listener_list.append( new_data_listener )
##
# @brief A busy loop which processes a bunch of data sources earlier added and distributes events to its listeners
# Once called, will keep on processing until all events have been comsumed
#
def run( self ):
external_data_listener_heap = Heap( self.external_data_listener_list, lambda x : x.next_event_timestamp )
# Only way to get out of this loop is when all sources have been removed. This is the only while loop in the program
while external_data_listener_heap.size( ) > 0: # only way to get out of this loop is when all sources have been removed. This is the only while loop in the program
top_edl = external_data_listener_heap.pop( )
if external_data_listener_heap.size( ) == 0:
top_edl.process_all_events( )
return
top_edl.process_events_till( external_data_listener_heap.top( ).next_event_timestamp ) # process all events in this source till the next event is older the one on the new top
next_event_timestamp_from_edl = top_edl.next_event_timestamp # ask the source to get the next_event_timestamp ( the first event that is older than the specified endtime )
source_has_events = next_event_timestamp_from_edl != 0 # if 0 then no events .. and in historical this means this source can be removed, since it has finished reading the file it was reading from
if source_has_events:
external_data_listener_heap.push( top_edl ) # Reinsert into heap
heap_size = external_data_listener_heap.size( )
if heap_size == 1:
external_data_listener_heap.top( ).process_all_events( ) # Since ComputeEarliestDataTimestamp () has been called, or ProcessEventsTill has been called, the next_event_timestamp will be valid
external_data_listener_heap.pop( )
return
if heap_size == 0:
return
| {
"content_hash": "3877c52adbe0d13ed977ba80fdd9908d",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 207,
"avg_line_length": 63.85333333333333,
"alnum_prop": 0.6915848820212989,
"repo_name": "ashesh-0/trade-analysis",
"id": "ac705e902dd7830e67ad4ed683255b38ee9732fa",
"size": "4789",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "event_processing/historical_dispatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67822"
}
],
"symlink_target": ""
} |
from CTFd.models import Teams, Users
from CTFd.utils import set_config
from tests.helpers import (
create_ctfd,
destroy_ctfd,
login_as_user,
login_with_mlc,
register_user,
)
def test_oauth_not_configured():
"""Test that OAuth redirection fails if OAuth settings aren't configured"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get("/oauth", follow_redirects=False)
assert r.location == "http://localhost/login"
r = client.get(r.location)
resp = r.get_data(as_text=True)
assert "OAuth Settings not configured" in resp
destroy_ctfd(app)
def test_oauth_configured_flow():
"""Test that MLC integration works properly but does not allow registration (account creation) if disabled"""
app = create_ctfd(user_mode="teams")
app.config.update(
{
"OAUTH_CLIENT_ID": "ctfd_testing_client_id",
"OAUTH_CLIENT_SECRET": "ctfd_testing_client_secret",
"OAUTH_AUTHORIZATION_ENDPOINT": "http://auth.localhost/oauth/authorize",
"OAUTH_TOKEN_ENDPOINT": "http://auth.localhost/oauth/token",
"OAUTH_API_ENDPOINT": "http://api.localhost/user",
}
)
with app.app_context():
set_config("registration_visibility", "private")
assert Users.query.count() == 1
assert Teams.query.count() == 0
client = login_with_mlc(app, raise_for_error=False)
assert Users.query.count() == 1
# Users shouldn't be able to register because registration is disabled
resp = client.get("http://localhost/login").get_data(as_text=True)
assert "Public registration is disabled" in resp
set_config("registration_visibility", "public")
client = login_with_mlc(app)
# Users should be able to register now
assert Users.query.count() == 2
user = Users.query.filter_by(email="user@ctfd.io").first()
assert user.oauth_id == 1337
assert user.team_id == 1
# Teams should be created
assert Teams.query.count() == 1
team = Teams.query.filter_by(id=1).first()
assert team.oauth_id == 1234
client.get("/logout")
# Users should still be able to login if registration is disabled
set_config("registration_visibility", "private")
client = login_with_mlc(app)
with client.session_transaction() as sess:
assert sess["id"]
assert sess["nonce"]
assert sess["hash"]
destroy_ctfd(app)
def test_oauth_login_upgrade():
"""Test that users who use MLC after having registered will be associated with their MLC account"""
app = create_ctfd(user_mode="teams")
app.config.update(
{
"OAUTH_CLIENT_ID": "ctfd_testing_client_id",
"OAUTH_CLIENT_SECRET": "ctfd_testing_client_secret",
"OAUTH_AUTHORIZATION_ENDPOINT": "http://auth.localhost/oauth/authorize",
"OAUTH_TOKEN_ENDPOINT": "http://auth.localhost/oauth/token",
"OAUTH_API_ENDPOINT": "http://api.localhost/user",
}
)
with app.app_context():
register_user(app)
assert Users.query.count() == 2
set_config("registration_visibility", "private")
# Users should still be able to login
client = login_as_user(app)
client.get("/logout")
user = Users.query.filter_by(id=2).first()
assert user.oauth_id is None
assert user.team_id is None
login_with_mlc(app)
assert Users.query.count() == 2
# Logging in with MLC should insert an OAuth ID and team ID
user = Users.query.filter_by(id=2).first()
assert user.oauth_id
assert user.verified
assert user.team_id
destroy_ctfd(app)
| {
"content_hash": "f4cce2e00a989b905841356e839e46b2",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 113,
"avg_line_length": 35.06363636363636,
"alnum_prop": 0.613689395903552,
"repo_name": "LosFuzzys/CTFd",
"id": "15e09852e2fa2eb9adbb8c1d0e71669c9b7d947a",
"size": "3904",
"binary": false,
"copies": "1",
"ref": "refs/heads/losctf",
"path": "tests/oauth/test_redirect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1956"
},
{
"name": "Dockerfile",
"bytes": "932"
},
{
"name": "HTML",
"bytes": "314286"
},
{
"name": "JavaScript",
"bytes": "646022"
},
{
"name": "Makefile",
"bytes": "841"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "933857"
},
{
"name": "SCSS",
"bytes": "40023"
},
{
"name": "Shell",
"bytes": "2759"
},
{
"name": "Vue",
"bytes": "25361"
}
],
"symlink_target": ""
} |
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, FR
from holidays.constants import JAN, MAR, APR, MAY, AUG, OCT, NOV, DEC
from holidays.constants import MON, TUE, THU, WEEKEND
from holidays.holiday_base import HolidayBase
class Hungary(HolidayBase):
# https://en.wikipedia.org/wiki/Public_holidays_in_Hungary
# observed days off work around national holidays in the last 10 years:
# https://www.munkaugyiforum.hu/munkaugyi-segedanyagok/
# 2018-evi-munkaszuneti-napok-koruli-munkarend-9-2017-ngm-rendelet
# codification dates:
# - https://hvg.hu/gazdasag/
# 20170307_Megszavaztak_munkaszuneti_nap_lett_a_nagypentek
# - https://www.tankonyvtar.hu/hu/tartalom/historia/
# 92-10/ch01.html#id496839
country = "HU"
def __init__(self, **kwargs):
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New years
self._add_with_observed_day_off(date(year, JAN, 1), "Újév", since=2014)
# National Day
if 1945 <= year <= 1950 or 1989 <= year:
self._add_with_observed_day_off(
date(year, MAR, 15), "Nemzeti ünnep"
)
# Soviet era
if 1950 <= year <= 1989:
# Proclamation of Soviet socialist governing system
self[
date(year, MAR, 21)
] = "A Tanácsköztársaság kikiáltásának ünnepe"
# Liberation Day
self[date(year, APR, 4)] = "A felszabadulás ünnepe"
# Memorial day of The Great October Soviet Socialist Revolution
if year not in (1956, 1989):
self[
date(year, NOV, 7)
] = "A nagy októberi szocialista forradalom ünnepe"
easter_date = easter(year)
# Good Friday
if 2017 <= year:
self[easter_date + rd(weekday=FR(-1))] = "Nagypéntek"
# Easter
self[easter_date] = "Húsvét"
# Second easter day
if 1955 != year:
self[easter_date + rd(days=1)] = "Húsvét Hétfő"
# Pentecost
self[easter_date + rd(days=49)] = "Pünkösd"
# Pentecost monday
if year <= 1952 or 1992 <= year:
self[easter_date + rd(days=50)] = "Pünkösdhétfő"
# International Workers' Day
if 1946 <= year:
self._add_with_observed_day_off(
date(year, MAY, 1), "A Munka ünnepe"
)
if 1950 <= year <= 1953:
self[date(year, MAY, 2)] = "A Munka ünnepe"
# State Foundation Day (1771-????, 1891-)
if 1950 <= year < 1990:
self[date(year, AUG, 20)] = "A kenyér ünnepe"
else:
self._add_with_observed_day_off(
date(year, AUG, 20), "Az államalapítás ünnepe"
)
# National Day
if 1991 <= year:
self._add_with_observed_day_off(
date(year, OCT, 23), "Nemzeti ünnep"
)
# All Saints' Day
if 1999 <= year:
self._add_with_observed_day_off(
date(year, NOV, 1), "Mindenszentek"
)
# Christmas Eve is not endorsed officially
# but nowadays it is usually a day off work
if (
self.observed
and 2010 <= year
and date(year, DEC, 24).weekday() not in WEEKEND
):
self[date(year, DEC, 24)] = "Szenteste"
# First christmas
self[date(year, DEC, 25)] = "Karácsony"
# Second christmas
if 1955 != year:
self._add_with_observed_day_off(
date(year, DEC, 26),
"Karácsony másnapja",
since=2013,
before=False,
after=True,
)
# New Year's Eve
if (
self.observed
and 2014 <= year
and date(year, DEC, 31).weekday() == MON
):
self[date(year, DEC, 31)] = "Szilveszter"
def _add_with_observed_day_off(
self, day, desc, since=2010, before=True, after=True
):
# Swapped days off were in place earlier but
# I haven't found official record yet.
self[day] = desc
# TODO: should it be a separate flag?
if self.observed and since <= day.year:
if day.weekday() == TUE and before:
self[day - rd(days=1)] = desc + " előtti pihenőnap"
elif day.weekday() == THU and after:
self[day + rd(days=1)] = desc + " utáni pihenőnap"
class HU(Hungary):
pass
class HUN(Hungary):
pass
| {
"content_hash": "27d657c33006a4a15898477edcfd65c0",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 31.513513513513512,
"alnum_prop": 0.5405231560891939,
"repo_name": "ryanss/holidays.py",
"id": "91e72170418fd32a756931dbf33032f21aecd9f0",
"size": "5210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holidays/countries/hungary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214061"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import CertificateMonitor, DomainMonitor, IpMonitor, IndicatorAlert, IndicatorTag, CertificateSubscription, DomainSubscription, IpSubscription
from profiles.models import Profile
def subscription_owner(obj):
if type(obj) == CertificateMonitor:
owner = Profile.objects.filter(id__in=CertificateSubscription.objects.filter(certificate_id=obj.certificate_value).values('owner'));
elif type(obj) == DomainMonitor:
owner = Profile.objects.filter(id__in=DomainSubscription.objects.filter(domain_name_id=obj.domain_name).values('owner'));
elif type(obj) == IpMonitor:
owner = Profile.objects.filter(id__in=IpSubscription.objects.filter(ip_address_id=obj.ip_address).values('owner'));
return owner
class CertificateMonitorAdmin(admin.ModelAdmin):
owner = subscription_owner
list_display = ('certificate_value', 'last_hosts', 'modified', owner)
class IpMonitorAdmin(admin.ModelAdmin):
list_display = ('ip_address', 'last_hosts', 'modified', 'owner')
class DomainMonitorAdmin(admin.ModelAdmin):
list_display = ('domain_name', 'last_hosts', 'modified', 'owner')
class IndicatorAlertAdmin(admin.ModelAdmin):
list_display = ('indicator', 'message', 'created', 'recipient')
class IndicatorTagAdmin(admin.ModelAdmin):
list_display = ('tag', 'owner')
admin.site.register(IndicatorTag, IndicatorTagAdmin)
admin.site.register(CertificateMonitor, CertificateMonitorAdmin)
admin.site.register(IpMonitor, IpMonitorAdmin)
admin.site.register(DomainMonitor, DomainMonitorAdmin)
admin.site.register(IndicatorAlert, IndicatorAlertAdmin) | {
"content_hash": "e079165f48dee924af0c17ff88b1a8a6",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 155,
"avg_line_length": 37.15909090909091,
"alnum_prop": 0.7620795107033639,
"repo_name": "LindaTNguyen/RAPID",
"id": "d5004f9a81ae25172962f90c015a5710ff0d0a36",
"size": "1635",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monitors/admin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "184203"
},
{
"name": "HTML",
"bytes": "7180410"
},
{
"name": "JavaScript",
"bytes": "1702997"
},
{
"name": "PHP",
"bytes": "55444"
},
{
"name": "Python",
"bytes": "313767"
},
{
"name": "Shell",
"bytes": "31724"
}
],
"symlink_target": ""
} |
def deprecation_warning():
print("""
=============================================================================
*** DEPRECATION WARNING ***
Cookiecutter data science is moving to v2 soon, which will entail using
the command `ccds ...` rather than `cookiecutter ...`. The cookiecutter command
will continue to work, and this version of the template will still be available.
To use the legacy template, you will need to explicitly use `-c v1` to select it.
Please update any scripts/automation you have to append the `-c v1` option,
which is available now.
For example:
cookiecutter -c v1 https://github.com/drivendata/cookiecutter-data-science
=============================================================================
""")
deprecation_warning()
| {
"content_hash": "f06aa523533c6f1193d6d8caaf11a48b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 81,
"avg_line_length": 35,
"alnum_prop": 0.5909090909090909,
"repo_name": "drivendata/cookiecutter-data-science",
"id": "2bd49c083d871e53c199dfe0aa6f9ba410d8303f",
"size": "770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hooks/pre_gen_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5140"
},
{
"name": "Makefile",
"bytes": "10365"
},
{
"name": "Python",
"bytes": "15337"
},
{
"name": "Shell",
"bytes": "459"
}
],
"symlink_target": ""
} |
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.html import urlize as urlize
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize_newtab(value, autoescape=True):
"""Converts URLs in plain text into clickable links that open in new tabs."""
url = urlize(value, nofollow=True, autoescape=autoescape)
url = url.replace("a href", 'a target="_blank" rel="noreferrer" href')
return mark_safe(url)
| {
"content_hash": "380f0f4ba6a5a48124381edeb23009c0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 37.733333333333334,
"alnum_prop": 0.7632508833922261,
"repo_name": "pbanaszkiewicz/amy",
"id": "ed2a7e04824876b77dcae3a8045dda8ad8b8b63e",
"size": "566",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "amy/workshops/templatetags/links.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5850"
},
{
"name": "Dockerfile",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "313293"
},
{
"name": "JavaScript",
"bytes": "39427"
},
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "2707815"
}
],
"symlink_target": ""
} |
"""Contains convenience wrappers for creating variables in TF-Slim.
The variables module is typically used for defining model variables from the
ops routines (see slim.ops). Such variables are used for training, evaluation
and inference of models.
All the variables created through this module would be added to the
MODEL_VARIABLES collection, if you create a model variable outside slim, it can
be added with slim.variables.add_variable(external_variable, reuse).
Usage:
weights_initializer = tf.truncated_normal_initializer(stddev=0.01)
l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)
weights = variables.variable('weights',
shape=[100, 100],
initializer=weights_initializer,
regularizer=l2_regularizer,
device='/cpu:0')
biases = variables.variable('biases',
shape=[100],
initializer=tf.zeros_initializer,
device='/cpu:0')
# More complex example.
net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
with slim.arg_scope([variables.variable], restore=False):
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')
# Get all model variables from all the layers.
model_variables = slim.variables.get_variables()
# Get all model variables from a specific the layer, i.e 'conv1'.
conv1_variables = slim.variables.get_variables('conv1')
# Get all weights from all the layers.
weights = slim.variables.get_variables_by_name('weights')
# Get all bias from all the layers.
biases = slim.variables.get_variables_by_name('biases')
# Get all variables to restore.
# (i.e. only those created by 'conv1' and 'conv2')
variables_to_restore = slim.variables.get_variables_to_restore()
************************************************
* Initializing model variables from a checkpoint
************************************************
# Create some variables.
v1 = slim.variables.variable(name="v1", ..., restore=False)
v2 = slim.variables.variable(name="v2", ...) # By default restore=True
...
# The list of variables to restore should only contain 'v2'.
variables_to_restore = slim.variables.get_variables_to_restore()
restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
# Restore variables from disk.
restorer.restore(sess, "/tmp/model.ckpt")
print("Model restored.")
# Do some work with the model
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from inception.slim import scopes
# Collection containing all the variables created using slim.variables
MODEL_VARIABLES = '_model_variables_'
# Collection containing the slim.variables that are created with restore=True.
VARIABLES_TO_RESTORE = '_variables_to_restore_'
def add_variable(var, restore=True):
"""Adds a variable to the MODEL_VARIABLES collection.
Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
Args:
var: a variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
"""
collections = [MODEL_VARIABLES]
if restore:
collections.append(VARIABLES_TO_RESTORE)
for collection in collections:
if var not in tf.get_collection(collection):
tf.add_to_collection(collection, var)
def get_variables(scope=None, suffix=None):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a copied list of variables with scope and suffix.
"""
candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]
if suffix is not None:
candidates = [var for var in candidates if var.op.name.endswith(suffix)]
return candidates
def get_variables_to_restore():
"""Gets the list of variables to restore.
Returns:
a copied list of variables.
"""
return tf.get_collection(VARIABLES_TO_RESTORE)[:]
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and prefix.
"""
return get_variables(scope=scope, suffix=given_name)
def get_unique_variable(name):
"""Gets the variable uniquely identified by that name.
Args:
name: a name that uniquely identifies the variable.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = tf.get_collection(tf.GraphKeys.VARIABLES, name)
if not candidates:
raise ValueError('Couldnt find variable %s' % name)
for candidate in candidates:
if candidate.op.name == name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable', name)
class VariableDeviceChooser(object):
"""Slim device chooser for variables.
When using a parameter server it will assign them in a round-robin fashion.
When not using a parameter server it allows GPU:0 placement otherwise CPU:0.
"""
def __init__(self,
num_parameter_servers=0,
ps_device='/job:ps',
placement='CPU:0'):
"""Initialize VariableDeviceChooser.
Args:
num_parameter_servers: number of parameter servers.
ps_device: string representing the parameter server device.
placement: string representing the placement of the variable either CPU:0
or GPU:0. When using parameter servers forced to CPU:0.
"""
self._num_ps = num_parameter_servers
self._ps_device = ps_device
self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
self._next_task_id = 0
def __call__(self, op):
device_string = ''
if self._num_ps > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_ps
device_string = '%s/task:%d' % (self._ps_device, task_id)
device_string += '/%s' % self._placement
return device_string
# TODO(sguada) Remove once get_variable is able to colocate op.devices.
def variable_device(device, name):
"""Fix the variable device to colocate its ops."""
if callable(device):
var_name = tf.get_variable_scope().name + '/' + name
var_def = graph_pb2.NodeDef(name=var_name, op='Variable')
device = device(var_def)
if device is None:
device = ''
return device
@scopes.add_arg_scope
def global_step(device=''):
"""Returns the global step variable.
Args:
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
Returns:
the tensor representing the global step variable.
"""
global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
if global_step_ref:
return global_step_ref[0]
else:
collections = [
VARIABLES_TO_RESTORE,
tf.GraphKeys.VARIABLES,
tf.GraphKeys.GLOBAL_STEP,
]
# Get the device for the variable.
with tf.device(variable_device(device, 'global_step')):
return tf.get_variable('global_step', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer,
trainable=False, collections=collections)
@scopes.add_arg_scope
def variable(name, shape=None, dtype=tf.float32, initializer=None,
regularizer=None, trainable=True, collections=None, device='',
restore=True):
"""Gets an existing variable with these parameters or creates a new one.
It also add itself to a group with its name.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the tf.GraphKeys.VARIABLES
and MODEL_VARIABLES collections.
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
# Make sure variables are added to tf.GraphKeys.VARIABLES and MODEL_VARIABLES
collections += [tf.GraphKeys.VARIABLES, MODEL_VARIABLES]
# Add to VARIABLES_TO_RESTORE if necessary
if restore:
collections.append(VARIABLES_TO_RESTORE)
# Remove duplicates
collections = set(collections)
# Get the device for the variable.
with tf.device(variable_device(device, name)):
return tf.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections)
| {
"content_hash": "5fc60f69fb4e4e866246ff775ae18f2c",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 80,
"avg_line_length": 35.14855072463768,
"alnum_prop": 0.6836408617668281,
"repo_name": "nathansilberman/models",
"id": "d58bb5328d9a20fdfb0a2330156af68e2e977aa5",
"size": "10374",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "inception/inception/slim/variables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "604484"
},
{
"name": "Jupyter Notebook",
"bytes": "41812"
},
{
"name": "Makefile",
"bytes": "6049"
},
{
"name": "Protocol Buffer",
"bytes": "10515"
},
{
"name": "Python",
"bytes": "1082505"
},
{
"name": "Shell",
"bytes": "31462"
}
],
"symlink_target": ""
} |
from beltac import *
from beltacreader import load
from inserter import insert,version_imported,reject,setRefsDict,simple_dict_insert
import urllib2
from datetime import datetime,timedelta
import logging
import zipfile
from cStringIO import StringIO
from bs4 import BeautifulSoup
logger = logging.getLogger("importer")
url = 'http://beltac.tec-wl.be'
path = '/Current%20BLTAC/'
def getDataSource():
return { '1' : {
'operator_id' : 'TEC',
'name' : 'TEC beltec leveringen',
'description' : 'TEC beltec leveringen',
'email' : None,
'url' : None}}
def import_zip(path,filename,meta):
zip = zipfile.ZipFile(path+'/'+filename,'r')
count = 0
for name in zip.namelist():
unitcode = name.split('.')[0]
import_subzip(StringIO(zip.read(name)),filename,unitcode,remove_old=count==0)
count += 1
def import_subzip(zip,versionname,unitcode,remove_old=False):
meta,conn = load(zip)
conn.commit()
try:
data = {}
data['OPERATOR'] = {'TEC' : {'url' : 'http://www.infotec.be',
'language' : 'nl',
'phone' : '0',
'timezone' : 'Europe/Amsterdam',
'operator_id' : 'TEC',
'name' : 'TEC',
'privatecode' : 'TEC'}}
data['MERGESTRATEGY'] = []
if remove_old:
data['MERGESTRATEGY'].append({'type' : 'DATASOURCE', 'datasourceref' : '1'})
data['DATASOURCE'] = getDataSource()
data['VERSION'] = {}
data['VERSION']['1'] = getVersion(conn,versionname,prefix='TEC')
data['DESTINATIONDISPLAY'] = getDestinationDisplays(conn,prefix='TEC')
data['LINE'] = getLines(conn,prefix='TEC',operatorref='TEC',unitcode=unitcode[3:])
data['STOPPOINT'] = getStopPoints(conn,prefix='TEC')
data['STOPAREA'] = getStopAreas(conn,prefix='TEC')
data['AVAILABILITYCONDITION'] = getAvailabilityConditions(conn,prefix='TEC',unitcode=unitcode[3:])
data['PRODUCTCATEGORY'] = getProductCategories(conn)
data['ADMINISTRATIVEZONE'] = {}
timedemandGroupRefForJourney,data['TIMEDEMANDGROUP'] = calculateTimeDemandGroups(conn,prefix='TEC',unitcode=unitcode[3:])
routeRefForPattern,data['ROUTE'] = clusterPatternsIntoRoute(conn,getFakePool,prefix='TEC',unitcode=unitcode[3:])
data['JOURNEYPATTERN'] = getJourneyPatterns(routeRefForPattern,conn,data['ROUTE'],prefix='TEC')
data['JOURNEY'] = getJourneys(timedemandGroupRefForJourney,conn,prefix='TEC',unitcode=unitcode[3:])
data['NOTICEASSIGNMENT'] = getNoticeAssignments(conn,prefix='TEC')
data['NOTICE'] = getNotices(conn,prefix='TEC')
data['NOTICEGROUP'] = getNoticeGroups(conn,prefix='TEC')
conn.close()
insert(data)
except:
raise
def download(url,filename):
u = urllib2.urlopen(url)
f = open('/tmp/'+filename, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (filename, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
print
f.close()
import_zip('/tmp',filename,None)
def sync():
f = urllib2.urlopen(url+'/'+path)
soup = BeautifulSoup(f.read())
files = []
for link in soup.find_all('a'):
link = link.get('href')
filename = urllib2.unquote(link).split('/')[-1]
if '.zip' in link.lower():
if not version_imported('TEC:'+filename):
files.append((link,filename))
for link,filename in sorted(files):
try:
print 'FILE '+filename
logger.info('Importing :'+filename)
download(url+link,filename)
except Exception as e:
logger.error(filename,exc_info=True)
pass
| {
"content_hash": "9aa3872494851290031c82841d70dfaf",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 129,
"avg_line_length": 39.39090909090909,
"alnum_prop": 0.5735056542810986,
"repo_name": "bliksemlabs/bliksemintegration",
"id": "2a8bdec5bd1ad5111c6e522e53d86a6046d96cbd",
"size": "4333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "importers/tec.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PLSQL",
"bytes": "4719"
},
{
"name": "PLpgSQL",
"bytes": "15144"
},
{
"name": "Python",
"bytes": "494673"
},
{
"name": "Shell",
"bytes": "438"
}
],
"symlink_target": ""
} |
import itertools
import json
import re
import subprocess
import xml.etree.ElementTree as ET
from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
from typing import Set
RESULT = Path(__file__).parent / "../corpus/aosp.json"
DOWNLOADS = Path(__file__).parent / "../repos"
# List below generated by running the following snippet in the DevTools console
# on the following page:
# https://android.googlesource.com/
# console.log(Array.from(document.querySelectorAll(
# "a.RepoList-item[href*='/platform/packages/apps/']"
# )).map(a=>` (
# "${a.href.split('/').reverse()[1]}",
# "${a.href}",
# ),`).join('\n'))
APP_GIT_REPOS = [
(
"AccountsAndSyncSettings",
"https://android.googlesource.com/platform/packages/apps/AccountsAndSyncSettings/", # noqa: E501
),
(
"AlarmClock",
"https://android.googlesource.com/platform/packages/apps/AlarmClock/",
),
(
"BasicSmsReceiver",
"https://android.googlesource.com/platform/packages/apps/BasicSmsReceiver/",
),
(
"Benchmark",
"https://android.googlesource.com/platform/packages/apps/Benchmark/",
),
(
"Bluetooth",
"https://android.googlesource.com/platform/packages/apps/Bluetooth/",
),
(
"Browser",
"https://android.googlesource.com/platform/packages/apps/Browser/",
),
(
"Browser2",
"https://android.googlesource.com/platform/packages/apps/Browser2/",
),
(
"Calculator",
"https://android.googlesource.com/platform/packages/apps/Calculator/",
),
(
"Calendar",
"https://android.googlesource.com/platform/packages/apps/Calendar/",
),
(
"Camera",
"https://android.googlesource.com/platform/packages/apps/Camera/",
),
(
"Camera2",
"https://android.googlesource.com/platform/packages/apps/Camera2/",
),
(
"Calendar",
"https://android.googlesource.com/platform/packages/apps/Car/Calendar/",
),
(
"Cluster",
"https://android.googlesource.com/platform/packages/apps/Car/Cluster/",
),
(
"CompanionDeviceSupport",
"https://android.googlesource.com/platform/packages/apps/Car/CompanionDeviceSupport/", # noqa: E501
),
(
"Dialer",
"https://android.googlesource.com/platform/packages/apps/Car/Dialer/",
),
(
"externallibs",
"https://android.googlesource.com/platform/packages/apps/Car/externallibs/",
),
(
"Hvac",
"https://android.googlesource.com/platform/packages/apps/Car/Hvac/",
),
(
"LatinIME",
"https://android.googlesource.com/platform/packages/apps/Car/LatinIME/",
),
(
"Launcher",
"https://android.googlesource.com/platform/packages/apps/Car/Launcher/",
),
(
"LensPicker",
"https://android.googlesource.com/platform/packages/apps/Car/LensPicker/",
),
(
"libs",
"https://android.googlesource.com/platform/packages/apps/Car/libs/",
),
(
"LinkViewer",
"https://android.googlesource.com/platform/packages/apps/Car/LinkViewer/",
),
(
"LocalMediaPlayer",
"https://android.googlesource.com/platform/packages/apps/Car/LocalMediaPlayer/",
),
(
"Media",
"https://android.googlesource.com/platform/packages/apps/Car/Media/",
),
(
"Messenger",
"https://android.googlesource.com/platform/packages/apps/Car/Messenger/",
),
(
"Notification",
"https://android.googlesource.com/platform/packages/apps/Car/Notification/",
),
(
"Overview",
"https://android.googlesource.com/platform/packages/apps/Car/Overview/",
),
(
"Provision",
"https://android.googlesource.com/platform/packages/apps/Car/Provision/",
),
(
"Radio",
"https://android.googlesource.com/platform/packages/apps/Car/Radio/",
),
(
"RotaryController",
"https://android.googlesource.com/platform/packages/apps/Car/RotaryController/",
),
(
"Settings",
"https://android.googlesource.com/platform/packages/apps/Car/Settings/",
),
(
"Stream",
"https://android.googlesource.com/platform/packages/apps/Car/Stream/",
),
(
"SystemUpdater",
"https://android.googlesource.com/platform/packages/apps/Car/SystemUpdater/",
),
(
"Templates",
"https://android.googlesource.com/platform/packages/apps/Car/Templates/",
),
(
"tests",
"https://android.googlesource.com/platform/packages/apps/Car/tests/",
),
(
"UserManagement",
"https://android.googlesource.com/platform/packages/apps/Car/UserManagement/",
),
(
"CarrierConfig",
"https://android.googlesource.com/platform/packages/apps/CarrierConfig/",
),
(
"CellBroadcastReceiver",
"https://android.googlesource.com/platform/packages/apps/CellBroadcastReceiver/",
),
(
"CertInstaller",
"https://android.googlesource.com/platform/packages/apps/CertInstaller/",
),
(
"Contacts",
"https://android.googlesource.com/platform/packages/apps/Contacts/",
),
(
"ContactsCommon",
"https://android.googlesource.com/platform/packages/apps/ContactsCommon/",
),
(
"DeskClock",
"https://android.googlesource.com/platform/packages/apps/DeskClock/",
),
(
"DevCamera",
"https://android.googlesource.com/platform/packages/apps/DevCamera/",
),
(
"Dialer",
"https://android.googlesource.com/platform/packages/apps/Dialer/",
),
(
"DocumentsUI",
"https://android.googlesource.com/platform/packages/apps/DocumentsUI/",
),
(
"Email",
"https://android.googlesource.com/platform/packages/apps/Email/",
),
(
"EmergencyInfo",
"https://android.googlesource.com/platform/packages/apps/EmergencyInfo/",
),
(
"ExactCalculator",
"https://android.googlesource.com/platform/packages/apps/ExactCalculator/",
),
(
"Exchange",
"https://android.googlesource.com/platform/packages/apps/Exchange/",
),
(
"FMRadio",
"https://android.googlesource.com/platform/packages/apps/FMRadio/",
),
(
"Gallery",
"https://android.googlesource.com/platform/packages/apps/Gallery/",
),
(
"Gallery2",
"https://android.googlesource.com/platform/packages/apps/Gallery2/",
),
(
"Gallery3D",
"https://android.googlesource.com/platform/packages/apps/Gallery3D/",
),
(
"GlobalSearch",
"https://android.googlesource.com/platform/packages/apps/GlobalSearch/",
),
(
"GoogleSearch",
"https://android.googlesource.com/platform/packages/apps/GoogleSearch/",
),
(
"HTMLViewer",
"https://android.googlesource.com/platform/packages/apps/HTMLViewer/",
),
(
"IdentityCredentialSupport",
"https://android.googlesource.com/platform/packages/apps/IdentityCredentialSupport/", # noqa: E501
),
(
"IM",
"https://android.googlesource.com/platform/packages/apps/IM/",
),
(
"ImsServiceEntitlement",
"https://android.googlesource.com/platform/packages/apps/ImsServiceEntitlement/",
),
(
"InCallUI",
"https://android.googlesource.com/platform/packages/apps/InCallUI/",
),
(
"KeyChain",
"https://android.googlesource.com/platform/packages/apps/KeyChain/",
),
(
"Launcher",
"https://android.googlesource.com/platform/packages/apps/Launcher/",
),
(
"Launcher2",
"https://android.googlesource.com/platform/packages/apps/Launcher2/",
),
(
"Launcher3",
"https://android.googlesource.com/platform/packages/apps/Launcher3/",
),
(
"LegacyCamera",
"https://android.googlesource.com/platform/packages/apps/LegacyCamera/",
),
(
"ManagedProvisioning",
"https://android.googlesource.com/platform/packages/apps/ManagedProvisioning/",
),
(
"McLauncher",
"https://android.googlesource.com/platform/packages/apps/McLauncher/",
),
(
"Messaging",
"https://android.googlesource.com/platform/packages/apps/Messaging/",
),
(
"Mms",
"https://android.googlesource.com/platform/packages/apps/Mms/",
),
(
"Music",
"https://android.googlesource.com/platform/packages/apps/Music/",
),
(
"MusicFX",
"https://android.googlesource.com/platform/packages/apps/MusicFX/",
),
(
"Nfc",
"https://android.googlesource.com/platform/packages/apps/Nfc/",
),
(
"OnDeviceAppPrediction",
"https://android.googlesource.com/platform/packages/apps/OnDeviceAppPrediction/",
),
(
"OneTimeInitializer",
"https://android.googlesource.com/platform/packages/apps/OneTimeInitializer/",
),
(
"PackageInstaller",
"https://android.googlesource.com/platform/packages/apps/PackageInstaller/",
),
(
"Phone",
"https://android.googlesource.com/platform/packages/apps/Phone/",
),
(
"PhoneCommon",
"https://android.googlesource.com/platform/packages/apps/PhoneCommon/",
),
(
"Protips",
"https://android.googlesource.com/platform/packages/apps/Protips/",
),
(
"Provision",
"https://android.googlesource.com/platform/packages/apps/Provision/",
),
(
"QuickAccessWallet",
"https://android.googlesource.com/platform/packages/apps/QuickAccessWallet/",
),
(
"QuickSearchBox",
"https://android.googlesource.com/platform/packages/apps/QuickSearchBox/",
),
(
"RemoteProvisioner",
"https://android.googlesource.com/platform/packages/apps/RemoteProvisioner/",
),
(
"RetailDemo",
"https://android.googlesource.com/platform/packages/apps/RetailDemo/",
),
(
"SafetyRegulatoryInfo",
"https://android.googlesource.com/platform/packages/apps/SafetyRegulatoryInfo/",
),
(
"SampleLocationAttribution",
"https://android.googlesource.com/platform/packages/apps/SampleLocationAttribution/", # noqa: E501
),
(
"SecureElement",
"https://android.googlesource.com/platform/packages/apps/SecureElement/",
),
(
"Settings",
"https://android.googlesource.com/platform/packages/apps/Settings/",
),
(
"SettingsIntelligence",
"https://android.googlesource.com/platform/packages/apps/SettingsIntelligence/",
),
(
"SmartCardService",
"https://android.googlesource.com/platform/packages/apps/SmartCardService/",
),
(
"SoundRecorder",
"https://android.googlesource.com/platform/packages/apps/SoundRecorder/",
),
(
"SpareParts",
"https://android.googlesource.com/platform/packages/apps/SpareParts/",
),
(
"SpeechRecorder",
"https://android.googlesource.com/platform/packages/apps/SpeechRecorder/",
),
(
"Stk",
"https://android.googlesource.com/platform/packages/apps/Stk/",
),
(
"StorageManager",
"https://android.googlesource.com/platform/packages/apps/StorageManager/",
),
(
"Sync",
"https://android.googlesource.com/platform/packages/apps/Sync/",
),
(
"Tag",
"https://android.googlesource.com/platform/packages/apps/Tag/",
),
(
"Terminal",
"https://android.googlesource.com/platform/packages/apps/Terminal/",
),
(
"connectivity",
"https://android.googlesource.com/platform/packages/apps/Test/connectivity/",
),
(
"ThemePicker",
"https://android.googlesource.com/platform/packages/apps/ThemePicker/",
),
(
"TimeZoneData",
"https://android.googlesource.com/platform/packages/apps/TimeZoneData/",
),
(
"TimeZoneUpdater",
"https://android.googlesource.com/platform/packages/apps/TimeZoneUpdater/",
),
(
"Traceur",
"https://android.googlesource.com/platform/packages/apps/Traceur/",
),
(
"TV",
"https://android.googlesource.com/platform/packages/apps/TV/",
),
(
"TvSettings",
"https://android.googlesource.com/platform/packages/apps/TvSettings/",
),
(
"UnifiedEmail",
"https://android.googlesource.com/platform/packages/apps/UnifiedEmail/",
),
(
"UniversalMediaPlayer",
"https://android.googlesource.com/platform/packages/apps/UniversalMediaPlayer/",
),
(
"Updater",
"https://android.googlesource.com/platform/packages/apps/Updater/",
),
(
"VideoEditor",
"https://android.googlesource.com/platform/packages/apps/VideoEditor/",
),
(
"VoiceDialer",
"https://android.googlesource.com/platform/packages/apps/VoiceDialer/",
),
(
"WallpaperPicker",
"https://android.googlesource.com/platform/packages/apps/WallpaperPicker/",
),
(
"WallpaperPicker2",
"https://android.googlesource.com/platform/packages/apps/WallpaperPicker2/",
),
]
@dataclass
class Source:
apps: Set[str] = field(default_factory=set)
langs: Set[str] = field(default_factory=set)
def main():
download_sources()
strings = defaultdict(Source)
for app, lang, sentences in glob_read_strings_files():
for sentence in sentences:
strings[sentence].apps.add(app)
strings[sentence].langs.add(lang)
with open(RESULT, "w", encoding="utf-8") as fp:
json.dump(
dict(
sorted(
(
string,
{"apps": sorted(source.apps), "langs": sorted(source.langs)},
)
for string, source in strings.items()
)
),
fp,
ensure_ascii=False,
indent=2,
)
def download_sources():
for _name, repo in APP_GIT_REPOS:
folder = DOWNLOADS / Path(repo).name
folder.parent.mkdir(parents=True, exist_ok=True)
if not folder.exists():
git("clone", "--depth", "1", repo, folder)
def glob_read_strings_files():
for name, repo in APP_GIT_REPOS:
folder = DOWNLOADS / Path(repo).name
# Doc: https://developer.android.com/guide/topics/resources/string-resource
for path in folder.glob("**/strings.xml"):
lang = "en"
if match := re.match(r".*values-([^/\\]*)", str(path)):
lang = match.group(1)
# Some locales contain garbage data.
if "en-rXC" in lang:
continue
sentences = []
tree = ET.parse(path)
root = tree.getroot()
for string in itertools.chain(
root.findall(".//string"), root.findall(".//item")
):
for xliff_g in string.findall(
"./{urn:oasis:names:tc:xliff:document:1.2}g"
):
# Replace placeholders with example value if provided, otherwise kill
xliff_g.text = xliff_g.attrib.get("example", "")
s = "".join(string.itertext())
# Unquote. Each string might have several quoted bits
s = "".join(
part[1:-1] if part and (part[0] == part[-1] == '"') else
# Collapse whitespace in unquoted bits
re.sub(r"\s+", " ", part)
# Split string. The "delimiters" are quoted bits, that start
# and end with an unescaped double quote. There's a capturing
# group around the whole expression so that the delimiters
# are kept in the output.
for part in re.split(r'((?<!\\)"(?:[^"]|\\"|\n)*(?<!\\)")', s)
)
# Unescape various things
s = re.sub(r"""\\([@?nt'"]|u[0-9A-Fa-f]{4})""", unescape, s)
# Split by lines and strip each
# We're only interested in continuous lines (no breaks) for
# kerning measurement purposes.
for line in s.split("\n"):
line = line.strip()
if line:
sentences.append(line)
yield name, lang, sentences
def unescape(m):
g = m.group(1)
if g[0] == "u":
return chr(int(g[1:], base=16))
elif g == "n":
return "\n"
elif g == "t":
return "\t"
return g
def git(*args):
"""Execute the given git command and return the output."""
return subprocess.check_output(["git", *args])
if __name__ == "__main__":
main()
| {
"content_hash": "e558bc0f32ac9523d01d531b4cf9e78e",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 108,
"avg_line_length": 30.286713286713287,
"alnum_prop": 0.57486723620411,
"repo_name": "googlefonts/aosp-test-texts",
"id": "dd2b0a99c1f9c88b5732276bfd676959fb9a93ca",
"size": "17896",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/extract_strings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from thrift.Thrift import TMessageType, TApplicationException, TType
def process_main(twisted=False):
"""Decorator for process method."""
def _decorator(func):
def nested(self, iprot, oprot, server_ctx=None):
(name, type, seqid) = iprot.readMessageBegin()
if sys.version_info[0] >= 3:
name = name.decode()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD,
'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
if twisted is True:
from twisted.internet import defer
return defer.succeed(None)
else:
ret = self._processMap[name](self, seqid, iprot, oprot,
server_ctx)
if twisted is True:
return ret
else:
return True
return nested
return _decorator
def process_method(argtype, oneway=False, twisted=False):
"""Decorator for process_xxx methods."""
def _decorator(func):
def nested(self, seqid, iprot, oprot, server_ctx):
fn_name = func.__name__.split('_', 1)[-1]
handler_ctx = self._event_handler.getHandlerContext(fn_name,
server_ctx)
args = argtype()
reply_type = TMessageType.REPLY
self._event_handler.preRead(handler_ctx, fn_name, args)
args.read(iprot)
iprot.readMessageEnd()
self._event_handler.postRead(handler_ctx, fn_name, args)
if twisted is True:
return func(self, args, handler_ctx, seqid, oprot)
elif oneway is True:
func(self, args, handler_ctx)
else:
result = func(self, args, handler_ctx)
if isinstance(result, TApplicationException):
reply_type = TMessageType.EXCEPTION
self._event_handler.preWrite(handler_ctx, fn_name, result)
oprot.writeMessageBegin(fn_name, reply_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
self._event_handler.postWrite(handler_ctx, fn_name, result)
return nested
return _decorator
def write_results_success_callback(func):
"""Decorator for twisted write_results_success_xxx methods.
No need to call func so it can be empty.
"""
def nested(self, success, result, seqid, oprot, handler_ctx):
fn_name = func.__name__.split('_', 3)[-1]
result.success = success
self._event_handler.preWrite(handler_ctx, fn_name, result)
oprot.writeMessageBegin(fn_name, TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
self._event_handler.postWrite(handler_ctx, fn_name, result)
return nested
def write_results_exception_callback(func):
"""Decorator for twisted write_results_exception_xxx methods."""
def nested(self, error, result, seqid, oprot, handler_ctx):
fn_name = func.__name__.split('_', 3)[-1]
# Call the decorated function
reply_type, result = func(self, error, result, handler_ctx)
self._event_handler.preWrite(handler_ctx, fn_name, result)
oprot.writeMessageBegin(fn_name, reply_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
self._event_handler.postWrite(handler_ctx, fn_name, result)
return nested
| {
"content_hash": "d28bca8725bd3bb010ac977275144ebd",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 38.38095238095238,
"alnum_prop": 0.586848635235732,
"repo_name": "soumith/fbthrift",
"id": "7299c8fd2c846bdf3955f3eb639f0a51dc7e532b",
"size": "4816",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "thrift/lib/py/util/Decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "161802"
},
{
"name": "C#",
"bytes": "28929"
},
{
"name": "C++",
"bytes": "6236211"
},
{
"name": "D",
"bytes": "667895"
},
{
"name": "Emacs Lisp",
"bytes": "5154"
},
{
"name": "Erlang",
"bytes": "23039"
},
{
"name": "Go",
"bytes": "288742"
},
{
"name": "Hack",
"bytes": "385207"
},
{
"name": "Haskell",
"bytes": "153197"
},
{
"name": "Java",
"bytes": "994432"
},
{
"name": "JavaScript",
"bytes": "4488"
},
{
"name": "LLVM",
"bytes": "11951"
},
{
"name": "Makefile",
"bytes": "14663"
},
{
"name": "OCaml",
"bytes": "32172"
},
{
"name": "Objective-C",
"bytes": "109934"
},
{
"name": "PHP",
"bytes": "246615"
},
{
"name": "Perl",
"bytes": "70682"
},
{
"name": "Protocol Buffer",
"bytes": "585"
},
{
"name": "Python",
"bytes": "904345"
},
{
"name": "Ruby",
"bytes": "323073"
},
{
"name": "Scala",
"bytes": "1266"
},
{
"name": "Shell",
"bytes": "21565"
},
{
"name": "Smalltalk",
"bytes": "22812"
},
{
"name": "TeX",
"bytes": "48707"
},
{
"name": "Thrift",
"bytes": "117234"
},
{
"name": "VimL",
"bytes": "2837"
},
{
"name": "Yacc",
"bytes": "38893"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoisfsite.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "05880bec7a41db36831495a58d7fdd92",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 77,
"avg_line_length": 26,
"alnum_prop": 0.717948717948718,
"repo_name": "letuananh/intsem.fx",
"id": "6502be787353cc0b8f07048bfbafbac96cfa06ee",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev-0.2.3",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4388639"
},
{
"name": "Shell",
"bytes": "3771"
},
{
"name": "TSQL",
"bytes": "3718"
}
],
"symlink_target": ""
} |
import errno
import os
from contextlib import contextmanager
from pants.base.exceptions import TaskError
from pants.task.task import QuietTaskMixin, Task
from pants.util.dirutil import safe_open
from pants.util.meta import classproperty
class ConsoleTask(QuietTaskMixin, Task):
"""A task whose only job is to print information to the console.
ConsoleTasks are not intended to modify build state.
"""
@classproperty
def _register_console_transitivity_option(cls):
"""Some tasks register their own --transitive option, which act differently."""
return True
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--sep", default="\\n", metavar="<separator>", help="String to use to separate results."
)
register(
"--output-file", metavar="<path>", help="Write the console output to this file instead."
)
if cls._register_console_transitivity_option:
register(
"--transitive",
type=bool,
default=False,
fingerprint=True,
help="If True, use all targets in the build graph, else use only target roots.",
)
@property
def act_transitively(self):
# `Task` defaults to returning `True` in `act_transitively`, so we keep that default value.
return self.get_options().get("transitive", True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._console_separator = self.get_options().sep.encode().decode("unicode_escape")
if self.get_options().output_file:
try:
self._outstream = safe_open(os.path.abspath(self.get_options().output_file), "wb")
except IOError as e:
raise TaskError(
"Error opening stream {out_file} due to"
" {error_str}".format(out_file=self.get_options().output_file, error_str=e)
)
else:
self._outstream = self.context.console_outstream
@contextmanager
def _guard_sigpipe(self):
try:
yield
except IOError as e:
# If the pipeline only wants to read so much, that's fine; otherwise, this error is probably
# legitimate.
if e.errno != errno.EPIPE:
raise e
def execute(self):
with self._guard_sigpipe():
try:
targets = self.get_targets() if self.act_transitively else self.context.target_roots
for value in self.console_output(targets) or tuple():
self._outstream.write(value.encode())
self._outstream.write(self._console_separator.encode())
finally:
self._outstream.flush()
if self.get_options().output_file:
self._outstream.close()
def console_output(self, targets):
raise NotImplementedError("console_output must be implemented by subclasses of ConsoleTask")
| {
"content_hash": "d2073a9d0c271a83b95f52bbd241ebd2",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 104,
"avg_line_length": 37.433734939759034,
"alnum_prop": 0.5947859671709044,
"repo_name": "wisechengyi/pants",
"id": "8ebc24a6583e92303436068ec74e4d2cc3722e3f",
"size": "3239",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/task/console_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "6634"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "507948"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7608990"
},
{
"name": "Rust",
"bytes": "1005243"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "105217"
},
{
"name": "Starlark",
"bytes": "489739"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
import json
import requests
from hanlder import RequestHandler as BaseRequestHandler
import tornado.web
from ..utils import deal_errors, get_local_time
from .forms import BlogWriterForm
class RequestHandler(BaseRequestHandler):
def q(self, query_string, query_variables={},headers={}):
sid = self.get_secure_cookie('SID')
url = "http://127.0.0.1:3000/graphql"
if sid:
if 'Authorization' not in headers:
headers['Authorization'] = 'OOC ' + sid.decode()
s = requests.Session()
r = s.post(url, json={"query": query_string, "variables": query_variables}, headers=headers)
print("r.text=",r.text)
return r.json()
class IndexHandler(RequestHandler):
BLOG_LIST_QUERY = '''
query Blogs(
$first: Int
$sort_by: String
$sort_direction: String
$after: String
){
blog{id,...F1}
}
fragment F1 on BlogApi{
articles(
first: $first
sort_by: $sort_by
sort_direction: $sort_direction
after: $after
) {
edges {
node {
id
author {
nickname
}
title
body
body_markup
user_id
updated
uid
tags {
name
}
}
}
pageInfo {
hasPreviousPage
startCursor
endCursor
hasNextPage
}
}
}
'''
def get(self):
bloglist_query_variables = {
"first": self.get_argument("first", 4),
"sort_by": "updated",
"sort_direction": "desc",
"after": self.get_argument("after", "")
}
bloglist_query_variables = json.dumps(bloglist_query_variables)
r = self.q(self.BLOG_LIST_QUERY, bloglist_query_variables)
print("index==>blog_list",r)
blog_list = r.get("data").get("blog").get("articles").get("edges")
self.render('index.html', blog_list=blog_list, get_local_time=get_local_time )
class BlogShowHandler(RequestHandler):
BLOG_SHOW_QUERY = '''
query Blog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
article: article_u (
uid: $uid
) {
title
body: body_html
tags {
name
}
}
}
'''
def get(self, UID):
blogshow_query_variables = {
"uid": UID,
}
blogshow_query_variables = json.dumps(blogshow_query_variables)
r = self.q(self.BLOG_SHOW_QUERY, blogshow_query_variables)
# print('r--->',r)
blog = r.get("data").get("blog").get("article")
self.render('blog/blog_show.html', blog=blog)
| {
"content_hash": "ec998aebcb44eb1c120bead216666f1a",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 100,
"avg_line_length": 24.267857142857142,
"alnum_prop": 0.5342163355408388,
"repo_name": "nuanri/hiblog",
"id": "7b439a13bf2a49997d7235ce2774aa1317bb8fc8",
"size": "2735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app/blog/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "308893"
},
{
"name": "HTML",
"bytes": "121225"
},
{
"name": "JavaScript",
"bytes": "457295"
},
{
"name": "Python",
"bytes": "52385"
}
],
"symlink_target": ""
} |
import platform
from support import Group
from support import meta_service
from clastic import Application
from clastic import render_basic
PORT = 8888
META_PORT = 8889
def home_handler():
return 'Welcome to SuPPort!'
def main():
app = Application([('/', home_handler, render_basic)])
meta_app = meta_service.create_meta_app()
wsgi_apps = [(app, ('0.0.0.0', PORT), False),
(meta_app, ('0.0.0.0', META_PORT), False)]
if platform.system() == 'Windows':
group = Group(wsgi_apps=wsgi_apps)
else:
group = Group(wsgi_apps=wsgi_apps,
prefork=True,
num_workers=2,
daemonize=True)
group.serve_forever()
if __name__ == '__main__':
main()
| {
"content_hash": "c1fa596a9a7fe0080b117ccd43c6f07f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 59,
"avg_line_length": 23.272727272727273,
"alnum_prop": 0.5768229166666666,
"repo_name": "paypal/support",
"id": "33af43dca6093a23c8355f9685fe578ab52c20bf",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basic_wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1332"
},
{
"name": "Jupyter Notebook",
"bytes": "62314"
},
{
"name": "Python",
"bytes": "200095"
}
],
"symlink_target": ""
} |
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import py4j
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.sql import SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type
from pyspark.tests import ReusedPySparkTestCase
from pyspark.sql.functions import UserDefinedFunction, sha2
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
class SQLContextTests(ReusedPySparkTestCase):
def test_get_or_create(self):
sqlCtx = SQLContext.getOrCreate(self.sc)
self.assertTrue(SQLContext.getOrCreate(self.sc) is sqlCtx)
def test_new_session(self):
sqlCtx = SQLContext.getOrCreate(self.sc)
sqlCtx.setConf("test_key", "a")
sqlCtx2 = sqlCtx.newSession()
sqlCtx2.setConf("test_key", "b")
self.assertEqual(sqlCtx.getConf("test_key", ""), "a")
self.assertEqual(sqlCtx2.getConf("test_key", ""), "b")
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sqlCtx = SQLContext(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
rdd = cls.sc.parallelize(cls.testData, 2)
cls.df = rdd.toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.sqlCtx.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.sqlCtx.range(1, 1).count(), 0)
self.assertEqual(self.sqlCtx.range(1, 0, -1).count(), 1)
self.assertEqual(self.sqlCtx.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.sqlCtx.range(-2).count(), 0)
self.assertEqual(self.sqlCtx.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.sqlCtx.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_explode(self):
from pyspark.sql.functions import explode
d = [Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"})]
rdd = self.sc.parallelize(d)
data = self.sqlCtx.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.sqlCtx.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.sqlCtx.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.sqlCtx.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.sqlCtx.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.sqlCtx.registerFunction("strlen", lambda string: len(string), IntegerType())
self.sqlCtx.createDataFrame(self.sc.parallelize([Row(a="test")])).registerTempTable("test")
[res] = self.sqlCtx.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.sqlCtx.createDataFrame(rdd).registerTempTable("test")
self.sqlCtx.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.sqlCtx.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.sqlCtx.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.sqlCtx.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.sqlCtx.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.sqlCtx.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.sqlCtx.jsonRDD(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist()
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.registerTempTable("temp")
df = self.sqlCtx.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.sqlCtx.jsonRDD(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.sqlCtx.createDataFrame(df.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.sqlCtx.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.sqlCtx.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.sqlCtx.createDataFrame(rdd)
self.assertEqual([], df.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.map(lambda r: r.s).collect())
df.registerTempTable("test")
result = self.sqlCtx.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.sqlCtx.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.map(lambda r: r.s).collect())
df2.registerTempTable("test2")
result = self.sqlCtx.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.sqlCtx.inferSchema(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.sqlCtx.inferSchema(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.sqlCtx.inferSchema(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.sqlCtx.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.sqlCtx.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.sqlCtx.createDataFrame(rdd, schema)
results = df.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1, x.date1,
x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.registerTempTable("table2")
r = self.sqlCtx.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.sqlCtx.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.registerTempTable("test")
row = self.sqlCtx.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _verify_type
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.sqlCtx._ssql_ctx.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_verify_type(ExamplePoint(1.0, 2.0), ExamplePointUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], ExamplePointUDT()))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_verify_type(PythonOnlyPoint(1.0, 2.0), PythonOnlyUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], PythonOnlyUDT()))
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.sqlCtx.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.registerTempTable("labeled_point")
point = self.sqlCtx.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.sqlCtx.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.registerTempTable("labeled_point")
point = self.sqlCtx.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.sqlCtx.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.sqlCtx.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.sqlCtx.createDataFrame([row])
self.assertEqual(1.0, df.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.sqlCtx.createDataFrame([row])
self.assertEqual(1.0, df.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.sqlCtx.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.sqlCtx.parquetFile(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.sqlCtx.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.sqlCtx.parquetFile(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(), cs.startswith('a'), cs.endswith('a')
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertTrue(row[1], 1)
self.assertTrue(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
try:
struct1 = StructType().add("name")
self.assertEqual(1, 0)
except ValueError:
self.assertEqual(1, 1)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.sqlCtx.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.sqlCtx.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.sqlCtx.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.sqlCtx.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.sqlCtx.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.sqlCtx.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.sqlCtx.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.sqlCtx.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.sqlCtx.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.sqlCtx.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.sqlCtx.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.sqlCtx.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.sqlCtx.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.sqlCtx.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.sqlCtx.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.sqlCtx.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.sqlCtx.jsonRDD(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
df = self.sqlCtx.createDataFrame([(1,)], ["数量"])
self.assertEqual(StructType([StructField("数量", LongType(), True)]), df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# this saving as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.saveAsParquetFile(output_dir)
df1 = self.sqlCtx.parquetFile(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.sqlCtx.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.sqlCtx.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.sqlCtx.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.sqlCtx.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.sqlCtx.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.sqlCtx.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.sqlCtx.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# fillna shouldn't change non-null values
row = self.sqlCtx.createDataFrame([(u'Alice', 10, 80.1)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.sqlCtx.createDataFrame([(u'Alice', None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.sqlCtx.createDataFrame([(u'Alice', None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with string
row = self.sqlCtx.createDataFrame([(None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.sqlCtx.createDataFrame(
[(None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
# fillna with subset specified for numeric cols
row = self.sqlCtx.createDataFrame(
[(None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.sqlCtx.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.sqlCtx.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["'length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.sqlCtx.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.sqlCtx.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.sqlCtx.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.sqlCtx.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.sqlCtx.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.sqlCtx.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.sqlCtx.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.sqlCtx.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
# RuntimeException should not be captured
self.assertRaises(py4j.protocol.Py4JJavaError, lambda: self.sqlCtx.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.sqlCtx.sql("SET mapred.reduce.tasks=-1"))
df = self.sqlCtx.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.sqlCtx.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.sqlCtx.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.join(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
_scala_HiveContext =\
cls.sc._jvm.org.apache.spark.sql.hive.test.TestHiveContext(cls.sc._jsc.sc())
cls.sqlCtx = HiveContext(cls.sc, _scala_HiveContext)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.sqlCtx.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.sqlCtx.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.sqlCtx.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.sqlCtx.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.sqlCtx.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.sqlCtx.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.sqlCtx.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.sqlCtx.sql("DROP TABLE savedJsonTable")
self.sqlCtx.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.sqlCtx.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.sqlCtx.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.sqlCtx.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.sqlCtx.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.sqlCtx.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.sqlCtx.sql("DROP TABLE savedJsonTable")
self.sqlCtx.sql("DROP TABLE externalJsonTable")
self.sqlCtx.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.sqlCtx.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.rowNumber().over(w),
F.rank().over(w),
F.denseRank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.sqlCtx.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.rowNumber().over(w),
F.rank().over(w),
F.denseRank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "65d7e1c7fcc50da158aa39473656fa88",
"timestamp": "",
"source": "github",
"line_count": 1207,
"max_line_length": 100,
"avg_line_length": 43.34051367025683,
"alnum_prop": 0.5859458632818474,
"repo_name": "pronix/spark",
"id": "f465e1fa209419478f10b6d7944a008e0f1f72ba",
"size": "53147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26730"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "15314"
},
{
"name": "Groff",
"bytes": "5379"
},
{
"name": "Java",
"bytes": "1736009"
},
{
"name": "JavaScript",
"bytes": "69325"
},
{
"name": "Makefile",
"bytes": "7767"
},
{
"name": "Python",
"bytes": "1625694"
},
{
"name": "R",
"bytes": "474164"
},
{
"name": "Scala",
"bytes": "14642450"
},
{
"name": "Shell",
"bytes": "140679"
},
{
"name": "Thrift",
"bytes": "2016"
}
],
"symlink_target": ""
} |
from trex_stl_lib.api import *
from trex_stl_lib.utils import parsing_opts, text_tables
import threading
import tempfile
import select
from distutils import spawn
from subprocess import Popen
import subprocess
# defines a generic monitor writer
class CaptureMonitorWriter(object):
def deinit(self):
# by default - nothing to deinit
pass
def handle_pkts (self, pkts):
raise NotImplementedError
def periodic_check (self):
# by default - nothing to check
pass
# a stdout monitor
class CaptureMonitorWriterStdout(CaptureMonitorWriter):
def __init__ (self, logger, is_brief, start_ts):
self.logger = logger
self.is_brief = is_brief
self.start_ts = start_ts
# unicode arrows
self.RX_ARROW = u'\u25c0\u2500\u2500'
self.TX_ARROW = u'\u2500\u2500\u25b6'
# decode issues with Python 2
if sys.version_info < (3,0):
self.RX_ARROW = self.RX_ARROW.encode('utf-8')
self.TX_ARROW = self.TX_ARROW.encode('utf-8')
self.logger.pre_cmd("Starting stdout capture monitor - verbose: '{0}'".format('low' if self.is_brief else 'high'))
self.logger.post_cmd(RC_OK())
self.logger.log(format_text("\n*** use 'capture monitor stop' to abort capturing... ***\n", 'bold'))
def get_scapy_name (self, pkt_scapy):
layer = pkt_scapy
while layer.payload and layer.payload.name not in('Padding', 'Raw'):
layer = layer.payload
return layer.name
def format_origin (self, origin):
if origin == 'RX':
return '{0} {1}'.format(self.RX_ARROW, 'RX')
elif origin == 'TX':
return '{0} {1}'.format(self.TX_ARROW, 'TX')
else:
return '{0}'.format(origin)
def __handle_pkt (self, pkt):
pkt_bin = base64.b64decode(pkt['binary'])
pkt_scapy = Ether(pkt_bin)
self.logger.log(format_text('\n\n#{} Port: {} {}\n'.format(pkt['index'], pkt['port'], self.format_origin(pkt['origin'])), 'bold', ''))
self.logger.log(format_text(' Type: {}, Size: {} B, TS: {:.2f} [sec]\n'.format(self.get_scapy_name(pkt_scapy), len(pkt_bin), pkt['ts'] - self.start_ts), 'bold'))
if self.is_brief:
self.logger.log(' {0}'.format(pkt_scapy.command()))
else:
pkt_scapy.show(label_lvl = ' ')
self.logger.log('')
return len(pkt_bin)
def handle_pkts (self, pkts):
try:
byte_count = 0
for pkt in pkts:
byte_count += self.__handle_pkt(pkt)
return byte_count
finally:
# make sure to restore the logger
self.logger.prompt_redraw()
# a pipe based monitor
class CaptureMonitorWriterPipe(CaptureMonitorWriter):
def __init__ (self, logger, start_ts):
self.logger = logger
self.fifo = None
self.start_ts = start_ts
# generate a temp fifo pipe
self.fifo_name = tempfile.mktemp()
self.wireshark_pid = None
try:
self.logger.pre_cmd('Starting pipe capture monitor')
os.mkfifo(self.fifo_name)
self.logger.post_cmd(RC_OK())
# try to locate wireshark on the machine
self.wireshark_exe = self.locate_wireshark()
# we found wireshark - try to launch a process
if self.wireshark_exe:
self.wireshark_pid = self.launch_wireshark()
# did we succeed ?
if not self.wireshark_pid:
self.logger.log(format_text("*** Please manually run 'wireshark -k -i {0}' ***".format(self.fifo_name), 'bold'))
# blocks until pipe is connected
self.logger.pre_cmd("Waiting for Wireshark pipe connection")
self.fifo = os.open(self.fifo_name, os.O_WRONLY)
self.logger.post_cmd(RC_OK())
self.logger.log(format_text('\n*** Capture monitoring started ***\n', 'bold'))
# open for write using a PCAP writer
self.writer = RawPcapWriter(self.fifo_name, linktype = 1, sync = True)
self.writer._write_header(None)
# register a poller
self.poll = select.poll()
self.poll.register(self.fifo, select.EPOLLERR)
self.is_init = True
except KeyboardInterrupt as e:
self.deinit()
self.logger.post_cmd(RC_ERR(""))
raise STLError("*** pipe monitor aborted...cleaning up")
except OSError as e:
self.deinit()
self.logger.post_cmd(RC_ERR(""))
raise STLError("failed to create pipe {0}\n{1}".format(self.fifo_name, str(e)))
def locate_wireshark (self):
self.logger.pre_cmd('Trying to locate Wireshark')
wireshark_exe = spawn.find_executable('wireshark')
self.logger.post_cmd(RC_OK() if wireshark_exe else RC_ERR())
if not wireshark_exe:
return None
dumpcap = os.path.join(os.path.dirname(wireshark_exe), 'dumpcap')
self.logger.pre_cmd("Checking permissions on '{}'".format(dumpcap))
if not os.access(dumpcap, os.X_OK):
self.logger.post_cmd(RC_ERR('bad permissions on dumpcap'))
return None
self.logger.post_cmd(RC_OK())
return wireshark_exe
# try to launch wireshark... returns true on success
def launch_wireshark (self):
cmd = '{0} -k -i {1}'.format(self.wireshark_exe, self.fifo_name)
self.logger.pre_cmd("Launching '{0}'".format(cmd))
try:
devnull = open(os.devnull, 'w')
self.wireshark_pid = Popen(cmd.split(),
stdout = devnull,
stderr = devnull,
stdin = subprocess.PIPE,
preexec_fn = os.setpgrp,
close_fds = True)
self.logger.post_cmd(RC_OK())
return True
except OSError as e:
self.wireshark_pid = None
self.logger.post_cmd(RC_ERR())
return False
def deinit (self):
try:
if self.fifo:
os.close(self.fifo)
self.fifo = None
if self.fifo_name:
os.unlink(self.fifo_name)
self.fifo_name = None
except OSError:
pass
def periodic_check (self):
self.check_pipe()
def check_pipe (self):
if self.poll.poll(0):
raise STLError('pipe has been disconnected')
def handle_pkts (self, pkts):
# first check the pipe is alive
self.check_pipe()
return self.handle_pkts_internal(pkts)
def handle_pkts_internal (self, pkts):
byte_count = 0
for pkt in pkts:
pkt_bin = base64.b64decode(pkt['binary'])
ts_sec, ts_usec = sec_split_usec(pkt['ts'] - self.start_ts)
try:
self.writer._write_packet(pkt_bin, sec = ts_sec, usec = ts_usec)
except Exception as e:
raise STLError('fail to write packets to pipe: {}'.format(str(e)))
byte_count += len(pkt_bin)
return byte_count
# capture monitor - a live capture
class CaptureMonitor(object):
def __init__ (self, client, cmd_lock, tx_port_list, rx_port_list, rate_pps, mon_type, bpf_filter):
self.client = client
self.logger = client.logger
self.cmd_lock = cmd_lock
self.t = None
self.writer = None
self.capture_id = None
self.tx_port_list = tx_port_list
self.rx_port_list = rx_port_list
self.rate_pps = rate_pps
self.mon_type = mon_type
self.bpf_filter = bpf_filter
# try to launch
try:
self.__start()
except Exception as e:
self.__stop()
raise
def __start (self):
# create a capture on the server
with self.logger.supress():
data = self.client.start_capture(self.tx_port_list,
self.rx_port_list,
limit = self.rate_pps,
mode = 'cyclic',
bpf_filter = self.bpf_filter)
self.capture_id = data['id']
start_ts = data['ts']
# create a writer
if self.mon_type == 'compact':
self.writer = CaptureMonitorWriterStdout(self.logger, True, start_ts)
elif self.mon_type == 'verbose':
self.writer = CaptureMonitorWriterStdout(self.logger, False, start_ts)
elif self.mon_type == 'pipe':
self.writer = CaptureMonitorWriterPipe(self.logger, start_ts)
else:
raise STLError('Internal error: unknown writer type')
# start the fetching thread
self.t = threading.Thread(target = self.__thread_cb)
self.t.setDaemon(True)
self.active = True
self.t.start()
# internal stop
def __stop (self):
# stop the thread
if self.t and self.t.is_alive():
self.active = False
self.t.join()
self.t = None
# deinit the writer
if self.writer:
self.writer.deinit()
self.writer = None
# user call for stop (adds log)
def stop (self):
self.logger.pre_cmd("Stopping capture monitor")
try:
self.__stop()
except Exception as e:
self.logger.post_cmd(RC_ERR(""))
raise
self.logger.post_cmd(RC_OK())
def get_mon_row (self):
return [self.capture_id,
format_text('ACTIVE' if self.t.is_alive() else 'DEAD', 'bold'),
format_num(self.matched, compact = False),
self.pkt_count,
format_num(self.byte_count, suffix = 'B'),
', '.join([str(x) for x in self.tx_port_list] if self.tx_port_list else '-'),
', '.join([str(x) for x in self.rx_port_list] if self.rx_port_list else '-'),
self.bpf_filter or '-',
]
def is_active (self):
return self.active
def get_capture_id (self):
return self.capture_id
# sleeps with high freq checks for active
def __sleep (self):
for _ in range(5):
if not self.active:
return False
time.sleep(0.1)
return True
def __lock (self):
while True:
rc = self.cmd_lock.acquire(False)
if rc:
return True
if not self.active:
return False
time.sleep(0.1)
def __unlock (self):
self.cmd_lock.release()
def __thread_cb (self):
try:
self.__thread_main_loop()
# common errors
except STLError as e:
self.logger.log(format_text("\n\nMonitor has encountered the following error: '{}'\n".format(e.brief()), 'bold'))
self.logger.log(format_text("\n*** monitor is inactive - please restart the monitor ***\n", 'bold'))
self.logger.prompt_redraw()
# unexpected errors
except Exception as e:
self.logger.log("\n\n*** A fatal internal error has occurred: '{}'\n".format(str(e)))
self.logger.log(format_text("\n*** monitor is inactive - please restart the monitor ***\n", 'bold'))
self.logger.prompt_redraw()
finally:
try: # to remove the capture as best effort
with self.logger.supress():
self.client.stop_capture(self.capture_id)
except:
pass
if self.writer:
self.writer.deinit()
self.writer = None
def __thread_main_loop (self):
self.pkt_count = 0
self.byte_count = 0
while self.active:
# sleep - if interrupt by graceful shutdown - go out
if not self.__sleep():
return
# check that the writer is ok
self.writer.periodic_check()
# try to lock - if interrupt by graceful shutdown - go out
if not self.__lock():
return
try:
if not self.client.is_connected():
raise STLError('client has been disconnected')
rc = self.client._transmit("capture", params = {'command': 'fetch', 'capture_id': self.capture_id, 'pkt_limit': 10})
if not rc:
raise STLError(rc)
finally:
self.__unlock()
# no packets - skip
pkts = rc.data()['pkts']
if not pkts:
continue
byte_count = self.writer.handle_pkts(pkts)
self.pkt_count += len(pkts)
self.byte_count += byte_count
# main class
class CaptureManager(object):
def __init__ (self, client, cmd_lock):
self.c = client
self.cmd_lock = cmd_lock
self.logger = client.logger
self.monitor = None
# install parsers
self.parser = parsing_opts.gen_parser(self, "capture", self.parse_line_internal.__doc__)
self.subparsers = self.parser.add_subparsers(title = "commands", dest="commands")
self.install_record_parser()
self.install_monitor_parser()
# show
self.show_parser = self.subparsers.add_parser('show', help = "show all active captures")
# reset
self.clear_parser = self.subparsers.add_parser('clear', help = "remove all active captures")
# register handlers
self.cmds = {'record': self.parse_record, 'monitor' : self.parse_monitor, 'clear': self.parse_clear, 'show' : self.parse_show}
def install_record_parser (self):
# recording
self.record_parser = self.subparsers.add_parser('record', help = "PCAP recording")
record_sub = self.record_parser.add_subparsers(title = 'commands', dest = 'record_cmd')
self.record_start_parser = record_sub.add_parser('start', help = "starts a new buffered capture")
self.record_stop_parser = record_sub.add_parser('stop', help = "stops an active buffered capture")
# start
self.record_start_parser.add_arg_list(parsing_opts.TX_PORT_LIST,
parsing_opts.RX_PORT_LIST,
parsing_opts.LIMIT,
parsing_opts.BPF_FILTER)
# stop
self.record_stop_parser.add_arg_list(parsing_opts.CAPTURE_ID,
parsing_opts.OUTPUT_FILENAME)
def install_monitor_parser (self):
# monitor
self.monitor_parser = self.subparsers.add_parser('monitor', help = 'live monitoring')
monitor_sub = self.monitor_parser.add_subparsers(title = 'commands', dest = 'mon_cmd')
self.monitor_start_parser = monitor_sub.add_parser('start', help = 'starts a monitor')
self.monitor_stop_parser = monitor_sub.add_parser('stop', help = 'stops an active monitor')
self.monitor_start_parser.add_arg_list(parsing_opts.TX_PORT_LIST,
parsing_opts.RX_PORT_LIST,
parsing_opts.MONITOR_TYPE,
parsing_opts.BPF_FILTER)
def stop (self):
if self.monitor:
self.monitor.stop()
self.monitor = None
# main entry point for parsing commands from console
def parse_line (self, line):
try:
self.parse_line_internal(line)
except STLError as e:
self.logger.log("\nAction has failed with the following error:\n\n" + format_text(e.brief() + "\n", 'bold'))
return RC_ERR(e.brief())
def parse_line_internal (self, line):
'''Manage PCAP recorders'''
# default
if not line:
line = "show"
opts = self.parser.parse_args(line.split())
if not opts:
return opts
# call the handler
self.cmds[opts.commands](opts)
# record methods
def parse_record (self, opts):
if opts.record_cmd == 'start':
self.parse_record_start(opts)
elif opts.record_cmd == 'stop':
self.parse_record_stop(opts)
else:
self.record_parser.formatted_error("too few arguments")
def parse_record_start (self, opts):
if not opts.tx_port_list and not opts.rx_port_list:
self.record_start_parser.formatted_error('please provide either --tx or --rx')
return
rc = self.c.start_capture(opts.tx_port_list, opts.rx_port_list, opts.limit, mode = 'fixed', bpf_filter = opts.filter)
self.logger.log(format_text("*** Capturing ID is set to '{0}' ***".format(rc['id']), 'bold'))
self.logger.log(format_text("*** Please call 'capture record stop --id {0} -o <out.pcap>' when done ***\n".format(rc['id']), 'bold'))
def parse_record_stop (self, opts):
ids = self.c.get_capture_status().keys()
if self.monitor and (opts.capture_id == self.monitor.get_capture_id()):
self.record_stop_parser.formatted_error("'{0}' is a monitor, please use 'capture monitor stop'".format(opts.capture_id))
return
if opts.capture_id not in ids:
self.record_stop_parser.formatted_error("'{0}' is not an active capture ID".format(opts.capture_id))
return
self.c.stop_capture(opts.capture_id, opts.output_filename)
# monitor methods
def parse_monitor (self, opts):
if opts.mon_cmd == 'start':
self.parse_monitor_start(opts)
elif opts.mon_cmd == 'stop':
self.parse_monitor_stop(opts)
else:
self.monitor_parser.formatted_error("too few arguments")
def parse_monitor_start (self, opts):
mon_type = 'compact'
if opts.verbose:
mon_type = 'verbose'
elif opts.pipe:
mon_type = 'pipe'
if not opts.tx_port_list and not opts.rx_port_list:
self.monitor_start_parser.formatted_error('please provide either --tx or --rx')
return
if self.monitor:
if self.monitor.is_active():
self.logger.log(format_text('*** Stopping old monitor to open new one. ***', 'bold'))
self.monitor.stop()
self.monitor = None
self.monitor = CaptureMonitor(self.c, self.cmd_lock, opts.tx_port_list, opts.rx_port_list, 100, mon_type, opts.filter)
def parse_monitor_stop (self, opts):
if self.monitor:
self.monitor.stop()
self.monitor = None
def parse_clear (self, opts):
if self.monitor:
self.monitor.stop()
self.monitor = None
self.c.remove_all_captures()
def parse_show (self, opts):
data = self.c.get_capture_status()
# captures
cap_table = text_tables.TRexTextTable()
cap_table.set_cols_align(["c"] * 8)
cap_table.set_cols_width([15] * 8)
# monitor
mon_table = text_tables.TRexTextTable()
mon_table.set_cols_align(["c"] * 8)
mon_table.set_cols_width([15] * 8)
for capture_id, elem in data.items():
if self.monitor and (self.monitor.get_capture_id() == capture_id):
self.monitor.matched = elem['matched']
row = self.monitor.get_mon_row()
mon_table.add_rows([row], header=False)
else:
row = [capture_id,
format_text(elem['state'], 'bold'),
format_num(elem['matched'], compact = False),
'[{0}/{1}]'.format(elem['count'], elem['limit']),
format_num(elem['bytes'], suffix = 'B'),
bitfield_to_str(elem['filter']['tx']),
bitfield_to_str(elem['filter']['rx']),
elem['filter']['bpf'] or '-']
cap_table.add_rows([row], header=False)
cap_table.header(['ID', 'Status', 'Matched', 'Packets', 'Bytes', 'TX Ports', 'RX Ports', 'BPF Filter'])
mon_table.header(['ID', 'Status', 'Matched', 'Packets Seen', 'Bytes Seen', 'TX Ports', 'RX Ports', 'BPF Filter'])
if cap_table._rows:
text_tables.print_table_with_header(cap_table, '\nActive Recorders')
if mon_table._rows:
text_tables.print_table_with_header(mon_table, '\nActive Monitor')
| {
"content_hash": "c48845a61d6e4ec1fed27e216c336f4f",
"timestamp": "",
"source": "github",
"line_count": 655,
"max_line_length": 172,
"avg_line_length": 33.966412213740455,
"alnum_prop": 0.5084501977705861,
"repo_name": "kisel/trex-core",
"id": "5673adc74bc83cc7e4371e9db32f2480de2ad902",
"size": "22248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/automation/trex_control_plane/stl/console/trex_capture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "16355010"
},
{
"name": "C++",
"bytes": "4832431"
},
{
"name": "CMake",
"bytes": "8882"
},
{
"name": "CSS",
"bytes": "333"
},
{
"name": "HTML",
"bytes": "5012"
},
{
"name": "JavaScript",
"bytes": "1234"
},
{
"name": "Makefile",
"bytes": "163741"
},
{
"name": "Python",
"bytes": "12389428"
},
{
"name": "Shell",
"bytes": "22573"
}
],
"symlink_target": ""
} |
import copy
import os
import re
import shutil
import tempfile
import unittest
from functools import partial
from pathlib import Path
import pytest
import requests
import yaml
from huggingface_hub import (
DatasetCard,
DatasetCardData,
EvalResult,
ModelCard,
ModelCardData,
metadata_eval_result,
metadata_load,
metadata_save,
metadata_update,
)
from huggingface_hub.constants import REPOCARD_NAME
from huggingface_hub.file_download import hf_hub_download
from huggingface_hub.hf_api import HfApi
from huggingface_hub.repocard import RepoCard
from huggingface_hub.repocard_data import CardData
from huggingface_hub.repository import Repository
from huggingface_hub.utils import is_jinja_available, logging
from .testing_constants import (
ENDPOINT_STAGING,
ENDPOINT_STAGING_BASIC_AUTH,
TOKEN,
USER,
)
from .testing_utils import (
expect_deprecation,
repo_name,
retry_endpoint,
set_write_permission_and_retry,
)
SAMPLE_CARDS_DIR = Path(__file__).parent / "fixtures/cards"
ROUND_TRIP_MODELCARD_CASE = """
---
language: no
datasets: CLUECorpusSmall
widget:
- text: 北京是[MASK]国的首都。
---
# Title
"""
DUMMY_MODELCARD = """
Hi
---
license: mit
datasets:
- foo
- bar
---
Hello
"""
DUMMY_MODELCARD_TARGET = """
Hi
---
meaning_of_life: 42
---
Hello
"""
DUMMY_MODELCARD_TARGET_NO_YAML = """---
meaning_of_life: 42
---
Hello
"""
DUMMY_NEW_MODELCARD_TARGET = """---
meaning_of_life: 42
---
"""
DUMMY_MODELCARD_TARGET_NO_TAGS = """
Hello
"""
DUMMY_MODELCARD_EVAL_RESULT = """---
model-index:
- name: RoBERTa fine-tuned on ReactionGIF
results:
- task:
type: text-classification
name: Text Classification
dataset:
name: ReactionGIF
type: julien-c/reactiongif
config: default
split: test
metrics:
- type: accuracy
value: 0.2662102282047272
name: Accuracy
config: default
verified: false
---
"""
DUMMY_MODELCARD_NO_TEXT_CONTENT = """---
license: cc-by-sa-4.0
---
"""
DUMMY_MODELCARD_EVAL_RESULT_BOTH_VERIFIED_AND_UNVERIFIED = """---
model-index:
- name: RoBERTa fine-tuned on ReactionGIF
results:
- task:
type: text-classification
name: Text Classification
dataset:
name: ReactionGIF
type: julien-c/reactiongif
config: default
split: test
metrics:
- type: accuracy
value: 0.2662102282047272
name: Accuracy
config: default
verified: false
- task:
type: text-classification
name: Text Classification
dataset:
name: ReactionGIF
type: julien-c/reactiongif
config: default
split: test
metrics:
- type: accuracy
value: 0.6666666666666666
name: Accuracy
config: default
verified: true
---
This is a test model card.
"""
logger = logging.get_logger(__name__)
REPOCARD_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "fixtures/repocard"
)
repo_name = partial(repo_name, prefix="dummy-hf-hub")
def require_jinja(test_case):
"""
Decorator marking a test that requires Jinja2.
These tests are skipped when Jinja2 is not installed.
"""
if not is_jinja_available():
return unittest.skip("test requires Jinja2.")(test_case)
else:
return test_case
class RepocardMetadataTest(unittest.TestCase):
def setUp(self):
os.makedirs(REPOCARD_DIR, exist_ok=True)
def tearDown(self) -> None:
if os.path.exists(REPOCARD_DIR):
shutil.rmtree(REPOCARD_DIR, onerror=set_write_permission_and_retry)
logger.info(f"Does {REPOCARD_DIR} exist: {os.path.exists(REPOCARD_DIR)}")
def test_metadata_load(self):
filepath = Path(REPOCARD_DIR) / REPOCARD_NAME
filepath.write_text(DUMMY_MODELCARD)
data = metadata_load(filepath)
self.assertDictEqual(data, {"license": "mit", "datasets": ["foo", "bar"]})
def test_metadata_save(self):
filename = "dummy_target.md"
filepath = Path(REPOCARD_DIR) / filename
filepath.write_text(DUMMY_MODELCARD)
metadata_save(filepath, {"meaning_of_life": 42})
content = filepath.read_text()
self.assertEqual(content, DUMMY_MODELCARD_TARGET)
def test_metadata_save_from_file_no_yaml(self):
filename = "dummy_target_2.md"
filepath = Path(REPOCARD_DIR) / filename
filepath.write_text("Hello\n")
metadata_save(filepath, {"meaning_of_life": 42})
content = filepath.read_text()
self.assertEqual(content, DUMMY_MODELCARD_TARGET_NO_YAML)
def test_metadata_save_new_file(self):
filename = "new_dummy_target.md"
filepath = Path(REPOCARD_DIR) / filename
metadata_save(filepath, {"meaning_of_life": 42})
content = filepath.read_text()
self.assertEqual(content, DUMMY_NEW_MODELCARD_TARGET)
def test_no_metadata_returns_none(self):
filename = "dummy_target_3.md"
filepath = Path(REPOCARD_DIR) / filename
filepath.write_text(DUMMY_MODELCARD_TARGET_NO_TAGS)
data = metadata_load(filepath)
self.assertEqual(data, None)
def test_metadata_eval_result(self):
data = metadata_eval_result(
model_pretty_name="RoBERTa fine-tuned on ReactionGIF",
task_pretty_name="Text Classification",
task_id="text-classification",
metrics_pretty_name="Accuracy",
metrics_id="accuracy",
metrics_value=0.2662102282047272,
metrics_config="default",
metrics_verified=False,
dataset_pretty_name="ReactionGIF",
dataset_id="julien-c/reactiongif",
dataset_config="default",
dataset_split="test",
)
filename = "eval_results.md"
filepath = Path(REPOCARD_DIR) / filename
metadata_save(filepath, data)
content = filepath.read_text().splitlines()
self.assertEqual(content, DUMMY_MODELCARD_EVAL_RESULT.splitlines())
class RepocardMetadataUpdateTest(unittest.TestCase):
_api = HfApi(endpoint=ENDPOINT_STAGING, token=TOKEN)
@classmethod
@expect_deprecation("set_access_token")
def setUpClass(cls):
"""
Share this valid token in all tests below.
"""
cls._token = TOKEN
cls._api.set_access_token(TOKEN)
@retry_endpoint
def setUp(self) -> None:
self.repo_path = Path(tempfile.mkdtemp())
self.REPO_NAME = repo_name()
self.repo_id = f"{USER}/{self.REPO_NAME}"
self._api.create_repo(self.repo_id)
self._api.upload_file(
path_or_fileobj=DUMMY_MODELCARD_EVAL_RESULT.encode(),
repo_id=self.repo_id,
path_in_repo="README.md",
commit_message="Add README to main branch",
)
self.repo = Repository(
self.repo_path / self.REPO_NAME,
clone_from=self.repo_id,
use_auth_token=self._token,
git_user="ci",
git_email="ci@dummy.com",
)
self.existing_metadata = yaml.safe_load(
DUMMY_MODELCARD_EVAL_RESULT.strip().strip("-")
)
def tearDown(self) -> None:
self._api.delete_repo(repo_id=self.repo_id)
shutil.rmtree(self.repo_path)
def test_update_dataset_name(self):
new_datasets_data = {"datasets": ["test/test_dataset"]}
metadata_update(self.repo_id, new_datasets_data, token=self._token)
self.repo.git_pull()
updated_metadata = metadata_load(self.repo_path / self.REPO_NAME / "README.md")
expected_metadata = copy.deepcopy(self.existing_metadata)
expected_metadata.update(new_datasets_data)
self.assertDictEqual(updated_metadata, expected_metadata)
def test_update_existing_result_with_overwrite(self):
new_metadata = copy.deepcopy(self.existing_metadata)
new_metadata["model-index"][0]["results"][0]["metrics"][0][
"value"
] = 0.2862102282047272
metadata_update(self.repo_id, new_metadata, token=self._token, overwrite=True)
self.repo.git_pull()
updated_metadata = metadata_load(self.repo_path / self.REPO_NAME / "README.md")
self.assertDictEqual(updated_metadata, new_metadata)
def test_metadata_update_upstream(self):
new_metadata = copy.deepcopy(self.existing_metadata)
new_metadata["model-index"][0]["results"][0]["metrics"][0]["value"] = 0.1
path = hf_hub_download(
self.repo_id,
filename=REPOCARD_NAME,
use_auth_token=self._token,
)
metadata_update(self.repo_id, new_metadata, token=self._token, overwrite=True)
self.assertNotEqual(metadata_load(path), new_metadata)
self.assertEqual(metadata_load(path), self.existing_metadata)
def test_update_existing_result_without_overwrite(self):
new_metadata = copy.deepcopy(self.existing_metadata)
new_metadata["model-index"][0]["results"][0]["metrics"][0][
"value"
] = 0.2862102282047272
with pytest.raises(
ValueError,
match=(
"You passed a new value for the existing metric 'name: Accuracy, type:"
" accuracy'. Set `overwrite=True` to overwrite existing metrics."
),
):
metadata_update(
self.repo_id, new_metadata, token=self._token, overwrite=False
)
def test_update_existing_field_without_overwrite(self):
new_datasets_data = {"datasets": "['test/test_dataset']"}
metadata_update(self.repo_id, new_datasets_data, token=self._token)
with pytest.raises(
ValueError,
match=(
"You passed a new value for the existing meta data field 'datasets'."
" Set `overwrite=True` to overwrite existing metadata."
),
):
new_datasets_data = {"datasets": "['test/test_dataset_2']"}
metadata_update(
self.repo_id,
new_datasets_data,
token=self._token,
overwrite=False,
)
def test_update_new_result_existing_dataset(self):
new_result = metadata_eval_result(
model_pretty_name="RoBERTa fine-tuned on ReactionGIF",
task_pretty_name="Text Classification",
task_id="text-classification",
metrics_pretty_name="Recall",
metrics_id="recall",
metrics_value=0.7762102282047272,
metrics_config="default",
metrics_verified=False,
dataset_pretty_name="ReactionGIF",
dataset_id="julien-c/reactiongif",
dataset_config="default",
dataset_split="test",
)
metadata_update(self.repo_id, new_result, token=self._token, overwrite=False)
expected_metadata = copy.deepcopy(self.existing_metadata)
expected_metadata["model-index"][0]["results"][0]["metrics"].append(
new_result["model-index"][0]["results"][0]["metrics"][0]
)
self.repo.git_pull()
updated_metadata = metadata_load(self.repo_path / self.REPO_NAME / "README.md")
self.assertDictEqual(updated_metadata, expected_metadata)
def test_update_new_result_new_dataset(self):
new_result = metadata_eval_result(
model_pretty_name="RoBERTa fine-tuned on ReactionGIF",
task_pretty_name="Text Classification",
task_id="text-classification",
metrics_pretty_name="Accuracy",
metrics_id="accuracy",
metrics_value=0.2662102282047272,
metrics_config="default",
metrics_verified=False,
dataset_pretty_name="ReactionJPEG",
dataset_id="julien-c/reactionjpeg",
dataset_config="default",
dataset_split="test",
)
metadata_update(self.repo_id, new_result, token=self._token, overwrite=False)
expected_metadata = copy.deepcopy(self.existing_metadata)
expected_metadata["model-index"][0]["results"].append(
new_result["model-index"][0]["results"][0]
)
self.repo.git_pull()
updated_metadata = metadata_load(self.repo_path / self.REPO_NAME / "README.md")
self.assertDictEqual(updated_metadata, expected_metadata)
def test_update_metadata_on_empty_text_content(self) -> None:
"""Test `update_metadata` on a model card that has metadata but no text content
Regression test for https://github.com/huggingface/huggingface_hub/issues/1010
"""
# Create modelcard with metadata but empty text content
with self.repo.commit("Add README to main branch"):
with open("README.md", "w") as f:
f.write(DUMMY_MODELCARD_NO_TEXT_CONTENT)
metadata_update(self.repo_id, {"tag": "test"}, token=self._token)
# Check update went fine
self.repo.git_pull()
updated_metadata = metadata_load(self.repo_path / self.REPO_NAME / "README.md")
expected_metadata = {"license": "cc-by-sa-4.0", "tag": "test"}
self.assertDictEqual(updated_metadata, expected_metadata)
def test_update_with_existing_name(self):
new_metadata = copy.deepcopy(self.existing_metadata)
new_metadata["model-index"][0].pop("name")
new_metadata["model-index"][0]["results"][0]["metrics"][0][
"value"
] = 0.2862102282047272
metadata_update(self.repo_id, new_metadata, token=self._token, overwrite=True)
card_data = ModelCard.load(self.repo_id, token=self._token)
self.assertEqual(
card_data.data.model_name, self.existing_metadata["model-index"][0]["name"]
)
def test_update_without_existing_name(self):
# delete existing metadata
self._api.upload_file(
path_or_fileobj="# Test".encode(),
repo_id=self.repo_id,
path_in_repo="README.md",
)
new_metadata = copy.deepcopy(self.existing_metadata)
new_metadata["model-index"][0].pop("name")
metadata_update(self.repo_id, new_metadata, token=self._token, overwrite=True)
card_data = ModelCard.load(self.repo_id, token=self._token)
self.assertEqual(card_data.data.model_name, self.repo_id)
def test_update_with_both_verified_and_unverified_metric(self):
"""Regression test for #1185.
See https://github.com/huggingface/huggingface_hub/issues/1185.
"""
self._api.upload_file(
path_or_fileobj=DUMMY_MODELCARD_EVAL_RESULT_BOTH_VERIFIED_AND_UNVERIFIED.encode(),
repo_id=self.repo_id,
path_in_repo="README.md",
)
card = ModelCard.load(self.repo_id)
metadata = card.data.to_dict()
metadata_update(self.repo_id, metadata=metadata, overwrite=True, token=TOKEN)
card_data = ModelCard.load(self.repo_id, token=self._token)
self.assertEqual(len(card_data.data.eval_results), 2)
first_result = card_data.data.eval_results[0]
second_result = card_data.data.eval_results[1]
# One is verified, the other not
self.assertFalse(first_result.verified)
self.assertTrue(second_result.verified)
# Result values are different
self.assertEqual(first_result.metric_value, 0.2662102282047272)
self.assertEqual(second_result.metric_value, 0.6666666666666666)
class TestMetadataUpdateOnMissingCard(unittest.TestCase):
def setUp(self) -> None:
"""
Share this valid token in all tests below.
"""
self._token = TOKEN
self._api = HfApi(endpoint=ENDPOINT_STAGING, token=TOKEN)
self._repo_id = f"{USER}/{repo_name()}"
def test_metadata_update_missing_readme_on_model(self) -> None:
self._api.create_repo(self._repo_id)
metadata_update(self._repo_id, {"tag": "this_is_a_test"}, token=self._token)
model_card = ModelCard.load(self._repo_id, token=self._token)
# Created a card with default template + metadata
self.assertIn("# Model Card for Model ID", str(model_card))
self.assertEqual(model_card.data.to_dict(), {"tag": "this_is_a_test"})
self._api.delete_repo(self._repo_id)
def test_metadata_update_missing_readme_on_dataset(self) -> None:
self._api.create_repo(self._repo_id, repo_type="dataset")
metadata_update(
self._repo_id,
{"tag": "this is a dataset test"},
token=self._token,
repo_type="dataset",
)
dataset_card = DatasetCard.load(self._repo_id, token=self._token)
# Created a card with default template + metadata
self.assertIn("# Dataset Card for Dataset Name", str(dataset_card))
self.assertEqual(dataset_card.data.to_dict(), {"tag": "this is a dataset test"})
self._api.delete_repo(self._repo_id, repo_type="dataset")
def test_metadata_update_missing_readme_on_space(self) -> None:
self._api.create_repo(self._repo_id, repo_type="space", space_sdk="static")
self._api.delete_file("README.md", self._repo_id, repo_type="space")
with self.assertRaises(ValueError):
# Cannot create a default readme on a space repo (should be automatically
# created on the Hub).
metadata_update(
self._repo_id,
{"tag": "this is a space test"},
token=self._token,
repo_type="space",
)
self._api.delete_repo(self._repo_id, repo_type="space")
class TestCaseWithCapLog(unittest.TestCase):
_api = HfApi(endpoint=ENDPOINT_STAGING, token=TOKEN)
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
"""Assign pytest caplog as attribute so we can use captured log messages in tests below."""
self.caplog = caplog
class RepoCardTest(TestCaseWithCapLog):
def test_load_repocard_from_file(self):
sample_path = SAMPLE_CARDS_DIR / "sample_simple.md"
card = RepoCard.load(sample_path)
self.assertEqual(
card.data.to_dict(),
{
"language": ["en"],
"license": "mit",
"library_name": "pytorch-lightning",
"tags": ["pytorch", "image-classification"],
"datasets": ["beans"],
"metrics": ["acc"],
},
)
self.assertTrue(
card.text.strip().startswith("# my-cool-model"),
"Card text not loaded properly",
)
def test_change_repocard_data(self):
sample_path = SAMPLE_CARDS_DIR / "sample_simple.md"
card = RepoCard.load(sample_path)
card.data.language = ["fr"]
with tempfile.TemporaryDirectory() as tempdir:
updated_card_path = Path(tempdir) / "updated.md"
card.save(updated_card_path)
updated_card = RepoCard.load(updated_card_path)
self.assertEqual(
updated_card.data.language, ["fr"], "Card data not updated properly"
)
@require_jinja
def test_repo_card_from_default_template(self):
card = RepoCard.from_template(
card_data=CardData(
language="en",
license="mit",
library_name="pytorch",
tags=["image-classification", "resnet"],
datasets="imagenet",
metrics=["acc", "f1"],
),
model_id=None,
)
self.assertIsInstance(card, RepoCard)
self.assertTrue(
card.text.strip().startswith("# Model Card for Model ID"),
"Default model name not set correctly",
)
@require_jinja
def test_repo_card_from_default_template_with_model_id(self):
card = RepoCard.from_template(
card_data=CardData(
language="en",
license="mit",
library_name="pytorch",
tags=["image-classification", "resnet"],
datasets="imagenet",
metrics=["acc", "f1"],
),
model_id="my-cool-model",
)
self.assertTrue(
card.text.strip().startswith("# Model Card for my-cool-model"),
"model_id not properly set in card template",
)
@require_jinja
def test_repo_card_from_custom_template(self):
template_path = SAMPLE_CARDS_DIR / "sample_template.md"
card = RepoCard.from_template(
card_data=CardData(
language="en",
license="mit",
library_name="pytorch",
tags="text-classification",
datasets="glue",
metrics="acc",
),
template_path=template_path,
some_data="asdf",
)
self.assertTrue(
card.text.endswith("asdf"),
"Custom template didn't set jinja variable correctly",
)
def test_repo_card_data_must_be_dict(self):
sample_path = SAMPLE_CARDS_DIR / "sample_invalid_card_data.md"
with pytest.raises(
ValueError, match="repo card metadata block should be a dict"
):
RepoCard(sample_path.read_text())
def test_repo_card_without_metadata(self):
sample_path = SAMPLE_CARDS_DIR / "sample_no_metadata.md"
with self.caplog.at_level(logging.WARNING):
card = RepoCard(sample_path.read_text())
self.assertIn(
"Repo card metadata block was not found. Setting CardData to empty.",
self.caplog.text,
)
self.assertEqual(card.data, CardData())
def test_validate_repocard(self):
sample_path = SAMPLE_CARDS_DIR / "sample_simple.md"
card = RepoCard.load(sample_path)
card.validate()
card.data.license = "asdf"
with pytest.raises(ValueError, match='- Error: "license" must be one of'):
card.validate()
def test_push_to_hub(self):
repo_id = f"{USER}/{repo_name('push-card')}"
self._api.create_repo(repo_id)
card_data = CardData(
language="en",
license="mit",
library_name="pytorch",
tags=["text-classification"],
datasets="glue",
metrics="acc",
)
# Mock what RepoCard.from_template does so we can test w/o Jinja2
content = f"---\n{card_data.to_yaml()}\n---\n\n# MyModel\n\nHello, world!"
card = RepoCard(content)
url = f"{ENDPOINT_STAGING_BASIC_AUTH}/{repo_id}/resolve/main/README.md"
# Check this file doesn't exist (sanity check)
with pytest.raises(requests.exceptions.HTTPError):
r = requests.get(url)
r.raise_for_status()
# Push the card up to README.md in the repo
card.push_to_hub(repo_id, token=TOKEN)
# No error should occur now, as README.md should exist
r = requests.get(url)
r.raise_for_status()
self._api.delete_repo(repo_id=repo_id)
def test_push_and_create_pr(self):
repo_id = f"{USER}/{repo_name('pr-card')}"
self._api.create_repo(repo_id)
card_data = CardData(
language="en",
license="mit",
library_name="pytorch",
tags=["text-classification"],
datasets="glue",
metrics="acc",
)
# Mock what RepoCard.from_template does so we can test w/o Jinja2
content = f"---\n{card_data.to_yaml()}\n---\n\n# MyModel\n\nHello, world!"
card = RepoCard(content)
url = f"{ENDPOINT_STAGING_BASIC_AUTH}/api/models/{repo_id}/discussions"
r = requests.get(url)
data = r.json()
self.assertEqual(data["count"], 0)
card.push_to_hub(repo_id, token=TOKEN, create_pr=True)
r = requests.get(url)
data = r.json()
self.assertEqual(data["count"], 1)
self._api.delete_repo(repo_id=repo_id)
def test_preserve_windows_linebreaks(self):
card_path = SAMPLE_CARDS_DIR / "sample_windows_line_breaks.md"
card = RepoCard.load(card_path)
self.assertIn("\r\n", str(card))
def test_updating_text_updates_content(self):
sample_path = SAMPLE_CARDS_DIR / "sample_simple.md"
card = RepoCard.load(sample_path)
card.text = "Hello, world!"
self.assertEqual(
card.content, f"---\n{card.data.to_yaml()}\n---\nHello, world!"
)
class ModelCardTest(TestCaseWithCapLog):
def test_model_card_with_invalid_model_index(self):
"""
Test that when loading a card that has invalid model-index, no eval_results are added + it logs a warning
"""
sample_path = SAMPLE_CARDS_DIR / "sample_invalid_model_index.md"
with self.caplog.at_level(logging.WARNING):
card = ModelCard.load(sample_path)
self.assertIn(
"Invalid model-index. Not loading eval results into CardData.",
self.caplog.text,
)
self.assertIsNone(card.data.eval_results)
def test_load_model_card_from_file(self):
sample_path = SAMPLE_CARDS_DIR / "sample_simple.md"
card = ModelCard.load(sample_path)
self.assertIsInstance(card, ModelCard)
self.assertEqual(
card.data.to_dict(),
{
"language": ["en"],
"license": "mit",
"library_name": "pytorch-lightning",
"tags": ["pytorch", "image-classification"],
"datasets": ["beans"],
"metrics": ["acc"],
},
)
self.assertTrue(
card.text.strip().startswith("# my-cool-model"),
"Card text not loaded properly",
)
@require_jinja
def test_model_card_from_custom_template(self):
template_path = SAMPLE_CARDS_DIR / "sample_template.md"
card = ModelCard.from_template(
card_data=ModelCardData(
language="en",
license="mit",
library_name="pytorch",
tags="text-classification",
datasets="glue",
metrics="acc",
),
template_path=template_path,
some_data="asdf",
)
self.assertIsInstance(card, ModelCard)
self.assertTrue(
card.text.endswith("asdf"),
"Custom template didn't set jinja variable correctly",
)
@require_jinja
def test_model_card_from_template_eval_results(self):
template_path = SAMPLE_CARDS_DIR / "sample_template.md"
card = ModelCard.from_template(
card_data=ModelCardData(
eval_results=[
EvalResult(
task_type="text-classification",
task_name="Text Classification",
dataset_type="julien-c/reactiongif",
dataset_name="ReactionGIF",
dataset_config="default",
dataset_split="test",
metric_type="accuracy",
metric_value=0.2662102282047272,
metric_name="Accuracy",
metric_config="default",
verified=False,
),
],
model_name="RoBERTa fine-tuned on ReactionGIF",
),
template_path=template_path,
some_data="asdf",
)
self.assertIsInstance(card, ModelCard)
self.assertTrue(card.text.endswith("asdf"))
self.assertTrue(card.data.to_dict().get("eval_results") is None)
self.assertEqual(
str(card)[: len(DUMMY_MODELCARD_EVAL_RESULT)], DUMMY_MODELCARD_EVAL_RESULT
)
class DatasetCardTest(TestCaseWithCapLog):
def test_load_datasetcard_from_file(self):
sample_path = SAMPLE_CARDS_DIR / "sample_datasetcard_simple.md"
card = DatasetCard.load(sample_path)
self.assertEqual(
card.data.to_dict(),
{
"annotations_creators": ["crowdsourced", "expert-generated"],
"language_creators": ["found"],
"language": ["en"],
"license": ["bsd-3-clause"],
"multilinguality": ["monolingual"],
"size_categories": ["n<1K"],
"task_categories": ["image-segmentation"],
"task_ids": ["semantic-segmentation"],
"pretty_name": "Sample Segmentation",
},
)
self.assertIsInstance(card, DatasetCard)
self.assertIsInstance(card.data, DatasetCardData)
self.assertTrue(card.text.strip().startswith("# Dataset Card for"))
@require_jinja
def test_dataset_card_from_default_template(self):
card_data = DatasetCardData(
language="en",
license="mit",
)
# Here we check default title when pretty_name not provided.
card = DatasetCard.from_template(card_data)
self.assertTrue(card.text.strip().startswith("# Dataset Card for Dataset Name"))
card_data = DatasetCardData(
language="en",
license="mit",
pretty_name="My Cool Dataset",
)
# Here we pass the card data as kwargs as well so template picks up pretty_name.
card = DatasetCard.from_template(card_data, **card_data.to_dict())
self.assertTrue(
card.text.strip().startswith("# Dataset Card for My Cool Dataset")
)
self.assertIsInstance(card, DatasetCard)
@require_jinja
def test_dataset_card_from_default_template_with_template_variables(self):
card_data = DatasetCardData(
language="en",
license="mit",
pretty_name="My Cool Dataset",
)
# Here we pass the card data as kwargs as well so template picks up pretty_name.
card = DatasetCard.from_template(
card_data,
homepage_url="https://huggingface.co",
repo_url="https://github.com/huggingface/huggingface_hub",
paper_url="https://arxiv.org/pdf/1910.03771.pdf",
point_of_contact="https://huggingface.co/nateraw",
dataset_summary=(
"This is a test dataset card to check if the template variables "
"in the dataset card template are working."
),
)
self.assertTrue(
card.text.strip().startswith("# Dataset Card for My Cool Dataset")
)
self.assertIsInstance(card, DatasetCard)
matches = re.findall(r"Homepage:\*\* https:\/\/huggingface\.co", str(card))
self.assertEqual(matches[0], "Homepage:** https://huggingface.co")
@require_jinja
def test_dataset_card_from_custom_template(self):
card = DatasetCard.from_template(
card_data=DatasetCardData(
language="en",
license="mit",
pretty_name="My Cool Dataset",
),
template_path=SAMPLE_CARDS_DIR / "sample_datasetcard_template.md",
pretty_name="My Cool Dataset",
some_data="asdf",
)
self.assertIsInstance(card, DatasetCard)
# Title this time is just # {{ pretty_name }}
self.assertTrue(card.text.strip().startswith("# My Cool Dataset"))
# some_data is at the bottom of the template, so should end with whatever we passed to it
self.assertTrue(card.text.strip().endswith("asdf"))
| {
"content_hash": "9031b42e907a075e4772932245aab29c",
"timestamp": "",
"source": "github",
"line_count": 923,
"max_line_length": 113,
"avg_line_length": 34.360780065005414,
"alnum_prop": 0.5912659624783225,
"repo_name": "huggingface/huggingface_hub",
"id": "f035226811d061b57db526c3117679fa7766505b",
"size": "32337",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_repocard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "338"
},
{
"name": "Python",
"bytes": "1086946"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from user_contacts.models import (
Person,
Phone)
class PersonTest(TestCase):
def test_unicode(self):
person = Person()
first_name = "fred"
last_name = "smith"
person.first_name = first_name
person.last_name = last_name
expected = "%s, %s" % (last_name, first_name)
actual = person.__unicode__()
self.assertEquals(expected, actual)
class PhoneTest(TestCase):
def test_unicode(self):
phone = Phone()
number = "8675309"
phone.number = number
expected = number
actual = phone.__unicode__()
self.assertEquals(expected, actual)
| {
"content_hash": "1af9e458ba58f7c4f913d974fcbd55a3",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 53,
"avg_line_length": 24.642857142857142,
"alnum_prop": 0.5942028985507246,
"repo_name": "Victory/realpython-tdd",
"id": "2d1f8fc7c55a92c8d1b8d54e68804548c64e7cfa",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contacts/user_contacts/tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Python",
"bytes": "30133"
},
{
"name": "Shell",
"bytes": "1111"
}
],
"symlink_target": ""
} |
import arcpy
from arcpy.sa import *
import os
def init(input_shp):
#arcpy.env.workspace = arcpy.env.scratchFolder
arcpy.CheckOutExtension('Spatial')
arcpy.AddMessage(arcpy.CheckExtension('Spatial'))
# adding new shapefile to ArcMap
mxd = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
newLayer = arcpy.mapping.Layer(input_shp)
arcpy.ApplySymbologyFromLayer_management(newLayer, "points_new_style.lyr")
arcpy.mapping.AddLayer(df, newLayer, "TOP")
arcpy.env.workspace = arcpy.env.scratchGDB
arcpy.env.workspace = arcpy.env.scratchFolder
arcpy.AddMessage(arcpy.env.workspace)
densityOutput = PointDensity(input_shp, 'accuracy')
densityOutput.save('density.tif')
densityLayer = arcpy.mapping.Layer('density.tif')
arcpy.ApplySymbologyFromLayer_management(densityLayer, "density_style.lyr")
arcpy.mapping.AddLayer(df, densityLayer, "BOTTOM")
| {
"content_hash": "32db66aa704362187fdbb88991cd115d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.710204081632653,
"repo_name": "mdragunski/BottleNcr",
"id": "ad101983da039c3d744a32283ad2ebf85aeb8a9d",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9499"
}
],
"symlink_target": ""
} |
"""
SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://code.google.com/p/sympy/"""
from __future__ import absolute_import, print_function
__version__ = "0.7.3"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else: # Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
return eval(os.getenv('SYMPY_DEBUG', 'False'))
SYMPY_DEBUG = __sympy_debug()
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, Plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
| {
"content_hash": "4a0776defa533d53f256825ce9c90d97",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 75,
"avg_line_length": 30.682539682539684,
"alnum_prop": 0.743921365752716,
"repo_name": "kmacinnis/sympy",
"id": "b81cea2b36ad0ec3e43c67181f9b92d8ebd1257d",
"size": "1933",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sympy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13573973"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "1284"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from ..parsers import RegisterParser
def test_parse_register_proper_data(Request):
parser = RegisterParser()
test_data = {
'email': 'test_email',
'password': 'test_password',
}
data = parser.parse_args(req=Request(test_data))
assert data == test_data
def test_parse_register_missing_email(parser_failer):
parser_failer(RegisterParser(), {'password': 'test_password'},
expected_errors={'email': 'Email is required'})
def test_parse_register_missing_password(parser_failer):
parser_failer(RegisterParser(), {'email': 'test_email'},
expected_errors={'password': 'Password is required'})
| {
"content_hash": "2b8de38e0c39a342851d6ab3df68058a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 29.217391304347824,
"alnum_prop": 0.6488095238095238,
"repo_name": "kszarlej/libkeep",
"id": "44e2e856ce528f54a14c3afdffb07d806c0f1a70",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/project/user/tests/test_parsers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "909"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
def load_data(apps, schema_editor):
AccountType = apps.get_model("profiles", "AccountType")
AccountType.objects.get_or_create(
name="GITHUB",
display_name="Github",
social_auth_provider_name="github",
link_to_account_with_param="https://github.com/{account_name}",
link_to_avatar_with_params="https://github.com/{account_name}.png?size={size}"
)
AccountType.objects.get_or_create(
name="STEEM",
display_name="Steem",
social_auth_provider_name="steemconnect",
link_to_account_with_param="https://steemit.com/@{account_name}",
link_to_avatar_with_params="https://steemitimages.com/u/{account_name}/avatar"
)
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.RunPython(load_data)
]
| {
"content_hash": "93e7e0ebf7e56fcd498f45fd7d23a47c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 86,
"avg_line_length": 28.38235294117647,
"alnum_prop": 0.6435233160621762,
"repo_name": "noisy/steemprojects.com",
"id": "0e9362c775464cf324fe4b2876dca0d70f3d40d6",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/migrations/0002_accounttype_data_migration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63399"
},
{
"name": "Dockerfile",
"bytes": "1731"
},
{
"name": "HTML",
"bytes": "155684"
},
{
"name": "Makefile",
"bytes": "2785"
},
{
"name": "Python",
"bytes": "431771"
},
{
"name": "Shell",
"bytes": "5227"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMDO4000 import *
class tektronixMDO4054B(tektronixMDO4000):
"Tektronix MDO4054B IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MDO4054B')
super(tektronixMDO4054B, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 500e6
self._init_channels()
| {
"content_hash": "d01ea40720a1dcb2fa15d06f722478ad",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 86,
"avg_line_length": 39.19047619047619,
"alnum_prop": 0.7545565006075334,
"repo_name": "Diti24/python-ivi",
"id": "13904508d6be7e8e0af2d6c949d5824e776dfef7",
"size": "1646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivi/tektronix/tektronixMDO4054B.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1992462"
}
],
"symlink_target": ""
} |
from threading import Event
import os
from sys import exc_info
import netrc
import errno
from offlineimap.repository.Base import BaseRepository
from offlineimap import folder, imaputil, imapserver, OfflineImapError
from offlineimap.folder.UIDMaps import MappedIMAPFolder
from offlineimap.threadutil import ExitNotifyThread
from offlineimap.utils.distro import get_os_sslcertfile, get_os_sslcertfile_searchpath
class IMAPRepository(BaseRepository):
def __init__(self, reposname, account):
"""Initialize an IMAPRepository object."""
BaseRepository.__init__(self, reposname, account)
# self.ui is being set by the BaseRepository
self._host = None
self._oauth2_request_url = None
self.imapserver = imapserver.IMAPServer(self)
self.folders = None
# Only set the newmail_hook in an IMAP repository.
if self.config.has_option(self.getsection(), 'newmail_hook'):
self.newmail_hook = self.localeval.eval(
self.getconf('newmail_hook'))
if self.getconf('sep', None):
self.ui.info("The 'sep' setting is being ignored for IMAP "
"repository '%s' (it's autodetected)"% self)
def startkeepalive(self):
keepalivetime = self.getkeepalive()
if not keepalivetime: return
self.kaevent = Event()
self.kathread = ExitNotifyThread(target = self.imapserver.keepalive,
name = "Keep alive " + self.getname(),
args = (keepalivetime, self.kaevent))
self.kathread.setDaemon(1)
self.kathread.start()
def stopkeepalive(self):
if not hasattr(self, 'kaevent'):
# Keepalive is not active.
return
self.kaevent.set()
del self.kathread
del self.kaevent
def holdordropconnections(self):
if not self.getholdconnectionopen():
self.dropconnections()
def dropconnections(self):
self.imapserver.close()
def getholdconnectionopen(self):
if self.getidlefolders():
return 1
return self.getconfboolean("holdconnectionopen", 0)
def getkeepalive(self):
num = self.getconfint("keepalive", 0)
if num == 0 and self.getidlefolders():
return 29*60
else:
return num
def getsep(self):
"""Return the folder separator for the IMAP repository
This requires that self.imapserver has been initialized with an
acquireconnection() or it will still be `None`"""
assert self.imapserver.delim != None, "'%s' " \
"repository called getsep() before the folder separator was " \
"queried from the server"% self
return self.imapserver.delim
def gethost(self):
"""Return the configured hostname to connect to
:returns: hostname as string or throws Exception"""
if self._host: # use cached value if possible
return self._host
# 1) check for remotehosteval setting
if self.config.has_option(self.getsection(), 'remotehosteval'):
host = self.getconf('remotehosteval')
try:
host = self.localeval.eval(host)
except Exception as e:
raise OfflineImapError("remotehosteval option for repository "
"'%s' failed:\n%s"% (self, e), OfflineImapError.ERROR.REPO), \
None, exc_info()[2]
if host:
self._host = host
return self._host
# 2) check for plain remotehost setting
host = self.getconf('remotehost', None)
if host != None:
self._host = host
return self._host
# no success
raise OfflineImapError("No remote host for repository "
"'%s' specified."% self, OfflineImapError.ERROR.REPO)
def get_remote_identity(self):
"""Remote identity is used for certain SASL mechanisms
(currently -- PLAIN) to inform server about the ID
we want to authorize as instead of our login name."""
return self.getconf('remote_identity', default=None)
def get_auth_mechanisms(self):
supported = ["GSSAPI", "XOAUTH2", "CRAM-MD5", "PLAIN", "LOGIN"]
# Mechanisms are ranged from the strongest to the
# weakest ones.
# TODO: we need DIGEST-MD5, it must come before CRAM-MD5
# TODO: due to the chosen-plaintext resistance.
default = ["GSSAPI", "XOAUTH2", "CRAM-MD5", "PLAIN", "LOGIN"]
mechs = self.getconflist('auth_mechanisms', r',\s*',
default)
for m in mechs:
if m not in supported:
raise OfflineImapError("Repository %s: "% self + \
"unknown authentication mechanism '%s'"% m,
OfflineImapError.ERROR.REPO)
self.ui.debug('imap', "Using authentication mechanisms %s" % mechs)
return mechs
def getuser(self):
user = None
localeval = self.localeval
if self.config.has_option(self.getsection(), 'remoteusereval'):
user = self.getconf('remoteusereval')
if user != None:
return localeval.eval(user)
if self.config.has_option(self.getsection(), 'remoteuser'):
user = self.getconf('remoteuser')
if user != None:
return user
try:
netrcentry = netrc.netrc().authenticators(self.gethost())
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
else:
if netrcentry:
return netrcentry[0]
try:
netrcentry = netrc.netrc('/etc/netrc').authenticators(self.gethost())
except IOError as inst:
if inst.errno not in (errno.ENOENT, errno.EACCES):
raise
else:
if netrcentry:
return netrcentry[0]
def getport(self):
port = None
if self.config.has_option(self.getsection(), 'remoteporteval'):
port = self.getconf('remoteporteval')
if port != None:
return self.localeval.eval(port)
return self.getconfint('remoteport', None)
def getipv6(self):
return self.getconfboolean('ipv6', None)
def getssl(self):
return self.getconfboolean('ssl', 1)
def getsslclientcert(self):
xforms = [os.path.expanduser, os.path.expandvars, os.path.abspath]
return self.getconf_xform('sslclientcert', xforms, None)
def getsslclientkey(self):
xforms = [os.path.expanduser, os.path.expandvars, os.path.abspath]
return self.getconf_xform('sslclientkey', xforms, None)
def getsslcacertfile(self):
"""Determines CA bundle.
Returns path to the CA bundle. It is either explicitely specified
or requested via "OS-DEFAULT" value (and we will search known
locations for the current OS and distribution).
If search via "OS-DEFAULT" route yields nothing, we will throw an
exception to make our callers distinguish between not specified
value and non-existent default CA bundle.
It is also an error to specify non-existent file via configuration:
it will error out later, but, perhaps, with less verbose explanation,
so we will also throw an exception. It is consistent with
the above behaviour, so any explicitely-requested configuration
that doesn't result in an existing file will give an exception.
"""
xforms = [os.path.expanduser, os.path.expandvars, os.path.abspath]
cacertfile = self.getconf_xform('sslcacertfile', xforms, None)
if self.getconf('sslcacertfile', None) == "OS-DEFAULT":
cacertfile = get_os_sslcertfile()
if cacertfile == None:
searchpath = get_os_sslcertfile_searchpath()
if searchpath:
reason = "Default CA bundle was requested, "\
"but no existing locations available. "\
"Tried %s." % (", ".join(searchpath))
else:
reason = "Default CA bundle was requested, "\
"but OfflineIMAP doesn't know any for your "\
"current operating system."
raise OfflineImapError(reason, OfflineImapError.ERROR.REPO)
if cacertfile is None:
return None
if not os.path.isfile(cacertfile):
reason = "CA certfile for repository '%s' couldn't be found. "\
"No such file: '%s'" % (self.name, cacertfile)
raise OfflineImapError(reason, OfflineImapError.ERROR.REPO)
return cacertfile
def gettlslevel(self):
return self.getconf('tls_level', 'tls_compat')
def getsslversion(self):
return self.getconf('ssl_version', None)
def get_ssl_fingerprint(self):
"""Return array of possible certificate fingerprints.
Configuration item cert_fingerprint can contain multiple
comma-separated fingerprints in hex form."""
value = self.getconf('cert_fingerprint', "")
return [f.strip().lower() for f in value.split(',') if f]
def getoauth2_request_url(self):
if self._oauth2_request_url: # Use cached value if possible.
return self._oauth2_request_url
oauth2_request_url = self.getconf('oauth2_request_url', None)
if oauth2_request_url != None:
self._oauth2_request_url = oauth2_request_url
return self._oauth2_request_url
#raise OfflineImapError("No remote oauth2_request_url for repository "
#"'%s' specified."% self, OfflineImapError.ERROR.REPO)
def getoauth2_refresh_token(self):
return self.getconf('oauth2_refresh_token', None)
def getoauth2_access_token(self):
return self.getconf('oauth2_access_token', None)
def getoauth2_client_id(self):
return self.getconf('oauth2_client_id', None)
def getoauth2_client_secret(self):
return self.getconf('oauth2_client_secret', None)
def getpreauthtunnel(self):
return self.getconf('preauthtunnel', None)
def gettransporttunnel(self):
return self.getconf('transporttunnel', None)
def getreference(self):
return self.getconf('reference', '')
def getdecodefoldernames(self):
return self.getconfboolean('decodefoldernames', 0)
def getidlefolders(self):
localeval = self.localeval
return localeval.eval(self.getconf('idlefolders', '[]'))
def getmaxconnections(self):
num1 = len(self.getidlefolders())
num2 = self.getconfint('maxconnections', 1)
return max(num1, num2)
def getexpunge(self):
return self.getconfboolean('expunge', 1)
def getpassword(self):
"""Return the IMAP password for this repository.
It tries to get passwords in the following order:
1. evaluate Repository 'remotepasseval'
2. read password from Repository 'remotepass'
3. read password from file specified in Repository 'remotepassfile'
4. read password from ~/.netrc
5. read password from /etc/netrc
On success we return the password.
If all strategies fail we return None."""
# 1. evaluate Repository 'remotepasseval'
passwd = self.getconf('remotepasseval', None)
if passwd != None:
return self.localeval.eval(passwd)
# 2. read password from Repository 'remotepass'
password = self.getconf('remotepass', None)
if password != None:
return password
# 3. read password from file specified in Repository 'remotepassfile'
passfile = self.getconf('remotepassfile', None)
if passfile != None:
fd = open(os.path.expanduser(passfile))
password = fd.readline().strip()
fd.close()
return password
# 4. read password from ~/.netrc
try:
netrcentry = netrc.netrc().authenticators(self.gethost())
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
else:
if netrcentry:
user = self.getuser()
if user == None or user == netrcentry[0]:
return netrcentry[2]
# 5. read password from /etc/netrc
try:
netrcentry = netrc.netrc('/etc/netrc').authenticators(self.gethost())
except IOError as inst:
if inst.errno not in (errno.ENOENT, errno.EACCES):
raise
else:
if netrcentry:
user = self.getuser()
if user == None or user == netrcentry[0]:
return netrcentry[2]
# no strategy yielded a password!
return None
def getfolder(self, foldername):
"""Return instance of OfflineIMAP representative folder."""
return self.getfoldertype()(self.imapserver, foldername, self)
def getfoldertype(self):
return folder.IMAP.IMAPFolder
def connect(self):
imapobj = self.imapserver.acquireconnection()
self.imapserver.releaseconnection(imapobj)
def forgetfolders(self):
self.folders = None
def getfolders(self):
"""Return a list of instances of OfflineIMAP representative folder."""
if self.folders != None:
return self.folders
retval = []
imapobj = self.imapserver.acquireconnection()
# check whether to list all folders, or subscribed only
listfunction = imapobj.list
if self.getconfboolean('subscribedonly', False):
listfunction = imapobj.lsub
try:
listresult = listfunction(directory = self.imapserver.reference)[1]
finally:
self.imapserver.releaseconnection(imapobj)
for s in listresult:
if s == None or \
(isinstance(s, basestring) and s == ''):
# Bug in imaplib: empty strings in results from
# literals. TODO: still relevant?
continue
flags, delim, name = imaputil.imapsplit(s)
flaglist = [x.lower() for x in imaputil.flagsplit(flags)]
if '\\noselect' in flaglist:
continue
foldername = imaputil.dequote(name)
retval.append(self.getfoldertype()(self.imapserver, foldername,
self))
# Add all folderincludes
if len(self.folderincludes):
imapobj = self.imapserver.acquireconnection()
try:
for foldername in self.folderincludes:
try:
imapobj.select(foldername, readonly = True)
except OfflineImapError as e:
# couldn't select this folderinclude, so ignore folder.
if e.severity > OfflineImapError.ERROR.FOLDER:
raise
self.ui.error(e, exc_info()[2],
'Invalid folderinclude:')
continue
retval.append(self.getfoldertype()(
self.imapserver, foldername, self))
finally:
self.imapserver.releaseconnection(imapobj)
if self.foldersort is None:
# default sorting by case insensitive transposed name
retval.sort(key=lambda x: str.lower(x.getvisiblename()))
else:
# do foldersort in a python3-compatible way
# http://bytes.com/topic/python/answers/844614-python-3-sorting-comparison-function
def cmp2key(mycmp):
"""Converts a cmp= function into a key= function
We need to keep cmp functions for backward compatibility"""
class K:
def __init__(self, obj, *args):
self.obj = obj
def __cmp__(self, other):
return mycmp(self.obj.getvisiblename(), other.obj.getvisiblename())
return K
retval.sort(key=cmp2key(self.foldersort))
self.folders = retval
return self.folders
def makefolder(self, foldername):
"""Create a folder on the IMAP server
This will not update the list cached in :meth:`getfolders`. You
will need to invoke :meth:`forgetfolders` to force new caching
when you are done creating folders yourself.
:param foldername: Full path of the folder to be created."""
if self.getreference():
foldername = self.getreference() + self.getsep() + foldername
if not foldername: # Create top level folder as folder separator
foldername = self.getsep()
self.ui.makefolder(self, foldername)
if self.account.dryrun:
return
imapobj = self.imapserver.acquireconnection()
try:
result = imapobj.create(foldername)
if result[0] != 'OK':
raise OfflineImapError("Folder '%s'[%s] could not be created. "
"Server responded: %s"% (foldername, self, str(result)),
OfflineImapError.ERROR.FOLDER)
finally:
self.imapserver.releaseconnection(imapobj)
class MappedIMAPRepository(IMAPRepository):
def getfoldertype(self):
return MappedIMAPFolder
| {
"content_hash": "040af6d6c6a9390032265b72b35c5b7b",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 95,
"avg_line_length": 38.18614718614719,
"alnum_prop": 0.593583493934928,
"repo_name": "frioux/offlineimap",
"id": "60d5a08f666eb345e6dcf63ca2d340f19c23c528",
"size": "18458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "offlineimap/repository/IMAP.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2022"
},
{
"name": "Python",
"bytes": "543162"
},
{
"name": "Shell",
"bytes": "12224"
}
],
"symlink_target": ""
} |
"""
The setup script for Flask-OpenAPI.
"""
from setuptools import find_packages
from setuptools import setup
with open('README.rst') as f:
readme = f.read()
setup(
name='Flask-OpenAPI',
version='0.1.0a1',
author='Remco Haszing',
author_email='remcohaszing@gmail.com',
description='Generate a swagger.json handler from a Flask app',
long_description=readme,
license='MIT',
packages=find_packages(),
install_requires=[
'flask ~= 0.11',
'jsonschema ~= 2.5',
'pyyaml ~= 3.11'
],
zip_safe=True)
| {
"content_hash": "5a80fa00ef7d00a77a6bc5b1a0d38572",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 67,
"avg_line_length": 21.037037037037038,
"alnum_prop": 0.6267605633802817,
"repo_name": "remcohaszing/flask-openapi",
"id": "92bdb0f3e0c26089c385e328328164a9628a6edf",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36700"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas.geometry import add_vectors
from compas.artists import PrimitiveArtist
from compas.colors import Color
from .artist import RhinoArtist
class CircleArtist(RhinoArtist, PrimitiveArtist):
"""Artist for drawing circles.
Parameters
----------
circle : :class:`~compas.geometry.Circle`
A COMPAS circle.
layer : str, optional
The layer that should contain the drawing.
**kwargs : dict, optional
Additional keyword arguments.
For more info, see :class:`RhinoArtist` and :class:`PrimitiveArtist`.
"""
def __init__(self, circle, layer=None, **kwargs):
super(CircleArtist, self).__init__(primitive=circle, layer=layer, **kwargs)
def draw(self, color=None, show_point=False, show_normal=False):
"""Draw the circle.
Parameters
----------
color : tuple[int, int, int] | tuple[float, float, float] | :class:`~compas.colors.Color`, optional
The RGB color of the circle.
Default is :attr:`compas.artists.PrimitiveArtist.color`.
show_point : bool, optional
If True, draw the center point of the circle.
show_normal : bool, optional
If True, draw the normal vector of the circle.
Returns
-------
list[System.Guid]
The GUIDs of the created Rhino objects.
"""
color = Color.coerce(color) or self.color
color = color.rgb255
point = list(self.primitive.plane.point)
normal = list(self.primitive.plane.normal)
plane = point, normal
radius = self.primitive.radius
guids = []
if show_point:
points = [{"pos": point, "color": color, "name": self.primitive.name}]
guids += compas_rhino.draw_points(points, layer=self.layer, clear=False, redraw=False)
if show_normal:
lines = [
{
"start": point,
"end": add_vectors(point, normal),
"arrow": "end",
"color": color,
"name": self.primitive.name,
}
]
guids += compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)
circles = [
{
"plane": plane,
"radius": radius,
"color": color,
"name": self.primitive.name,
}
]
guids += compas_rhino.draw_circles(circles, layer=self.layer, clear=False, redraw=False)
return guids
| {
"content_hash": "e1236ff6499742f88bb5878c64740131",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 107,
"avg_line_length": 34.20253164556962,
"alnum_prop": 0.5684678016284234,
"repo_name": "compas-dev/compas",
"id": "48755a94bf7d86282e9069c6e4699b2b81884857",
"size": "2702",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/compas_rhino/artists/circleartist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
} |
"""The tests for Roller shutter platforms."""
| {
"content_hash": "7ca625ffb1150ae72deec0a581783df6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 45,
"avg_line_length": 46,
"alnum_prop": 0.717391304347826,
"repo_name": "Julian/home-assistant",
"id": "4fc6ddee8a9e6afdc57fbda698c8df2d68025057",
"size": "46",
"binary": false,
"copies": "17",
"ref": "refs/heads/py2",
"path": "tests/components/rollershutter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1354942"
},
{
"name": "Python",
"bytes": "2755966"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_lazy
from onadata.apps.logger.models.instance import Instance
from onadata.apps.viewer.models.parsed_instance import xform_instances,\
datetime_from_str
from onadata.libs.utils.common_tags import DELETEDAT, ID
class Command(BaseCommand):
help = ugettext_lazy("Update deleted records from mongo to sql instances")
def handle(self, *args, **kwargs):
q = {"$and": [{"_deleted_at": {"$exists": True}},
{"_deleted_at": {"$ne": None}}]}
cursor = xform_instances.find(q)
c = 0
for record in cursor:
date_deleted = datetime_from_str(record[DELETEDAT])
id = record[ID]
if Instance.set_deleted_at(id, deleted_at=date_deleted):
c += 1
print "deleted on ", date_deleted
print "-------------------------------"
print "Updated %d records." % c
| {
"content_hash": "5353fdaff9f0f11fe51512c505b43a4d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 38.76,
"alnum_prop": 0.6150670794633643,
"repo_name": "piqoni/onadata",
"id": "273b80a56e63b8d1f44b36f29e118ebf6ab91635",
"size": "969",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "onadata/apps/viewer/management/commands/update_delete_from_mongo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "74590"
},
{
"name": "Gettext Catalog",
"bytes": "558412"
},
{
"name": "HTML",
"bytes": "248856"
},
{
"name": "JavaScript",
"bytes": "904742"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "2569475"
},
{
"name": "Shell",
"bytes": "11725"
}
],
"symlink_target": ""
} |
from shop.models.productmodel import Product
from shop.views import ShopDetailView
class ProductDetailView(ShopDetailView):
"""
This view handles displaying the right template for the subclasses of
Product.
It will look for a template at the normal (conventional) place, but will
fallback to using the default product template in case no template is
found for the subclass.
"""
model=Product # It must be the biggest ancestor of the inheritence tree.
generic_template = 'shop/product_detail.html'
def get_template_names(self):
ret = super(ProductDetailView, self).get_template_names()
if not self.generic_template in ret:
ret.append(self.generic_template)
return ret | {
"content_hash": "c434d22488b96b0eccd4e2a37166c738",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 39.421052631578945,
"alnum_prop": 0.7156208277703605,
"repo_name": "ojii/django-shop",
"id": "39be2b61257c71964db948eb3cae8d355121589a",
"size": "773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/views/product.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1122"
},
{
"name": "Python",
"bytes": "282793"
},
{
"name": "Shell",
"bytes": "5030"
}
],
"symlink_target": ""
} |
from conf import settings
from core import accounts
from core import logger
#transaction logger
def make_transaction(log_obj,account_data,tran_type,amount,**others):
'''
deal all the user transactions
:param account_data: user account data
:param tran_type: transaction type
:param amount: transaction amount
:param others: mainly for logging usage
:return:
'''
amount = float(amount)
if tran_type in settings.TRANSACTION_TYPE:
interest = amount * settings.TRANSACTION_TYPE[tran_type]['interest']
old_balance = account_data['balance']
if settings.TRANSACTION_TYPE[tran_type]['action'] == 'plus':
new_balance = old_balance + amount + interest
elif settings.TRANSACTION_TYPE[tran_type]['action'] == 'minus':
new_balance = old_balance - amount - interest
#check credit
if new_balance <0:
print('''\033[31;1mYour credit [%s] is not enough for this transaction [-%s], your current balance is
[%s]''' %(account_data['credit'],(amount + interest), old_balance ))
return
account_data['balance'] = new_balance
accounts.dump_account(account_data) #save the new balance back to file
log_obj.info("account:%s action:%s amount:%s interest:%s" %
(account_data['id'], tran_type, amount,interest) )
return account_data
else:
print("\033[31;1mTransaction type [%s] is not exist!\033[0m" % tran_type)
| {
"content_hash": "c0f5e401dfc05a9d189bc767826801fa",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 117,
"avg_line_length": 42.611111111111114,
"alnum_prop": 0.622555410691004,
"repo_name": "dianshen/python_day",
"id": "1144e04a82ae3f3a2ca6d00601b65757ec5d1cbb",
"size": "1578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day5-atm 2/alex_atm/core/transaction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2505"
},
{
"name": "HTML",
"bytes": "74003"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "317154"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0041_create_attachments_for_old_messages'),
]
operations = [
migrations.AlterField(
model_name='attachment',
name='file_name',
field=models.TextField(db_index=True),
),
]
| {
"content_hash": "36cfb6157881b26bbc50ba4acdd61c23",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 63,
"avg_line_length": 22.5,
"alnum_prop": 0.5888888888888889,
"repo_name": "timabbott/zulip",
"id": "84f1da7717a342d19a0ed2674d94540d88eb5c77",
"size": "360",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "zerver/migrations/0042_attachment_file_name_length.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "429356"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "844217"
},
{
"name": "JavaScript",
"bytes": "3259448"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "74427"
},
{
"name": "Python",
"bytes": "7825440"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "123706"
},
{
"name": "TSQL",
"bytes": "314"
},
{
"name": "TypeScript",
"bytes": "22102"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# Setup version
VERSION = '0.6.0'
# Read description
with open('README.rst', 'r') as readme:
README_TEXT = readme.read()
def write_version_py():
filename = os.path.join(
os.path.dirname(__file__),
'tornadowebapi',
'version.py')
ver = "__version__ = '{}'\n"
with open(filename, 'w') as fh:
fh.write("# Autogenerated by setup.py\n")
fh.write(ver.format(VERSION))
write_version_py()
# main setup configuration class
setup(
name='tornadowebapi',
version=VERSION,
author='SimPhoNy Project',
license='BSD',
description='Tornado-based WebAPI framework',
install_requires=[
"setuptools>=21.0",
"tornado>=4.3"
],
packages=find_packages(),
include_package_data=True,
zip_safe=False
)
| {
"content_hash": "e43155a1333b26a5b7eca38112e2192f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 49,
"avg_line_length": 21.3,
"alnum_prop": 0.6150234741784038,
"repo_name": "simphony/tornado-webapi",
"id": "352f37ca90e7be09ddefdcac13d47a5ec8ba0f20",
"size": "852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "71184"
}
],
"symlink_target": ""
} |
"""
Build index by path into a content-keyed data store.
"""
# import binascii
# from nlhtree import NLHLeaf
# from xlattice import SHA1_BIN_NONE, SHA2_BIN_NONE
# from xlcrypto import (
# AES_BLOCK_BYTES, addPKCS7Padding, stripPKCS7Padding)
__all__ = ['__version__', '__version_date__',
]
__version__ = '0.0.24'
__version_date__ = '2018-02-27'
| {
"content_hash": "829cfca06d157054fb640fba59b1a48b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 22.6875,
"alnum_prop": 0.6446280991735537,
"repo_name": "jddixon/bindex",
"id": "920c2a3103ca8a675fae3542be89686dee9d8582",
"size": "386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bindex/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "1524"
},
{
"name": "Shell",
"bytes": "1507"
}
],
"symlink_target": ""
} |
from netforce.model import Model, fields, get_model
import time
import re
from netforce.access import get_active_company, get_active_user, check_permission_other
class SplitProduction(Model):
_name = "split.production"
_transient = True
_fields = {
"order_id": fields.Many2One("production.order", "Production Order", required=True),
"order_to_id": fields.Many2One("production.order", "To Production Order", required=True),
"product_list": fields.Json("Product List"),
"order_to_list": fields.Json("Production To List"),
"product_id": fields.Many2One("product", "Product"),
"planned_qty": fields.Decimal("Planned Qty", readonly=True),
"actual_qty": fields.Decimal("Actual Qty", readonly=True),
"split_qty": fields.Decimal("Split Qty"),
"split_qty2": fields.Decimal("Split Secondary Qty"),
"team_id": fields.Many2One("mfg.team", "Production Team"),
"remark": fields.Char("Remark"),
"ratio_method": fields.Selection([["planned", "Planned Qty"], ["actual", "Actual Qty"]], "Ratio Method", required=True),
"journal_id": fields.Many2One("stock.journal", "Journal", required=True, condition=[["type", "=", "internal"]]),
"container_id": fields.Many2One("stock.container", "Container"),
"lines": fields.One2Many("split.production.line", "wizard_id", "Lines"),
"remain_planned_qty": fields.Decimal("Remain Planned Qty", function="get_remain_planned_qty"),
"remain_actual_qty": fields.Decimal("Remain Actual Qty", function="get_remain_actual_qty"),
"approved_by_id": fields.Many2One("base.user", "Approved By", readonly=True),
}
def _get_planned_qty(self, context={}):
order_id = int(context["refer_id"])
order = get_model("production.order").browse(order_id)
return order.qty_planned
def _get_actual_qty(self, context={}):
order_id = int(context["refer_id"])
order = get_model("production.order").browse(order_id)
return order.qty_received
def _get_product(self, context={}):
order_id = int(context["refer_id"])
order = get_model("production.order").browse(order_id)
return order.product_id.id
def _get_container_id(self, context={}):
order_id = int(context["refer_id"])
order = get_model("production.order").browse(order_id)
return order.container_id.id
def _get_product_ids(self, context={}):
order_id = int(context["refer_id"])
order = get_model("production.order").browse(order_id)
prods = []
for comp in order.components:
prods.append(comp.product_id.id)
prods.append(order.product_id.id)
return prods
def _get_order_to_ids(self, context={}):
order_id = int(context["refer_id"])
order = get_model("production.order").browse(order_id)
order_to_ids = self.get_order_to_list(order.id)
return order_to_ids
_defaults = {
"order_id": lambda self, ctx: int(ctx["refer_id"]),
"planned_qty": _get_planned_qty,
"actual_qty": _get_actual_qty,
"product_id": _get_product,
"product_list": _get_product_ids,
"order_to_list": _get_order_to_ids,
#"split_parents": True,
"split_qty": 0,
"container_id": _get_container_id,
"ratio_method": "actual",
"remain_planned_qty": _get_planned_qty,
"remain_actual_qty": _get_actual_qty
}
def get_product_list(self, order_id):
prods = []
order = get_model("production.order").browse(order_id)
if order:
for comp in order.components:
prods.append(comp.product_id.id)
prods.append(order.product_id.id)
return prods
def get_product_ids(self, ids, context={}):
res = {}
prods = []
obj = self.browse(ids)[0]
order = obj.order_id
if order:
for comp in order.components:
prods.append(comp.product_id.id)
prods.append(order.product_id.id)
res[obj.id] = prods
return res
def get_order_to_list(self, order_id):
order_to_ids = [order_id]
order = get_model("production.order").browse(order_id)
order_parent = order.parent_id
while order_parent:
order_to_ids.append(order_parent.id)
order_parent = order_parent.parent_id
order_to_ids = list(set(order_to_ids))
return order_to_ids
def get_order_to_ids(self, ids, context={}):
res = {}
obj = self.browse(ids)[0]
order_id = obj.order_id.id
res[obj.id] = self.get_order_to_list(order_id)
return res
def get_remain_actual_qty(self, ids, context={}):
res = {}
obj = self.browse(ids)[0]
if obj.ratio_method == "actual":
total_qty = 0
for line in obj.lines:
total_qty += line.qty
res[obj.id] = obj.actual_qty - total_qty
else:
res[obj.id] = obj.actual_qty
return res
def get_remain_planned_qty(self, ids, context={}):
res = {}
obj = self.browse(ids)[0]
if obj.ratio_method == "planned":
total_qty = 0
for line in obj.lines:
total_qty += line.qty
res[obj.id] = obj.planned_qty - total_qty
else:
res[obj.id] = obj.planned_qty
return res
def onchange_order(self, context={}):
data = context["data"]
order_id = data["order_id"]
order = get_model("production.order").browse(order_id)
data["product_list"] = self.get_product_list(order_id)
data["product_id"] = order.product_id.id
data["order_to_list"] = self.get_order_to_list(order_id)
data["order_to_id"] = None
self.onchange_product(context)
return data
def get_split_num(self, root_num, context={}):
root_num = re.sub("-P[0-9][0-9]$", "", root_num)
for i in range(2, 100):
num = root_num + "-P%.2d" % i
res = get_model("production.order").search([["number", "=", num]])
if not res:
return num
raise Exception("Failed to generate production order number (root=%s)" % root_num)
def get_split_container(self, prev_cont_num, order_num, context={}):
part_no = order_num.rpartition("-")[2]
if not part_no or not part_no.startswith("P") or not len(part_no) == 3:
raise Exception("Can not find split part number of production order %s" % order_num)
new_cont_num = prev_cont_num + "-" + part_no
res = get_model("stock.container").search([["number", "=", new_cont_num]])
if res:
new_cont_id = res[0]
else:
vals = {
"number": new_cont_num,
}
new_cont_id = get_model("stock.container").create(vals)
return new_cont_id
def check_split_container(self, order_comp_id):
return True
def get_lot(self, new_lot_num, context={}):
res = get_model("stock.lot").search([["number", "=", new_lot_num]])
if res:
new_lot_id = res[0]
else:
vals = {
"number": new_lot_num,
}
new_lot_id = get_model("stock.lot").create(vals)
return new_lot_id
def copy_order(self, order_id, qty, team_id, remark):
order = get_model("production.order").browse(order_id)
old_order_num = order.number
new_order_num = self.get_split_num(old_order_num)
vals = {
"number": new_order_num,
"order_date": time.strftime("%Y-%m-%d"),
"due_date": order.due_date,
"ref": order.ref,
"sale_id": order.sale_id.id,
"parent_id": order.parent_id.id,
"product_id": order.product_id.id,
"qty_planned": qty,
"uom_id": order.uom_id.id,
"bom_id": order.bom_id.id,
"routing_id": order.routing_id.id,
"production_location_id": order.production_location_id.id,
"location_id": order.location_id.id,
"team_id": team_id,
"remark": remark,
"state": order.state,
"components": [],
"operations": [],
"qc_tests": [],
}
if order.container_id:
vals["container_id"] = self.get_split_container(order.container_id.number, new_order_num)
if order.lot_id and order.lot_id.number == old_order_num: # XXX
vals["lot_id"] = self.get_lot(new_order_num)
ratio = qty / order.qty_planned
for comp in order.components:
comp_vals = {
"product_id": comp.product_id.id,
"qty_planned": round(comp.qty_planned * ratio, 2),
"uom_id": comp.uom_id.id,
"location_id": comp.location_id.id,
"issue_method": comp.issue_method,
"container_id": comp.container_id.id,
}
if comp.container_id and self.check_split_container(comp.id): # MTS need no need to split scrap box
comp_vals["container_id"] = self.get_split_container(comp.container_id.number, new_order_num)
# if comp.lot_id and comp.lot_id.number==old_order_num: # XXX
# comp_vals["lot_id"]=self.get_lot(new_order_num)
comp_vals["lot_id"] = comp.lot_id.id # Should be old number
vals["components"].append(("create", comp_vals))
for op in order.operations:
op_vals = {
"workcenter_id": op.workcenter_id.id,
"employee_id": op.employee_id.id,
"planned_duration": op.planned_duration * ratio,
}
vals["operations"].append(("create", op_vals))
for qc in order.qc_tests:
qc_vals = {
"test_id": qc.test_id.id,
}
vals["qc_tests"].append(("create", qc_vals))
new_id = get_model("production.order").create(vals)
return new_id
def modif_order(self, order_id, qty, team_id, remark):
order = get_model("production.order").browse(order_id)
ratio = qty / order.qty_planned
old_order_num = order.number
new_order_num = old_order_num + "-P01"
vals = {
"number": new_order_num,
"qty_planned": round(order.qty_planned * ratio, 2),
"team_id": team_id,
"remark": remark,
}
if order.container_id:
vals["container_id"] = self.get_split_container(order.container_id.number, new_order_num)
if order.lot_id and order.lot_id.number == old_order_num: # XXX
vals["lot_id"] = self.get_lot(new_order_num)
order.write(vals)
for comp in order.components:
vals = {
"qty_planned": round(comp.qty_planned * ratio, 2),
}
if comp.container_id and self.check_split_container(comp.id): # MTS no need to split scrap box
vals["container_id"] = self.get_split_container(comp.container_id.number, new_order_num)
# if comp.lot_id and comp.lot_id.number==old_order_num: # XXX
# vals["lot_id"]=self.get_lot(new_order_num)
vals["lot_id"] = comp.lot_id.id # Should be old number
comp.write(vals)
for op in order.operations:
vals = {
"planned_duration": op.planned_duration * ratio,
}
op.write(vals)
def split_order(self, order_id, ratios):
order = get_model("production.order").browse(order_id)
if order.state not in ("draft", "waiting_confirm", "waiting_material", "waiting_suborder", "ready", "in_progress"):
raise Exception("Invalid state to split order (%s)" % order.number)
for r in ratios[:1]:
split_ids = [(r[2], order_id)]
for r in ratios[1:]:
split_qty = order.qty_planned * r[0]
team_id = r[1]
remark = r[3]
split_id = self.copy_order(order.id, split_qty, team_id, remark)
split_ids.append((r[2], split_id))
r = ratios[0]
split_qty = order.qty_planned * r[0]
team_id = r[1]
remark = r[3]
self.modif_order(order.id, split_qty, team_id, remark)
for sub in order.sub_orders:
if sub.state not in ("draft", "waiting_confirm", "waiting_material", "waiting_suborder", "ready", "in_progress"):
continue
sub_split_ids = self.split_order(sub.id, ratios)
if sub.sub_orders:
split_ids += sub_split_ids
for i in range(len(sub_split_ids)):
sub_split_id = sub_split_ids[i][1]
split_id = split_ids[i][1]
get_model("production.order").write([sub_split_id], {"parent_id": split_id})
return split_ids
def do_split(self, ids, context={}):
obj = self.browse(ids)[0]
if not obj.approved_by_id:
raise Exception("Split order has to be approved first")
order = obj.order_id
if len(obj.lines) < 2:
raise Exception("Split needs at least 2 lines")
total_qty = sum(l.qty for l in obj.lines)
if not obj.ratio_method:
raise Exception("Please select ratio method")
if obj.ratio_method == "planned" and abs(total_qty - obj.planned_qty) > 0.01:
raise Exception("Total split qty has to be equal to planned qty")
if obj.ratio_method == "actual" and abs(total_qty - obj.actual_qty) > 0.01:
raise Exception("Total split qty has to be equal to actual qty")
ratios = []
if obj.ratio_method == "planned":
for line in obj.lines:
ratios.append((line.qty / obj.planned_qty, line.team_id.id, line.id, line.remark))
elif obj.ratio_method == "actual":
for line in obj.lines:
ratios.append((line.qty / obj.actual_qty, line.team_id.id, line.id, line.remark))
split_order = order
if obj.order_to_id:
# if obj.split_parents:
while split_order.parent_id:
split_order = split_order.parent_id
if split_order.id == obj.order_to_id.id:
break
split_order_ids = self.split_order(split_order.id, ratios)
# Combine Split Order
end_order = obj.order_id.parent_id
if obj.order_to_id and obj.order_to_id.parent_id:
end_order = obj.order_to_id.parent_id
if end_order:
comps = []
for end_sub in end_order.sub_orders:
for comp in end_order.components:
if comp.product_id.id == end_sub.product_id.id:
comps.append((comp.product_id.id, comp.location_id.id, comp.issued_method))
comp.delete()
comps = list(set(comps))
for prod_id, loc_id, issued_method in comps:
for end_sub in end_order.sub_orders:
if end_sub.product_id.id == prod_id:
vals = {
"order_id": end_order.id,
"product_id": end_sub.product_id.id,
"qty_planned": end_sub.qty_planned,
"uom_id": end_sub.uom_id.id,
"location_id": loc_id,
"issue_method": issued_method,
"lot_id": end_sub.lot_id.id,
"container_id": end_sub.container_id.id,
}
get_model("production.component").create(vals)
if obj.ratio_method == "actual":
self.split_transfer(split_order_ids=split_order_ids, split_prod_id=obj.id)
return {
"next": {
"name": "production",
},
"flash": "Order split successfully",
}
def split_transfer(self, split_order_ids, split_prod_id):
split_prod = get_model("split.production").browse(split_prod_id)
pick_vals = {
"type": "internal",
"journal_id": split_prod.journal_id.id,
"lines": [],
"done_approved_by_id": split_prod.approved_by_id.id
}
for split_line, split_order_id in split_order_ids:
split_order = get_model("production.order").browse(split_order_id)
for line in split_prod.lines:
cont_to_id = None
lot_id = None
if line.id == split_line:
if split_prod.product_id.id == split_order.product_id.id:
lot_id = split_order.lot_id.id
cont_to_id = split_order.container_id.id
else:
for comp in split_order.components:
if split_prod.product_id.id == comp.product_id.id:
lot_id = comp.lot_id.id
cont_to_id = comp.container_id.id
if cont_to_id:
break
if cont_to_id:
move_vals = {
"product_id": split_prod.product_id.id,
"qty": line.qty,
"uom_id": split_prod.product_id.uom_id.id,
"qty2": line.qty2,
"lot_id": lot_id,
"location_from_id": split_prod.order_id.location_id.id,
"location_to_id": split_prod.order_id.location_id.id,
"container_from_id": split_prod.container_id.id,
"container_to_id": cont_to_id,
}
pick_vals["lines"].append(("create", move_vals))
if len(pick_vals["lines"]) > 0:
pick_id = get_model("stock.picking").create(pick_vals, context=pick_vals)
get_model("stock.picking").set_done([pick_id])
split_order_ids.reverse()
for order_id in split_order_ids:
order = get_model("production.order").browse(order_id[1])
if order.parent_id:
order.parent_id.update_status()
def approve(self, ids, context={}):
if not check_permission_other("production_approve_split"):
raise Exception("Permission denied")
obj = self.browse(ids)[0]
user_id = get_active_user()
obj.write({"approved_by_id": user_id})
return {
"next": {
"name": "split_production",
"active_id": obj.id,
},
"flash": "Split order approved successfully",
}
def onchange_product(self, context={}):
data = context["data"]
order_id = data["order_id"]
order = get_model("production.order").browse(order_id)
prod_id = data["product_id"]
data["planned_qty"] = 0
data["actual_qty"] = 0
if order.product_id.id == prod_id:
data["planned_qty"] = order.qty_planned
data["actual_qty"] = order.qty_received
data["container_id"] = order.container_id.id
else:
for comp in order.components:
if comp.product_id.id == prod_id:
data["planned_qty"] = comp.qty_planned
data["actual_qty"] = comp.qty_stock
data["container_id"] = comp.container_id.id
data["remain_planned_qty"] = data["planned_qty"]
data["remain_actual_qty"] = data["actual_qty"]
return data
def add_lines(self, ids, context={}):
obj = self.browse(ids)[0]
if not obj.ratio_method:
raise Exception("Invalid Ratio Method")
remain = obj.remain_planned_qty if obj.ratio_method == "planned" else obj.remain_actual_qty
total_qty = 0
for line in obj.lines:
if line.product_id.id != obj.product_id.id \
or line.ratio_method != obj.ratio_method:
line.delete()
for line in obj.lines:
total_qty += line.qty
if obj.split_qty != 0 and remain + 0.001 >= obj.split_qty:
# part_no=len(obj.lines)+1
# cont_num=obj.container_id.number+"-P%.2d"%part_no
vals = {
"wizard_id": obj.id,
"ratio_method": obj.ratio_method,
"product_id": obj.product_id.id,
"qty": obj.split_qty,
"qty2": obj.split_qty2,
"team_id": obj.team_id.id,
"remark": obj.remark,
#"container_num": cont_num,
}
get_model("split.production.line").create(vals)
# part_no=1
# for line in obj.lines:
# cont_num=obj.container_id.number+"-P%.2d"%part_no
#line.write({"container_num": cont_num})
# part_no+=1
obj.split_qty = 0
obj.team_id = None
else:
raise Exception("Split Qty is too high!")
return {
"flash": "Add line success",
"focus_field": "split_qty"
}
def clear_lines(self, ids, context={}):
obj = self.browse(ids)[0]
obj.write({"lines": [("delete_all",)]})
return {
"flash": "Clear all split lines",
"focus_field": "split_qty"
}
SplitProduction.register()
| {
"content_hash": "d8d4c68fcb8df3ef0b3838525376dcc8",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 128,
"avg_line_length": 42.47047244094488,
"alnum_prop": 0.533117033603708,
"repo_name": "nfco/netforce",
"id": "921aed028b5b3c1bed3479dc5b38adedc9daa296",
"size": "22680",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "netforce_mfg/netforce_mfg/models/split_production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "407336"
},
{
"name": "HTML",
"bytes": "478918"
},
{
"name": "Java",
"bytes": "11870"
},
{
"name": "JavaScript",
"bytes": "3712147"
},
{
"name": "Makefile",
"bytes": "353"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3469515"
},
{
"name": "Roff",
"bytes": "15858"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
import dependency_manager
import logging
import mock
import subprocess
import unittest
from battor import battor_error
from battor import battor_wrapper
from devil.utils import battor_device_mapping
from devil.utils import find_usb_devices
import serial
from serial.tools import list_ports
class DependencyManagerMock(object):
def __init__(self, _):
self._fetch_return = 'path'
self._version_return = 'cbaa843'
def FetchPath(self, _, *unused):
del unused
return self._fetch_return
def FetchPathWithVersion(self, _, *unused):
del unused
return self._fetch_return, self._version_return
class PopenMock(object):
def __init__(self, *unused):
pass
def poll(self):
pass
def kill(self):
pass
class IsBattOrConnectedTest(unittest.TestCase):
def setUp(self):
# Windows monkey patches.
self._serial_tools_return = []
self._comports = serial.tools.list_ports.comports
serial.tools.list_ports.comports = lambda: self._serial_tools_return
# Linux/Android monkey patches.
self._generate_serial_map_return = {}
self._generate_serial_map = battor_device_mapping.GenerateSerialMap
battor_device_mapping.GenerateSerialMap = (
lambda: self._generate_serial_map_return)
self._read_serial_map_file_return = {}
self._read_serial_map_file = battor_device_mapping.ReadSerialMapFile
battor_device_mapping.ReadSerialMapFile = (
lambda f: self._read_serial_map_file_return)
self._get_bus_number_to_device_tree_map = (
find_usb_devices.GetBusNumberToDeviceTreeMap)
find_usb_devices.GetBusNumberToDeviceTreeMap = lambda fast=None: {}
self._get_battor_list_return = []
self._get_battor_list = battor_device_mapping.GetBattOrList
battor_device_mapping.GetBattOrList = lambda x: self._get_battor_list_return
def tearDown(self):
serial.tools.list_ports.comports = self._comports
battor_device_mapping.GenerateSerialMap = self._generate_serial_map
battor_device_mapping.ReadSerialMapFile = self._read_serial_map_file
find_usb_devices.GetBusNumberToDeviceTreeMap = (
self._get_bus_number_to_device_tree_map)
battor_device_mapping.GetBattOrList = self._get_battor_list
def forceException(self):
raise NotImplementedError
def testAndroidWithBattOr(self):
self._generate_serial_map_return = {'abc': '123'}
self.assertTrue(battor_wrapper.IsBattOrConnected('android', 'abc'))
def testAndroidWithoutMatchingBattOr(self):
self._generate_serial_map_return = {'notabc': 'not123'}
self.assertFalse(battor_wrapper.IsBattOrConnected('android', 'abc'))
def testAndroidNoDevicePassed(self):
with self.assertRaises(ValueError):
battor_wrapper.IsBattOrConnected('android')
def testAndroidWithMapAndFile(self):
device_map = {'abc': '123'}
battor_device_mapping.ReadSerialMapFile = self.forceException
self.assertTrue(
battor_wrapper.IsBattOrConnected('android', android_device='abc',
android_device_map=device_map,
android_device_file='file'))
def testAndroidWithMap(self):
self.assertTrue(
battor_wrapper.IsBattOrConnected('android', android_device='abc',
android_device_map={'abc', '123'}))
def testAndroidWithFile(self):
self._read_serial_map_file_return = {'abc': '123'}
self.assertTrue(
battor_wrapper.IsBattOrConnected('android', android_device='abc',
android_device_file='file'))
def testLinuxWithBattOr(self):
self._get_battor_list_return = ['battor']
self.assertTrue(battor_wrapper.IsBattOrConnected('linux'))
def testLinuxWithoutBattOr(self):
self._get_battor_list_return = []
self.assertFalse(battor_wrapper.IsBattOrConnected('linux'))
def testMacWithBattOr(self):
self._serial_tools_return = [('/dev/tty.usbserial-MAA', 'BattOr v3.3', '')]
self.assertTrue(battor_wrapper.IsBattOrConnected('mac'))
def testMacWithoutBattOr(self):
self._serial_tools_return = [('/dev/tty.usbserial-MAA', 'not_one', '')]
self.assertFalse(battor_wrapper.IsBattOrConnected('mac'))
def testWinWithBattOr(self):
self._serial_tools_return = [('COM4', 'USB Serial Port', '')]
self.assertTrue(battor_wrapper.IsBattOrConnected('win'))
def testWinWithoutBattOr(self):
self._get_battor_list_return = []
self.assertFalse(battor_wrapper.IsBattOrConnected('win'))
class BattOrWrapperTest(unittest.TestCase):
def setUp(self):
self._battor = None
self._is_battor = True
self._battor_list = ['battor1']
self._should_pass = True
self._fake_map = {'battor1': 'device1'}
self._fake_return_code = None
self._fake_battor_return = 'Done.\n'
self._get_battor_path_from_phone_serial = (
battor_device_mapping.GetBattOrPathFromPhoneSerial)
self._get_bus_number_to_device_tree_map = (
find_usb_devices.GetBusNumberToDeviceTreeMap)
self._dependency_manager = dependency_manager.DependencyManager
self._get_battor_list = battor_device_mapping.GetBattOrList
self._is_battor = battor_device_mapping.IsBattOr
self._generate_serial_map = battor_device_mapping.GenerateSerialMap
self._serial_tools = serial.tools.list_ports.comports
battor_device_mapping.GetBattOrPathFromPhoneSerial = (
lambda x, serial_map_file=None, serial_map=None: x + '_battor')
find_usb_devices.GetBusNumberToDeviceTreeMap = lambda fast=False: True
dependency_manager.DependencyManager = DependencyManagerMock
battor_device_mapping.GetBattOrList = lambda x: self._battor_list
battor_device_mapping.IsBattOr = lambda x, y: self._is_battor
battor_device_mapping.GenerateSerialMap = lambda: self._fake_map
serial.tools.list_ports.comports = lambda: [('COM4', 'USB Serial Port', '')]
self._subprocess_check_output_code = 0
def subprocess_check_output_mock(*unused):
if self._subprocess_check_output_code != 0:
raise subprocess.CalledProcessError(None, None)
return 0
self._subprocess_check_output = subprocess.check_output
subprocess.check_output = subprocess_check_output_mock
def tearDown(self):
battor_device_mapping.GetBattOrPathFromPhoneSerial = (
self._get_battor_path_from_phone_serial)
find_usb_devices.GetBusNumberToDeviceTreeMap = (
self._get_bus_number_to_device_tree_map)
dependency_manager.DependencyManager = self._dependency_manager
battor_device_mapping.GetBattOrList = self._get_battor_list
battor_device_mapping.IsBattOr = self._is_battor
battor_device_mapping.GenerateSerialMap = self._generate_serial_map
serial.tools.list_ports.comports = self._serial_tools
subprocess.check_output = self._subprocess_check_output
def _DefaultBattOrReplacements(self):
battor_wrapper.DEFAULT_SHELL_CLOSE_TIMEOUT_S = .1
self._battor._StartShellImpl = lambda *unused: PopenMock()
self._battor.GetShellReturnCode = lambda *unused: self._fake_return_code
self._battor._SendBattOrCommandImpl = lambda x: self._fake_battor_return
self._battor._StopTracingImpl = lambda *unused: (self._fake_battor_return,
None)
def testBadPlatform(self):
with self.assertRaises(battor_error.BattOrError):
self._battor = battor_wrapper.BattOrWrapper('unknown')
def testInitAndroidWithBattOr(self):
self._battor = battor_wrapper.BattOrWrapper('android', android_device='abc')
self.assertEquals(self._battor._battor_path, 'abc_battor')
def testInitAndroidWithoutBattOr(self):
self._battor_list = []
self._fake_map = {}
battor_device_mapping.GetBattOrPathFromPhoneSerial = (
self._get_battor_path_from_phone_serial)
with self.assertRaises(battor_error.BattOrError):
self._battor = battor_wrapper.BattOrWrapper('android',
android_device='abc')
def testInitBattOrPathIsBattOr(self):
battor_path = 'battor/path/here'
self._battor = battor_wrapper.BattOrWrapper(
'android', android_device='abc', battor_path=battor_path)
self.assertEquals(self._battor._battor_path, battor_path)
def testInitNonAndroidWithBattOr(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self.assertEquals(self._battor._battor_path, 'COM4')
def testInitNonAndroidWithMultipleBattOr(self):
self._battor_list.append('battor2')
with self.assertRaises(battor_error.BattOrError):
self._battor = battor_wrapper.BattOrWrapper('linux')
def testInitNonAndroidWithoutBattOr(self):
self._battor_list = []
serial.tools.list_ports.comports = lambda: [('COM4', 'None', '')]
with self.assertRaises(battor_error.BattOrError):
self._battor = battor_wrapper.BattOrWrapper('win')
def testStartShellPass(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self.assertIsNotNone(self._battor._battor_shell)
def testStartShellDoubleStart(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.StartShell()
with self.assertRaises(AssertionError):
self._battor.StartShell()
def testStartShellFail(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.GetShellReturnCode = lambda *unused: 1
with self.assertRaises(AssertionError):
self._battor.StartShell()
def testStartTracingPass(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self._battor.StartTracing()
self.assertTrue(self._battor._tracing)
def testStartTracingDoubleStart(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self._battor.StartTracing()
with self.assertRaises(AssertionError):
self._battor.StartTracing()
def testStartTracingCommandFails(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor._SendBattOrCommandImpl = lambda *unused: 'Fail.\n'
self._battor.StartShell()
with self.assertRaises(battor_error.BattOrError):
self._battor.StartTracing()
def testStopTracingPass(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self._battor.StartTracing()
self._battor.GetShellReturnCode = lambda *unused: 0
self._battor.StopTracing()
self.assertFalse(self._battor._tracing)
def testStopTracingNotRunning(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
with self.assertRaises(AssertionError):
self._battor.StopTracing()
def testFlashFirmwarePass(self):
self._battor = battor_wrapper.BattOrWrapper('linux')
self._DefaultBattOrReplacements()
self.assertTrue(self._battor.FlashFirmware('hex_path', 'config_path'))
def testFlashFirmwareFail(self):
self._battor = battor_wrapper.BattOrWrapper('linux')
self._DefaultBattOrReplacements()
self._subprocess_check_output_code = 1
with self.assertRaises(battor_wrapper.BattOrFlashError):
self._battor.FlashFirmware('hex_path', 'config_path')
def testFlashFirmwareShellRunning(self):
self._battor = battor_wrapper.BattOrWrapper('linux')
self._DefaultBattOrReplacements()
self._battor.StartShell()
with self.assertRaises(AssertionError):
self._battor.FlashFirmware('hex_path', 'config_path')
def testGetFirmwareGitHashNotRunning(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
with self.assertRaises(AssertionError):
self._battor.GetFirmwareGitHash()
def testGetFirmwareGitHashPass(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self._battor.GetFirmwareGitHash = lambda: 'cbaa843'
self.assertTrue(isinstance(self._battor.GetFirmwareGitHash(), basestring))
def testStopShellPass(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self._fake_return_code = 0
self._battor.StopShell()
self.assertIsNone(self._battor._battor_shell)
@mock.patch('time.sleep', mock.Mock)
def testStopShellTimeOutAndKill(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self._battor.StopShell()
self.assertIsNone(self._battor._battor_shell)
def testStopShellNotStarted(self):
self._battor = battor_wrapper.BattOrWrapper('win')
self._DefaultBattOrReplacements()
with self.assertRaises(AssertionError):
self._battor.StopShell()
@mock.patch('time.sleep', mock.Mock)
def testFlashBattOrSameGitHash(self):
self._battor = battor_wrapper.BattOrWrapper('linux')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self._battor.GetFirmwareGitHash = lambda: 'cbaa843'
dependency_manager.DependencyManager._version_return = 'cbaa843'
self.assertFalse(self._battor._FlashBattOr())
@mock.patch('time.sleep', mock.Mock)
def testFlashBattOrDifferentGitHash(self):
self._battor = battor_wrapper.BattOrWrapper('linux')
self._DefaultBattOrReplacements()
self._battor.StartShell()
self._battor.GetFirmwareGitHash = lambda: 'bazz732'
dependency_manager.DependencyManager._version_return = 'cbaa843'
self.assertTrue(self._battor._FlashBattOr())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
| {
"content_hash": "e41fab7141cd0516d3a20b342bd83957",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 80,
"avg_line_length": 38.337950138504155,
"alnum_prop": 0.7119219653179191,
"repo_name": "benschmaus/catapult",
"id": "3e48a3362dc90775d2ad07b03770f325c351f918",
"size": "14003",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "common/battor/battor/battor_wrapper_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43486"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "58279"
},
{
"name": "HTML",
"bytes": "11801772"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6141932"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v12.resources",
marshal="google.ads.googleads.v12",
manifest={"AdScheduleView",},
)
class AdScheduleView(proto.Message):
r"""An ad schedule view summarizes the performance of campaigns
by AdSchedule criteria.
Attributes:
resource_name (str):
Output only. The resource name of the ad schedule view.
AdSchedule view resource names have the form:
``customers/{customer_id}/adScheduleViews/{campaign_id}~{criterion_id}``
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "12c28b95bcd8b334b7fc4feb9bee1f80",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 84,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.6657060518731989,
"repo_name": "googleads/google-ads-python",
"id": "3d5bd07f8ebb62526fceb70bb155338cde2d80fb",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/resources/types/ad_schedule_view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
from oslo_config import fixture
from oslotest import base
from aodh import service
class TestNotifierBase(base.BaseTestCase):
def setUp(self):
super(TestNotifierBase, self).setUp()
conf = service.prepare_service(argv=[], config_files=[])
self.conf = self.useFixture(fixture.Config(conf)).conf
| {
"content_hash": "aa0e986ed893a6c79015426e786937a4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 64,
"avg_line_length": 25.076923076923077,
"alnum_prop": 0.7116564417177914,
"repo_name": "openstack/aodh",
"id": "74e87e45e3d1fc96b0cfeb26ea7623496b2aba9f",
"size": "937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aodh/tests/unit/notifier/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1061"
},
{
"name": "Python",
"bytes": "693850"
},
{
"name": "Shell",
"bytes": "12979"
}
],
"symlink_target": ""
} |
from io import (
BytesIO,
StringIO,
)
import random
import string
import numpy as np
from pandas import (
Categorical,
DataFrame,
concat,
date_range,
read_csv,
to_datetime,
)
from ..pandas_vb_common import (
BaseIO,
tm,
)
class ToCSV(BaseIO):
fname = "__test__.csv"
params = ["wide", "long", "mixed"]
param_names = ["kind"]
def setup(self, kind):
wide_frame = DataFrame(np.random.randn(3000, 30))
long_frame = DataFrame(
{
"A": np.arange(50000),
"B": np.arange(50000) + 1.0,
"C": np.arange(50000) + 2.0,
"D": np.arange(50000) + 3.0,
}
)
mixed_frame = DataFrame(
{
"float": np.random.randn(5000),
"int": np.random.randn(5000).astype(int),
"bool": (np.arange(5000) % 2) == 0,
"datetime": date_range("2001", freq="s", periods=5000),
"object": ["foo"] * 5000,
}
)
mixed_frame.loc[30:500, "float"] = np.nan
data = {"wide": wide_frame, "long": long_frame, "mixed": mixed_frame}
self.df = data[kind]
def time_frame(self, kind):
self.df.to_csv(self.fname)
class ToCSVDatetime(BaseIO):
fname = "__test__.csv"
def setup(self):
rng = date_range("1/1/2000", periods=1000)
self.data = DataFrame(rng, index=rng)
def time_frame_date_formatting(self):
self.data.to_csv(self.fname, date_format="%Y%m%d")
class ToCSVDatetimeBig(BaseIO):
fname = "__test__.csv"
timeout = 1500
params = [1000, 10000, 100000]
param_names = ["obs"]
def setup(self, obs):
d = "2018-11-29"
dt = "2018-11-26 11:18:27.0"
self.data = DataFrame(
{
"dt": [np.datetime64(dt)] * obs,
"d": [np.datetime64(d)] * obs,
"r": [np.random.uniform()] * obs,
}
)
def time_frame(self, obs):
self.data.to_csv(self.fname)
class ToCSVIndexes(BaseIO):
fname = "__test__.csv"
@staticmethod
def _create_df(rows, cols):
index_cols = {
"index1": np.random.randint(0, rows, rows),
"index2": np.full(rows, 1, dtype=int),
"index3": np.full(rows, 1, dtype=int),
}
data_cols = {
f"col{i}": np.random.uniform(0, 100000.0, rows) for i in range(cols)
}
df = DataFrame({**index_cols, **data_cols})
return df
def setup(self):
ROWS = 100000
COLS = 5
# For tests using .head(), create an initial dataframe with this many times
# more rows
HEAD_ROW_MULTIPLIER = 10
self.df_standard_index = self._create_df(ROWS, COLS)
self.df_custom_index_then_head = (
self._create_df(ROWS * HEAD_ROW_MULTIPLIER, COLS)
.set_index(["index1", "index2", "index3"])
.head(ROWS)
)
self.df_head_then_custom_index = (
self._create_df(ROWS * HEAD_ROW_MULTIPLIER, COLS)
.head(ROWS)
.set_index(["index1", "index2", "index3"])
)
def time_standard_index(self):
self.df_standard_index.to_csv(self.fname)
def time_multiindex(self):
self.df_head_then_custom_index.to_csv(self.fname)
def time_head_of_multiindex(self):
self.df_custom_index_then_head.to_csv(self.fname)
class StringIORewind:
def data(self, stringio_object):
stringio_object.seek(0)
return stringio_object
class ReadCSVDInferDatetimeFormat(StringIORewind):
params = ([True, False], ["custom", "iso8601", "ymd"])
param_names = ["infer_datetime_format", "format"]
def setup(self, infer_datetime_format, format):
rng = date_range("1/1/2000", periods=1000)
formats = {
"custom": "%m/%d/%Y %H:%M:%S.%f",
"iso8601": "%Y-%m-%d %H:%M:%S",
"ymd": "%Y%m%d",
}
dt_format = formats[format]
self.StringIO_input = StringIO("\n".join(rng.strftime(dt_format).tolist()))
def time_read_csv(self, infer_datetime_format, format):
read_csv(
self.data(self.StringIO_input),
header=None,
names=["foo"],
parse_dates=["foo"],
infer_datetime_format=infer_datetime_format,
)
class ReadCSVConcatDatetime(StringIORewind):
iso8601 = "%Y-%m-%d %H:%M:%S"
def setup(self):
rng = date_range("1/1/2000", periods=50000, freq="S")
self.StringIO_input = StringIO("\n".join(rng.strftime(self.iso8601).tolist()))
def time_read_csv(self):
read_csv(
self.data(self.StringIO_input),
header=None,
names=["foo"],
parse_dates=["foo"],
infer_datetime_format=False,
)
class ReadCSVConcatDatetimeBadDateValue(StringIORewind):
params = (["nan", "0", ""],)
param_names = ["bad_date_value"]
def setup(self, bad_date_value):
self.StringIO_input = StringIO((f"{bad_date_value},\n") * 50000)
def time_read_csv(self, bad_date_value):
read_csv(
self.data(self.StringIO_input),
header=None,
names=["foo", "bar"],
parse_dates=["foo"],
infer_datetime_format=False,
)
class ReadCSVSkipRows(BaseIO):
fname = "__test__.csv"
params = ([None, 10000], ["c", "python", "pyarrow"])
param_names = ["skiprows", "engine"]
def setup(self, skiprows, engine):
N = 20000
index = tm.makeStringIndex(N)
df = DataFrame(
{
"float1": np.random.randn(N),
"float2": np.random.randn(N),
"string1": ["foo"] * N,
"bool1": [True] * N,
"int1": np.random.randint(0, N, size=N),
},
index=index,
)
df.to_csv(self.fname)
def time_skipprows(self, skiprows, engine):
read_csv(self.fname, skiprows=skiprows, engine=engine)
class ReadUint64Integers(StringIORewind):
def setup(self):
self.na_values = [2 ** 63 + 500]
arr = np.arange(10000).astype("uint64") + 2 ** 63
self.data1 = StringIO("\n".join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO("\n".join(arr.astype(str).tolist()))
def time_read_uint64(self):
read_csv(self.data(self.data1), header=None, names=["foo"])
def time_read_uint64_neg_values(self):
read_csv(self.data(self.data2), header=None, names=["foo"])
def time_read_uint64_na_values(self):
read_csv(
self.data(self.data1), header=None, names=["foo"], na_values=self.na_values
)
class ReadCSVThousands(BaseIO):
fname = "__test__.csv"
params = ([",", "|"], [None, ","], ["c", "python"])
param_names = ["sep", "thousands", "engine"]
def setup(self, sep, thousands, engine):
N = 10000
K = 8
data = np.random.randn(N, K) * np.random.randint(100, 10000, (N, K))
df = DataFrame(data)
if thousands is not None:
fmt = f":{thousands}"
fmt = "{" + fmt + "}"
df = df.applymap(lambda x: fmt.format(x))
df.to_csv(self.fname, sep=sep)
def time_thousands(self, sep, thousands, engine):
read_csv(self.fname, sep=sep, thousands=thousands, engine=engine)
class ReadCSVComment(StringIORewind):
params = ["c", "python"]
param_names = ["engine"]
def setup(self, engine):
data = ["A,B,C"] + (["1,2,3 # comment"] * 100000)
self.StringIO_input = StringIO("\n".join(data))
def time_comment(self, engine):
read_csv(
self.data(self.StringIO_input), comment="#", header=None, names=list("abc")
)
class ReadCSVFloatPrecision(StringIORewind):
params = ([",", ";"], [".", "_"], [None, "high", "round_trip"])
param_names = ["sep", "decimal", "float_precision"]
def setup(self, sep, decimal, float_precision):
floats = [
"".join([random.choice(string.digits) for _ in range(28)])
for _ in range(15)
]
rows = sep.join([f"0{decimal}" + "{}"] * 3) + "\n"
data = rows * 5
data = data.format(*floats) * 200 # 1000 x 3 strings csv
self.StringIO_input = StringIO(data)
def time_read_csv(self, sep, decimal, float_precision):
read_csv(
self.data(self.StringIO_input),
sep=sep,
header=None,
names=list("abc"),
float_precision=float_precision,
)
def time_read_csv_python_engine(self, sep, decimal, float_precision):
read_csv(
self.data(self.StringIO_input),
sep=sep,
header=None,
engine="python",
float_precision=None,
names=list("abc"),
)
class ReadCSVEngine(StringIORewind):
params = ["c", "python", "pyarrow"]
param_names = ["engine"]
def setup(self, engine):
data = ["A,B,C,D,E"] + (["1,2,3,4,5"] * 100000)
self.StringIO_input = StringIO("\n".join(data))
# simulate reading from file
self.BytesIO_input = BytesIO(self.StringIO_input.read().encode("utf-8"))
def time_read_stringcsv(self, engine):
read_csv(self.data(self.StringIO_input), engine=engine)
def time_read_bytescsv(self, engine):
read_csv(self.data(self.BytesIO_input), engine=engine)
class ReadCSVCategorical(BaseIO):
fname = "__test__.csv"
params = ["c", "python"]
param_names = ["engine"]
def setup(self, engine):
N = 100000
group1 = ["aaaaaaaa", "bbbbbbb", "cccccccc", "dddddddd", "eeeeeeee"]
df = DataFrame(np.random.choice(group1, (N, 3)), columns=list("abc"))
df.to_csv(self.fname, index=False)
def time_convert_post(self, engine):
read_csv(self.fname, engine=engine).apply(Categorical)
def time_convert_direct(self, engine):
read_csv(self.fname, engine=engine, dtype="category")
class ReadCSVParseDates(StringIORewind):
params = ["c", "python"]
param_names = ["engine"]
def setup(self, engine):
data = """{},19:00:00,18:56:00,0.8100,2.8100,7.2000,0.0000,280.0000\n
{},20:00:00,19:56:00,0.0100,2.2100,7.2000,0.0000,260.0000\n
{},21:00:00,20:56:00,-0.5900,2.2100,5.7000,0.0000,280.0000\n
{},21:00:00,21:18:00,-0.9900,2.0100,3.6000,0.0000,270.0000\n
{},22:00:00,21:56:00,-0.5900,1.7100,5.1000,0.0000,290.0000\n
"""
two_cols = ["KORD,19990127"] * 5
data = data.format(*two_cols)
self.StringIO_input = StringIO(data)
def time_multiple_date(self, engine):
read_csv(
self.data(self.StringIO_input),
engine=engine,
sep=",",
header=None,
names=list(string.digits[:9]),
parse_dates=[[1, 2], [1, 3]],
)
def time_baseline(self, engine):
read_csv(
self.data(self.StringIO_input),
engine=engine,
sep=",",
header=None,
parse_dates=[1],
names=list(string.digits[:9]),
)
class ReadCSVCachedParseDates(StringIORewind):
params = ([True, False], ["c", "python"])
param_names = ["do_cache", "engine"]
def setup(self, do_cache, engine):
data = ("\n".join([f"10/{year}" for year in range(2000, 2100)]) + "\n") * 10
self.StringIO_input = StringIO(data)
def time_read_csv_cached(self, do_cache, engine):
try:
read_csv(
self.data(self.StringIO_input),
engine=engine,
header=None,
parse_dates=[0],
cache_dates=do_cache,
)
except TypeError:
# cache_dates is a new keyword in 0.25
pass
class ReadCSVMemoryGrowth(BaseIO):
chunksize = 20
num_rows = 1000
fname = "__test__.csv"
params = ["c", "python"]
param_names = ["engine"]
def setup(self, engine):
with open(self.fname, "w") as f:
for i in range(self.num_rows):
f.write(f"{i}\n")
def mem_parser_chunks(self, engine):
# see gh-24805.
result = read_csv(self.fname, chunksize=self.chunksize, engine=engine)
for _ in result:
pass
class ReadCSVParseSpecialDate(StringIORewind):
params = (["mY", "mdY", "hm"], ["c", "python"])
param_names = ["value", "engine"]
objects = {
"mY": "01-2019\n10-2019\n02/2000\n",
"mdY": "12/02/2010\n",
"hm": "21:34\n",
}
def setup(self, value, engine):
count_elem = 10000
data = self.objects[value] * count_elem
self.StringIO_input = StringIO(data)
def time_read_special_date(self, value, engine):
read_csv(
self.data(self.StringIO_input),
engine=engine,
sep=",",
header=None,
names=["Date"],
parse_dates=["Date"],
)
class ReadCSVMemMapUTF8:
fname = "__test__.csv"
number = 5
def setup(self):
lines = []
line_length = 128
start_char = " "
end_char = "\U00010080"
# This for loop creates a list of 128-char strings
# consisting of consecutive Unicode chars
for lnum in range(ord(start_char), ord(end_char), line_length):
line = "".join([chr(c) for c in range(lnum, lnum + 0x80)]) + "\n"
try:
line.encode("utf-8")
except UnicodeEncodeError:
# Some 16-bit words are not valid Unicode chars and must be skipped
continue
lines.append(line)
df = DataFrame(lines)
df = concat([df for n in range(100)], ignore_index=True)
df.to_csv(self.fname, index=False, header=False, encoding="utf-8")
def time_read_memmapped_utf8(self):
read_csv(self.fname, header=None, memory_map=True, encoding="utf-8", engine="c")
class ParseDateComparison(StringIORewind):
params = ([False, True],)
param_names = ["cache_dates"]
def setup(self, cache_dates):
count_elem = 10000
data = "12-02-2010\n" * count_elem
self.StringIO_input = StringIO(data)
def time_read_csv_dayfirst(self, cache_dates):
try:
read_csv(
self.data(self.StringIO_input),
sep=",",
header=None,
names=["Date"],
parse_dates=["Date"],
cache_dates=cache_dates,
dayfirst=True,
)
except TypeError:
# cache_dates is a new keyword in 0.25
pass
def time_to_datetime_dayfirst(self, cache_dates):
df = read_csv(
self.data(self.StringIO_input), dtype={"date": str}, names=["date"]
)
to_datetime(df["date"], cache=cache_dates, dayfirst=True)
def time_to_datetime_format_DD_MM_YYYY(self, cache_dates):
df = read_csv(
self.data(self.StringIO_input), dtype={"date": str}, names=["date"]
)
to_datetime(df["date"], cache=cache_dates, format="%d-%m-%Y")
from ..pandas_vb_common import setup # noqa: F401 isort:skip
| {
"content_hash": "1b022c51f946752fb474e8133f9f415b",
"timestamp": "",
"source": "github",
"line_count": 528,
"max_line_length": 88,
"avg_line_length": 29.240530303030305,
"alnum_prop": 0.5370814171902325,
"repo_name": "jorisvandenbossche/pandas",
"id": "153cad403dcc3594ae0e89585277c531ece4469d",
"size": "15439",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "asv_bench/benchmarks/io/csv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360342"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1083849"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17541583"
},
{
"name": "Shell",
"bytes": "10719"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import os
import re
import random
import time
import threading
from concurrent.futures import ThreadPoolExecutor
from eutester.euca.euca_ops import Eucaops
from eutester.aws.ec2.euinstance import EuInstance
from eutester.utils.eutestcase import EutesterTestCase
from eutester.aws.ec2.ec2ops import EC2ops
class InstanceBasics(EutesterTestCase):
def __init__( self, name="InstanceBasics", credpath=None, region=None, config_file=None, password=None, emi=None, zone=None,
user_data=None, instance_user=None, **kwargs):
"""
EC2 API tests focused on instance store instances
:param credpath: Path to directory containing eucarc file
:param region: EC2 Region to run testcase in
:param config_file: Configuration file path
:param password: SSH password for bare metal machines if config is passed and keys arent synced
:param emi: Image id to use for test
:param zone: Availability Zone to run test in
:param user_data: User Data to pass to instance
:param instance_user: User to login to instance as
:param kwargs: Additional arguments
"""
super(InstanceBasics, self).__init__(name=name)
self.get_args()
self.show_args()
for kw in kwargs:
print 'Setting kwarg:'+str(kw)+" to "+str(kwargs[kw])
self.set_arg(kw ,kwargs[kw])
self.show_args()
if self.args.region:
self.tester = EC2ops(credpath=self.args.redpath, region=self.args.region)
else:
self.tester = Eucaops(config_file=self.args.config_file,
password=self.args.password,
credpath=self.args.credpath)
self.instance_timeout = 600
### Add and authorize a group for the instance
self.group = self.tester.ec2.add_group(group_name="group-" + str(time.time()))
self.tester.ec2.authorize_group_by_name(group_name=self.group.name)
self.tester.ec2.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
### Generate a keypair for the instance
self.keypair = self.tester.ec2.add_keypair("keypair-" + str(time.time()))
self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
if emi:
self.image = self.tester.ec2.get_emi(emi=self.args.emi)
else:
self.image = self.tester.ec2.get_emi(root_device_type="instance-store", basic_image=True)
self.address = None
self.volume = None
self.private_addressing = False
if not self.args.zone:
zones = self.tester.ec2.connection.get_all_zones()
self.zone = random.choice(zones).name
else:
self.zone = self.args.zone
self.reservation = None
self.reservation_lock = threading.Lock()
self.run_instance_params = {'image': self.image,
'user_data': self.args.user_data,
'username': self.args.instance_user,
'keypair': self.keypair.name,
'group': self.group.name,
'zone': self.zone,
'return_reservation': True,
'timeout': self.instance_timeout}
self.managed_network = True
### If I have access to the underlying infrastructure I can look
### at the network mode and only run certain tests where it makes sense
if hasattr(self.tester, "service_manager"):
cc = self.tester.get_component_machines("cc")[0]
network_mode = cc.sys("cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus.conf | grep MODE")[0]
if re.search("(SYSTEM|STATIC)", network_mode):
self.managed_network = False
def set_reservation(self, reservation):
self.reservation_lock.acquire()
self.reservation = reservation
self.reservation_lock.release()
def clean_method(self):
self.tester.cleanup_artifacts()
def BasicInstanceChecks(self):
"""
This case was developed to run through a series of basic instance tests.
The tests are as follows:
- execute run_instances command
- make sure that public DNS name and private IP aren't the same
(This is for Managed/Managed-NOVLAN networking modes)
- test to see if instance is ping-able
- test to make sure that instance is accessible via ssh
(ssh into instance and run basic ls command)
If any of these tests fail, the test case will error out, logging the results.
"""
reservation = self.tester.ec2.run_image(**self.run_instance_params)
for instance in reservation.instances:
self.assertTrue(self.tester.ec2.wait_for_reservation(reservation), 'Instance did not go to running')
self.assertTrue(self.tester.ping(instance.ip_address), 'Could not ping instance')
if self.image.virtualization_type == "paravirtual":
paravirtual_ephemeral = "/dev/" + instance.rootfs_device + "2"
self.assertFalse(instance.found("ls -1 " + paravirtual_ephemeral, "No such file or directory"),
"Did not find ephemeral storage at " + paravirtual_ephemeral)
elif self.image.virtualization_type == "hvm":
hvm_ephemeral = "/dev/" + instance.block_device_prefix + "b"
self.assertFalse(instance.found("ls -1 " + hvm_ephemeral, "No such file or directory"),
"Did not find ephemeral storage at " + hvm_ephemeral)
self.debug("Pinging instance public IP from inside instance")
instance.sys('ping -c 1 ' + instance.ip_address, code=0)
self.debug("Pinging instance private IP from inside instance")
instance.sys('ping -c 1 ' + instance.private_ip_address, code=0)
self.set_reservation(reservation)
return reservation
def ElasticIps(self):
"""
This case was developed to test elastic IPs in Eucalyptus. This test case does
not test instances that are launched using private-addressing option.
The test case executes the following tests:
- allocates an IP, associates the IP to the instance, then pings the instance.
- disassociates the allocated IP, then pings the instance.
- releases the allocated IP address
If any of the tests fail, the test case will error out, logging the results.
"""
if not self.reservation:
reservation = self.tester.ec2.run_image(**self.run_instance_params)
else:
reservation = self.reservation
for instance in reservation.instances:
if instance.ip_address == instance.private_ip_address:
self.tester.debug("WARNING: System or Static mode detected, skipping ElasticIps")
return reservation
self.address = self.tester.ec2.allocate_address(domain=instance.vpc_id)
self.assertTrue(self.address, 'Unable to allocate address')
self.tester.ec2.associate_address(instance, self.address)
instance.update()
self.assertTrue(self.tester.ping(instance.ip_address), "Could not ping instance with new IP")
self.tester.ec2.disassociate_address_from_instance(instance)
self.tester.ec2.release_address(self.address)
self.address = None
assert isinstance(instance, EuInstance)
self.tester.sleep(5)
instance.update()
self.assertTrue(self.tester.ping(instance.ip_address), "Could not ping after dissassociate")
self.set_reservation(reservation)
return reservation
def MultipleInstances(self):
"""
This case was developed to test the maximum number of m1.small vm types a configured
cloud can run. The test runs the maximum number of m1.small vm types allowed, then
tests to see if all the instances reached a running state. If there is a failure,
the test case errors out; logging the results.
"""
if self.reservation:
self.tester.ec2.terminate_instances(self.reservation)
self.set_reservation(None)
reservation = self.tester.ec2.run_image(min=2, max=2, **self.run_instance_params)
self.assertTrue(self.tester.ec2.wait_for_reservation(reservation), 'Not all instances went to running')
self.set_reservation(reservation)
return reservation
def LargestInstance(self):
"""
This case was developed to test the maximum number of c1.xlarge vm types a configured
cloud can run. The test runs the maximum number of c1.xlarge vm types allowed, then
tests to see if all the instances reached a running state. If there is a failure,
the test case errors out; logging the results.
"""
if self.reservation:
self.tester.ec2.terminate_instances(self.reservation)
self.set_reservation(None)
reservation = self.tester.ec2.run_image(type="c1.xlarge", **self.run_instance_params)
self.assertTrue(self.tester.ec2.wait_for_reservation(reservation), 'Not all instances went to running')
self.set_reservation(reservation)
return reservation
def MetaData(self):
"""
This case was developed to test the metadata service of an instance for consistency.
The following meta-data attributes are tested:
- public-keys/0/openssh-key
- security-groups
- instance-id
- local-ipv4
- public-ipv4
- ami-id
- ami-launch-index
- reservation-id
- placement/availability-zone
- kernel-id
- public-hostname
- local-hostname
- hostname
- ramdisk-id
- instance-type
- any bad metadata that shouldn't be present.
Missing nodes
['block-device-mapping/', 'ami-manifest-path']
If any of these tests fail, the test case will error out; logging the results.
"""
if not self.reservation:
reservation = self.tester.ec2.run_image(**self.run_instance_params)
else:
reservation = self.reservation
for instance in reservation.instances:
## Need to verify the public key (could just be checking for a string of a certain length)
self.assertTrue(re.match(instance.get_metadata("public-keys/0/openssh-key")[0].split('eucalyptus.')[-1],
self.keypair.name), 'Incorrect public key in metadata')
self.assertTrue(re.match(instance.get_metadata("security-groups")[0], self.group.name),
'Incorrect security group in metadata')
# Need to validate block device mapping
#self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], ""))
self.assertTrue(re.match(instance.get_metadata("instance-id")[0], instance.id),
'Incorrect instance id in metadata')
self.assertTrue(re.match(instance.get_metadata("local-ipv4")[0], instance.private_ip_address),
'Incorrect private ip in metadata')
self.assertTrue(re.match(instance.get_metadata("public-ipv4")[0], instance.ip_address),
'Incorrect public ip in metadata')
self.assertTrue(re.match(instance.get_metadata("ami-id")[0], instance.image_id),
'Incorrect ami id in metadata')
self.assertTrue(re.match(instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index),
'Incorrect launch index in metadata')
self.assertTrue(re.match(instance.get_metadata("reservation-id")[0], reservation.id),
'Incorrect reservation in metadata')
self.assertTrue(re.match(instance.get_metadata("placement/availability-zone")[0], instance.placement),
'Incorrect availability-zone in metadata')
if self.image.virtualization_type == "paravirtual":
self.assertTrue(re.match(instance.get_metadata("kernel-id")[0], instance.kernel),
'Incorrect kernel id in metadata')
self.assertTrue(re.match(instance.get_metadata("ramdisk-id")[0], instance.ramdisk),
'Incorrect ramdisk in metadata')
self.assertTrue(re.match(instance.get_metadata("public-hostname")[0], instance.public_dns_name),
'Incorrect public host name in metadata')
self.assertTrue(re.match(instance.get_metadata("local-hostname")[0], instance.private_dns_name),
'Incorrect private host name in metadata')
self.assertTrue(re.match(instance.get_metadata("hostname")[0], instance.private_dns_name),
'Incorrect host name in metadata')
self.assertTrue(re.match(instance.get_metadata("instance-type")[0], instance.instance_type),
'Incorrect instance type in metadata')
bad_meta_data_keys = ['foobar']
for key in bad_meta_data_keys:
self.assertTrue(re.search("Not Found", "".join(instance.get_metadata(key))),
'No fail message on invalid meta-data node')
self.set_reservation(reservation)
return reservation
def DNSResolveCheck(self):
"""
This case was developed to test DNS resolution information for public/private DNS
names and IP addresses. The tested DNS resolution behavior is expected to follow
AWS EC2. The following tests are ran using the associated meta-data attributes:
- check to see if Eucalyptus Dynamic DNS is configured
- nslookup on hostname; checks to see if it matches local-ipv4
- nslookup on local-hostname; check to see if it matches local-ipv4
- nslookup on local-ipv4; check to see if it matches local-hostname
- nslookup on public-hostname; check to see if it matches local-ipv4
- nslookup on public-ipv4; check to see if it matches public-host
If any of these tests fail, the test case will error out; logging the results.
"""
if not self.reservation:
reservation = self.tester.ec2.run_image(**self.run_instance_params)
else:
reservation = self.reservation
def validate_instance_dns():
try:
for instance in reservation.instances:
if not re.search("internal", instance.private_dns_name):
self.tester.debug("Did not find instance DNS enabled, skipping test")
self.set_reservation(reservation)
return reservation
self.debug('\n'
'# Test to see if Dynamic DNS has been configured \n'
'# Per AWS standard, resolution should have private hostname or '
'private IP as a valid response\n'
'# Perform DNS resolution against public IP and public DNS name\n'
'# Perform DNS resolution against private IP and private DNS name\n'
'# Check to see if nslookup was able to resolve\n')
assert isinstance(instance, EuInstance)
self.debug('Check nslookup to resolve public DNS Name to local-ipv4 address')
self.assertTrue(instance.found("nslookup " + instance.public_dns_name,
instance.private_ip_address), "Incorrect DNS resolution for hostname.")
self.debug('Check nslookup to resolve public-ipv4 address to public DNS name')
if self.managed_network:
self.assertTrue(instance.found("nslookup " + instance.ip_address,
instance.public_dns_name),
"Incorrect DNS resolution for public IP address")
self.debug('Check nslookup to resolve private DNS Name to local-ipv4 address')
if self.managed_network:
self.assertTrue(instance.found("nslookup " + instance.private_dns_name,
instance.private_ip_address),
"Incorrect DNS resolution for private hostname.")
self.debug('Check nslookup to resolve local-ipv4 address to private DNS name')
self.assertTrue(instance.found("nslookup " + instance.private_ip_address,
instance.private_dns_name),
"Incorrect DNS resolution for private IP address")
self.debug('Attempt to ping instance public_dns_name')
self.assertTrue(self.tester.ping(instance.public_dns_name))
return True
except Exception, e:
return False
self.tester.ec2.wait_for_result(validate_instance_dns, True, timeout=120)
self.set_reservation(reservation)
return reservation
def Reboot(self):
"""
This case was developed to test IP connectivity and volume attachment after
instance reboot. The following tests are done for this test case:
- creates a 1 gig EBS volume, then attach volume
- reboot instance
- attempts to connect to instance via ssh
- checks to see if EBS volume is attached
- detaches volume
- deletes volume
If any of these tests fail, the test case will error out; logging the results.
"""
if not self.reservation:
reservation = self.tester.ec2.run_image(**self.run_instance_params)
else:
reservation = self.reservation
for instance in reservation.instances:
### Create 1GB volume in first AZ
volume = self.tester.ec2.create_volume(instance.placement, size=1, timepergig=180)
instance.attach_volume(volume)
### Reboot instance
instance.reboot_instance_and_verify(waitconnect=20)
instance.detach_euvolume(volume)
self.tester.ec2.delete_volume(volume)
self.set_reservation(reservation)
return reservation
def Churn(self):
"""
This case was developed to test robustness of Eucalyptus by starting instances,
stopping them before they are running, and increase the time to terminate on each
iteration. This test case leverages the BasicInstanceChecks test case. The
following steps are ran:
- runs BasicInstanceChecks test case 5 times, 10 second apart.
- While each test is running, run and terminate instances with a 10sec sleep in between.
- When a test finishes, rerun BasicInstanceChecks test case.
If any of these tests fail, the test case will error out; logging the results.
"""
if self.reservation:
self.tester.ec2.terminate_instances(self.reservation)
self.set_reservation(None)
try:
available_instances_before = self.tester.get_available_vms(zone=self.zone)
if available_instances_before > 4:
count = 4
else:
count = available_instances_before
except IndexError, e:
self.debug("Running as non-admin, defaulting to 4 VMs")
available_instances_before = count = 4
future_instances = []
with ThreadPoolExecutor(max_workers=count) as executor:
## Start asynchronous activity
## Run 5 basic instance check instances 10s apart
for i in xrange(count):
future_instances.append(executor.submit(self.BasicInstanceChecks))
self.tester.sleep(10)
with ThreadPoolExecutor(max_workers=count) as executor:
## Start asynchronous activity
## Terminate all instances
for future in future_instances:
executor.submit(self.tester.ec2.terminate_instances, future.result())
def available_after_greater():
try:
return self.tester.get_available_vms(zone=self.zone) >= available_instances_before
except IndexError, e:
self.debug("Running as non-admin, skipping validation of available VMs.")
return True
self.tester.ec2.wait_for_result(available_after_greater, result=True, timeout=360)
def PrivateIPAddressing(self):
"""
This case was developed to test instances that are launched with private-addressing
set to True. The tests executed are as follows:
- run an instance with private-addressing set to True
- allocate/associate/disassociate/release an Elastic IP to that instance
- check to see if the instance went back to private addressing
If any of these tests fail, the test case will error out; logging the results.
"""
if self.reservation:
for instance in self.reservation.instances:
if instance.ip_address == instance.private_ip_address:
self.tester.debug("WARNING: System or Static mode detected, skipping PrivateIPAddressing")
return self.reservation
self.tester.ec2.terminate_instances(self.reservation)
self.set_reservation(None)
reservation = self.tester.ec2.run_image(private_addressing=True,
auto_connect=False,
**self.run_instance_params)
for instance in reservation.instances:
address = self.tester.ec2.allocate_address()
self.assertTrue(address, 'Unable to allocate address')
self.tester.ec2.associate_address(instance, address)
self.tester.sleep(30)
instance.update()
self.debug('Attempting to ping associated IP:"{0}"'.format(address.public_ip))
self.assertTrue(self.tester.ping(instance.ip_address), "Could not ping instance with new IP")
address.disassociate()
self.tester.sleep(30)
instance.update()
self.debug('Confirming disassociated IP:"{0}" is no longer in use'
.format(address.public_ip))
self.assertFalse(self.tester.ping(address.public_ip, poll_count=3),
"Was able to ping address that should no long be associated with an "
"instance")
address.release()
if instance.ip_address != "0.0.0.0" and instance.ip_address != instance.private_ip_address:
self.fail("Instance received a new public IP: " + instance.ip_address)
self.tester.ec2.terminate_instances(reservation)
self.set_reservation(reservation)
return reservation
def ReuseAddresses(self):
"""
This case was developed to test when you run instances in a series, and make sure
they get the same address. The test launches an instance, checks the IP information,
then terminates the instance. This test is launched 5 times in a row. If there
is an error, the test case will error out; logging the results.
"""
prev_address = None
if self.reservation:
self.tester.ec2.terminate_instances(self.reservation)
self.set_reservation(None)
for i in xrange(5):
reservation = self.tester.ec2.run_image(**self.run_instance_params)
for instance in reservation.instances:
if prev_address is not None:
self.assertTrue(re.search(str(prev_address), str(instance.ip_address)),
str(prev_address) + " Address did not get reused but rather " +
str(instance.public_dns_name))
prev_address = instance.ip_address
self.tester.ec2.terminate_instances(reservation)
def BundleInstance(self):
if not self.reservation:
self.reservation = self.tester.ec2.run_image(**self.run_instance_params)
original_image = self.run_instance_params['image']
for instance in self.reservation.instances:
current_time = str(int(time.time()))
temp_file = "/root/my-new-file-" + current_time
instance.sys("touch " + temp_file)
self.tester.sleep(60)
starting_uptime = instance.get_uptime()
self.run_instance_params['image'] = self.tester.ec2.bundle_instance_monitor_and_register(instance)
instance.connect_to_instance()
ending_uptime = instance.get_uptime()
if ending_uptime > starting_uptime:
raise Exception("Instance did not get stopped then started")
bundled_image_reservation = self.tester.ec2.run_image(**self.run_instance_params)
for new_instance in bundled_image_reservation.instances:
new_instance.sys("ls " + temp_file, code=0)
self.tester.ec2.terminate_instances(bundled_image_reservation)
self.run_instance_params['image'] = original_image
if __name__ == "__main__":
testcase = EutesterTestCase(name='instancetest')
testcase.setup_parser(description="Test the Eucalyptus EC2 instance store image functionality.")
testcase.get_args()
instancetestsuite = testcase.do_with_args(InstanceBasics)
### Either use the list of tests passed from config/command line to determine what subset of tests to run
test_list = testcase.args.tests or ["BasicInstanceChecks", "DNSResolveCheck", "Reboot", "MetaData", "ElasticIps",
"MultipleInstances", "LargestInstance", "PrivateIPAddressing", "Churn"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = []
for test in test_list:
test = getattr(instancetestsuite, test)
unit_list.append(testcase.create_testunit_from_method(test))
testcase.clean_method = instancetestsuite.clean_method
result = testcase.run_test_case_list(unit_list)
exit(result)
| {
"content_hash": "8fe8155e16dc4acd740996018178ac0a",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 128,
"avg_line_length": 54.90243902439025,
"alnum_prop": 0.6066563009033022,
"repo_name": "nephomaniac/eutester",
"id": "0cbfd8c42a41931237b77154256a6706fe1d9963",
"size": "27291",
"binary": false,
"copies": "1",
"ref": "refs/heads/reorg1",
"path": "eutester/testcases/instances/instancetest.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Erlang",
"bytes": "15990"
},
{
"name": "Groovy",
"bytes": "162726"
},
{
"name": "HTML",
"bytes": "1792"
},
{
"name": "Java",
"bytes": "863698"
},
{
"name": "Python",
"bytes": "2054823"
},
{
"name": "RobotFramework",
"bytes": "4827"
}
],
"symlink_target": ""
} |