max_stars_repo_path
stringlengths 5
128
| max_stars_repo_name
stringlengths 8
105
| max_stars_count
int64 0
41.3k
| id
stringlengths 5
5
| content
stringlengths 19
155k
| content_cleaned
stringlengths 17
155k
| language
stringclasses 18
values | language_score
float64 0.05
1
| edu_score
float64 0.76
4.4
| edu_int_score
int64 1
4
|
---|---|---|---|---|---|---|---|---|---|
src/client/__init__.py | kyehyukahn/scp-prototype | 1 | 13600 | <gh_stars>1-10
from .client import send_message, MessageInfo # noqa
| from .client import send_message, MessageInfo # noqa | pt | 0.394397 | 0.97895 | 1 |
src/comparing_scoring_seasons.py | davgav123/Mining_NBA | 0 | 13601 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from pathlib import Path
import pandas as pd
from numpy import around
if __name__ == "__main__":
# Harden's PPG is from 2018-19 season
# Bryant's PPG is from 2005-06 season
# Jordan's PPG is from 1986-87 season
per_game_df = pd.read_csv(Path('../data/compare_players_per_game.csv'))
per_48_df = pd.read_csv(Path('../data/compare_players_per_48.csv'))
per_100_df = pd.read_csv(Path('../data/compare_players_per_100_poss.csv'))
avg_TS_for_2018_19_season = 0.560 # source: https://www.basketball-reference.com/leagues/NBA_2019.html#all_misc_stats
avg_TS_for_2005_06_season = 0.536 # source: https://www.basketball-reference.com/leagues/NBA_2006.html#all_misc_stats
avg_TS_for_1986_87_season = 0.538 # source: https://www.basketball-reference.com/leagues/NBA_1987.html#all_misc_stats
# per game
per_game_harden = per_game_df[per_game_df['Player'] == '<NAME>']
per_game_bryant = per_game_df[per_game_df['Player'] == '<NAME>']
per_game_jordan = per_game_df[per_game_df['Player'] == '<NAME>']
harden_ppg = per_game_harden['PTS'].values[0]
bryant_ppg = per_game_bryant['PTS'].values[0]
jordan_ppg = per_game_jordan['PTS'].values[0]
# shooting stats
harden_efg = per_game_harden['eFG%'].values[0]
bryant_efg = per_game_bryant['eFG%'].values[0]
jordan_efg = per_game_jordan['eFG%'].values[0]
harden_ts = per_game_harden['TS%'].values[0]
bryant_ts = per_game_bryant['TS%'].values[0]
jordan_ts = per_game_jordan['TS%'].values[0]
# number of games
harden_g = per_game_harden['G'].values[0]
bryant_g = per_game_bryant['G'].values[0]
jordan_g = per_game_jordan['G'].values[0]
# minutes per game
harden_mpg = per_game_harden['MP'].values[0]
bryant_mpg = per_game_bryant['MP'].values[0]
jordan_mpg = per_game_jordan['MP'].values[0]
# per 48
per_48_harden = per_48_df[per_48_df['Player'] == '<NAME>']
per_48_bryant = per_48_df[per_48_df['Player'] == '<NAME>']
per_48_jordan = per_48_df[per_48_df['Player'] == '<NAME>']
harden_pp48 = per_48_harden['PTS'].values[0]
bryant_pp48 = per_48_bryant['PTS'].values[0]
jordan_pp48 = per_48_jordan['PTS'].values[0]
# per 100
per_100_harden = per_100_df[per_100_df['Player'] == '<NAME>']
per_100_bryant = per_100_df[per_100_df['Player'] == '<NAME>']
per_100_jordan = per_100_df[per_100_df['Player'] == '<NAME>']
harden_pp100 = per_100_harden['PTS'].values[0]
bryant_pp100 = per_100_bryant['PTS'].values[0]
jordan_pp100 = per_100_jordan['PTS'].values[0]
print('<NAME> in 2018-19: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(harden_g, harden_ppg, harden_efg, harden_ts, harden_mpg))
print('He was {} more efficient than the average player in was that season'
.format(around(harden_ts - avg_TS_for_2018_19_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(harden_pp48, harden_pp100))
print('\n------------------------------------------------------------------------------------------\n')
print('<NAME> in 2005-06: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(bryant_g, bryant_ppg, bryant_efg, bryant_ts, bryant_mpg))
print('He was {} more efficient than the average player was in that season'
.format(around(bryant_ts - avg_TS_for_2005_06_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(bryant_pp48, bryant_pp100))
print('\n------------------------------------------------------------------------------------------\n')
print('<NAME> in 1986-87: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(jordan_g, jordan_ppg, jordan_efg, jordan_ts, jordan_mpg))
print('He was {} more efficient than the average player was in that season'
.format(around(jordan_ts - avg_TS_for_1986_87_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(jordan_pp48, jordan_pp100))
| #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from pathlib import Path
import pandas as pd
from numpy import around
if __name__ == "__main__":
# Harden's PPG is from 2018-19 season
# Bryant's PPG is from 2005-06 season
# Jordan's PPG is from 1986-87 season
per_game_df = pd.read_csv(Path('../data/compare_players_per_game.csv'))
per_48_df = pd.read_csv(Path('../data/compare_players_per_48.csv'))
per_100_df = pd.read_csv(Path('../data/compare_players_per_100_poss.csv'))
avg_TS_for_2018_19_season = 0.560 # source: https://www.basketball-reference.com/leagues/NBA_2019.html#all_misc_stats
avg_TS_for_2005_06_season = 0.536 # source: https://www.basketball-reference.com/leagues/NBA_2006.html#all_misc_stats
avg_TS_for_1986_87_season = 0.538 # source: https://www.basketball-reference.com/leagues/NBA_1987.html#all_misc_stats
# per game
per_game_harden = per_game_df[per_game_df['Player'] == '<NAME>']
per_game_bryant = per_game_df[per_game_df['Player'] == '<NAME>']
per_game_jordan = per_game_df[per_game_df['Player'] == '<NAME>']
harden_ppg = per_game_harden['PTS'].values[0]
bryant_ppg = per_game_bryant['PTS'].values[0]
jordan_ppg = per_game_jordan['PTS'].values[0]
# shooting stats
harden_efg = per_game_harden['eFG%'].values[0]
bryant_efg = per_game_bryant['eFG%'].values[0]
jordan_efg = per_game_jordan['eFG%'].values[0]
harden_ts = per_game_harden['TS%'].values[0]
bryant_ts = per_game_bryant['TS%'].values[0]
jordan_ts = per_game_jordan['TS%'].values[0]
# number of games
harden_g = per_game_harden['G'].values[0]
bryant_g = per_game_bryant['G'].values[0]
jordan_g = per_game_jordan['G'].values[0]
# minutes per game
harden_mpg = per_game_harden['MP'].values[0]
bryant_mpg = per_game_bryant['MP'].values[0]
jordan_mpg = per_game_jordan['MP'].values[0]
# per 48
per_48_harden = per_48_df[per_48_df['Player'] == '<NAME>']
per_48_bryant = per_48_df[per_48_df['Player'] == '<NAME>']
per_48_jordan = per_48_df[per_48_df['Player'] == '<NAME>']
harden_pp48 = per_48_harden['PTS'].values[0]
bryant_pp48 = per_48_bryant['PTS'].values[0]
jordan_pp48 = per_48_jordan['PTS'].values[0]
# per 100
per_100_harden = per_100_df[per_100_df['Player'] == '<NAME>']
per_100_bryant = per_100_df[per_100_df['Player'] == '<NAME>']
per_100_jordan = per_100_df[per_100_df['Player'] == '<NAME>']
harden_pp100 = per_100_harden['PTS'].values[0]
bryant_pp100 = per_100_bryant['PTS'].values[0]
jordan_pp100 = per_100_jordan['PTS'].values[0]
print('<NAME> in 2018-19: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(harden_g, harden_ppg, harden_efg, harden_ts, harden_mpg))
print('He was {} more efficient than the average player in was that season'
.format(around(harden_ts - avg_TS_for_2018_19_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(harden_pp48, harden_pp100))
print('\n------------------------------------------------------------------------------------------\n')
print('<NAME> in 2005-06: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(bryant_g, bryant_ppg, bryant_efg, bryant_ts, bryant_mpg))
print('He was {} more efficient than the average player was in that season'
.format(around(bryant_ts - avg_TS_for_2005_06_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(bryant_pp48, bryant_pp100))
print('\n------------------------------------------------------------------------------------------\n')
print('<NAME> in 1986-87: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(jordan_g, jordan_ppg, jordan_efg, jordan_ts, jordan_mpg))
print('He was {} more efficient than the average player was in that season'
.format(around(jordan_ts - avg_TS_for_1986_87_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(jordan_pp48, jordan_pp100))
| it | 0.123513 | 2.862669 | 3 |
modules/citymap.py | sebastianbernasek/dataincubator | 0 | 13602 | <gh_stars>0
from os.path import join
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
from matplotlib.dates import YearLocator, DateFormatter
from matplotlib.animation import FuncAnimation
class CityMap:
    """Choropleth map of Chicago zip codes, animatable over a monthly timeseries.

    Assumes *timeseries* is a DataFrame indexed by date with one column per
    zip code (inferred from the resample/transpose below) -- confirm against
    callers.
    """
    def __init__(self,
                 timeseries,
                 dirpath='./chicago/',
                 cbar=True,
                 timeline=True,
                 figsize=(6, 6),
                 cmap=None,
                 vmin=-1,
                 vmax=1,
                 label=None,
                 bg='w',
                 **kwargs):
        self.cbar = cbar
        self.timeline = timeline
        # load geomap (city outline + per-zip polygons)
        self.citylimits = gpd.read_file(join(dirpath, 'chicago.geojson'))
        self.ziplimits = gpd.read_file(join(dirpath, 'chicago_zips.geojson'))
        self.ziplimits.zip = self.ziplimits.zip.astype(int)
        # exclude NaNs: drop any date row with a missing value
        timeseries = timeseries[~timeseries.isna().any(axis=1)]
        # add timeseries (smoothed): monthly resample + interpolation, then
        # transpose so rows are zip codes and columns are dates.
        self.timeseries = timeseries.resample('1M', axis=0).interpolate().transpose()
        # set colormap
        if cmap is None:
            cmap = plt.cm.Blues
        cmap.set_bad(bg)  # masked (NaN) zips are drawn in the background color
        self.cmap = cmap
        # 'min'/'max' sentinels scale the color range to the data extremes
        if vmin == 'min':
            vmin = self.timeseries.min().min()
        if vmax == 'max':
            vmax = self.timeseries.max().max()
        self.norm = Normalize(vmin=vmin, vmax=vmax)
        # create figure and plot city limits
        self.create_figure(figsize=figsize, cbar=cbar, timeline=timeline)
        #self.initialize_city_limits(**kwargs)
        self.initialize_zip_codes(**kwargs)
        # add colorbar
        if self.cbar:
            self.draw_colorbar(cmap, vmin, vmax, label=label)
        # add timeline
        if self.timeline:
            self.draw_timeline()
    @property
    def zipcodes(self):
        """ Unique zipcodes. """
        return self.ziplimits.zip.unique().astype(int)
    @property
    def num_frames(self):
        # Number of animation frames == number of dates (columns).
        return self.timeseries.shape[1]
    @property
    def ax(self):
        # Map axes.
        return self.fig.axes[self.map_ax_ind]
    @property
    def cax(self):
        # Colorbar axes (only valid when cbar=True).
        return self.fig.axes[self.cbar_ax_ind]
    @property
    def tax(self):
        # Timeline axes (only valid when timeline=True).
        return self.fig.axes[self.timeline_ax_ind]
    def create_figure(self, figsize=(4, 4), cbar=True, timeline=True):
        """ Create figure. """
        self.fig = plt.figure(figsize=figsize)
        # 3x3 grid: optional colorbar strip on top, map in the middle,
        # optional timeline strip along the bottom.
        gs = GridSpec(nrows=3, ncols=3, height_ratios=(1,25,3), width_ratios=[1,2,1])
        gs.update(wspace=0., hspace=0)
        lb = 0  # first grid row available to the map
        if cbar:
            self.cbar_ax_ind = len(self.fig.axes)
            self.fig.add_subplot(gs[0, 1])
            lb += 1
        if timeline:
            self.timeline_ax_ind = len(self.fig.axes)
            self.fig.add_subplot(gs[-1, :])
            # add map axis
            self.fig.add_subplot(gs[lb:-1, :])
        else:
            self.fig.add_subplot(gs[lb:, :])
        self.map_ax_ind = len(self.fig.axes) - 1
        # turn off axes
        self.ax.axis('off')
        self.ax.set_aspect(1)
    def draw_timeline(self):
        """Draw the date axis along the bottom strip of the figure."""
        self.tax.set_yticks([])
        self.tax.spines['top'].set_visible(False)
        self.tax.spines['left'].set_visible(False)
        self.tax.spines['right'].set_visible(False)
        start = self.timeseries.columns.min()
        stop = self.timeseries.columns.max()
        # Invisible line pins the x-limits to the full date range.
        self.tax.plot((start, stop), (0,0), alpha=0)
        self.tax.get_xaxis().set_major_locator(YearLocator(1, month=3))
        self.tax.get_xaxis().set_major_formatter(DateFormatter("%Y"))
        self.tax.xaxis.set_tick_params(rotation=45)
        self.tax.tick_params(pad=0, length=0)
        self.tax.set_ylim(0, 1)
    def draw_colorbar(self, cmap, vmin, vmax, label=None):
        """Draw a horizontal, tick-less colorbar with the label on top."""
        norm = Normalize(vmin=vmin, vmax=vmax)
        cbar = ColorbarBase(self.cax, cmap=cmap, norm=norm, orientation='horizontal')
        cbar.set_ticks([])
        cbar.set_label(label)
        cbar.ax.xaxis.set_label_position('top')
    def initialize_city_limits(self, color='w', edgecolor='k', lw=.5, **kwargs):
        """ Add city limits to axes. """
        self.citylimits.plot(ax=self.ax, color=color, edgecolor=edgecolor, lw=lw, **kwargs)
    def initialize_zip_codes(self, **kwargs):
        """ Add zipcodes to axes. """
        # build shader
        shader = self.build_shader(0)
        # shade zipcode polygons (vmin=vmax=0 is a placeholder; the real
        # facecolors are applied immediately below via shade()).
        shader.plot(column='VALUE', cmap=plt.cm.Greys, vmin=0, vmax=0, ax=self.ax, **kwargs)
        self.shade(0)
        # set date marker
        if self.timeline:
            self.tax.plot(self.timeseries.columns[0], .5, '.k', markersize=10)
    def build_shader(self, index):
        """Return ziplimits joined with frame *index* values (column 'VALUE')."""
        # get color vector
        colors = self.timeseries.iloc[:, index].rename('VALUE')
        # join with zipdata
        shader = self.ziplimits.join(colors, on='zip', how='left')
        return shader
    def shade(self, index):
        """Recolor the zip-code polygons for frame *index*."""
        # build shader
        shader = self.build_shader(index)
        # shade zipcodes; NaNs are masked so they take the cmap's "bad" color
        colors = self.cmap(np.ma.masked_invalid(self.norm(shader.VALUE.values)))
        self.ax.collections[-1].set_facecolors(colors)
    def set_title(self, index):
        """Title the map with the date of frame *index* (e.g. 'Jan-2019')."""
        date = self.timeseries.columns[index].strftime('%b-%Y')
        self.ax.set_title(date)
    def update(self, index):
        """Animation callback: recolor polygons and move the date marker."""
        self.shade(index)
        if self.timeline:
            self.mark_time(index)
    def mark_time(self, index):
        """Move the timeline's date marker to frame *index*."""
        date = self.timeseries.columns[index]
        self.tax.lines[0].set_data(date, 0.5)
    def fix_date(self, date):
        """Render the frame matching *date*."""
        # get_loc on the resampled DatetimeIndex returns a slice; take its start.
        index = self.timeseries.columns.get_loc(date).start
        self.update(index)
    def animate(self, filepath, fps=12, dpi=150):
        """Render every frame and save the animation to *filepath*."""
        vid = FuncAnimation(self.fig, self.update, frames=np.arange(self.num_frames))
        vid.save(filepath, fps=fps, dpi=dpi)
| from os.path import join
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
from matplotlib.dates import YearLocator, DateFormatter
from matplotlib.animation import FuncAnimation
class CityMap:
def __init__(self,
timeseries,
dirpath='./chicago/',
cbar=True,
timeline=True,
figsize=(6, 6),
cmap=None,
vmin=-1,
vmax=1,
label=None,
bg='w',
**kwargs):
self.cbar = cbar
self.timeline = timeline
# load geomap
self.citylimits = gpd.read_file(join(dirpath, 'chicago.geojson'))
self.ziplimits = gpd.read_file(join(dirpath, 'chicago_zips.geojson'))
self.ziplimits.zip = self.ziplimits.zip.astype(int)
# exclude NaNs
timeseries = timeseries[~timeseries.isna().any(axis=1)]
# add timeseries (smoothed)
self.timeseries = timeseries.resample('1M', axis=0).interpolate().transpose()
# set colormap
if cmap is None:
cmap = plt.cm.Blues
cmap.set_bad(bg)
self.cmap = cmap
if vmin == 'min':
vmin = self.timeseries.min().min()
if vmax == 'max':
vmax = self.timeseries.max().max()
self.norm = Normalize(vmin=vmin, vmax=vmax)
# create figure and plot city limits
self.create_figure(figsize=figsize, cbar=cbar, timeline=timeline)
#self.initialize_city_limits(**kwargs)
self.initialize_zip_codes(**kwargs)
# add colorbar
if self.cbar:
self.draw_colorbar(cmap, vmin, vmax, label=label)
# add timeline
if self.timeline:
self.draw_timeline()
@property
def zipcodes(self):
""" Unique zipcodes. """
return self.ziplimits.zip.unique().astype(int)
@property
def num_frames(self):
return self.timeseries.shape[1]
@property
def ax(self):
return self.fig.axes[self.map_ax_ind]
@property
def cax(self):
return self.fig.axes[self.cbar_ax_ind]
@property
def tax(self):
return self.fig.axes[self.timeline_ax_ind]
def create_figure(self, figsize=(4, 4), cbar=True, timeline=True):
""" Create figure. """
self.fig = plt.figure(figsize=figsize)
gs = GridSpec(nrows=3, ncols=3, height_ratios=(1,25,3), width_ratios=[1,2,1])
gs.update(wspace=0., hspace=0)
lb = 0
if cbar:
self.cbar_ax_ind = len(self.fig.axes)
self.fig.add_subplot(gs[0, 1])
lb += 1
if timeline:
self.timeline_ax_ind = len(self.fig.axes)
self.fig.add_subplot(gs[-1, :])
# add map axis
self.fig.add_subplot(gs[lb:-1, :])
else:
self.fig.add_subplot(gs[lb:, :])
self.map_ax_ind = len(self.fig.axes) - 1
# turn off axes
self.ax.axis('off')
self.ax.set_aspect(1)
def draw_timeline(self):
self.tax.set_yticks([])
self.tax.spines['top'].set_visible(False)
self.tax.spines['left'].set_visible(False)
self.tax.spines['right'].set_visible(False)
start = self.timeseries.columns.min()
stop = self.timeseries.columns.max()
self.tax.plot((start, stop), (0,0), alpha=0)
self.tax.get_xaxis().set_major_locator(YearLocator(1, month=3))
self.tax.get_xaxis().set_major_formatter(DateFormatter("%Y"))
self.tax.xaxis.set_tick_params(rotation=45)
self.tax.tick_params(pad=0, length=0)
self.tax.set_ylim(0, 1)
def draw_colorbar(self, cmap, vmin, vmax, label=None):
norm = Normalize(vmin=vmin, vmax=vmax)
cbar = ColorbarBase(self.cax, cmap=cmap, norm=norm, orientation='horizontal')
cbar.set_ticks([])
cbar.set_label(label)
cbar.ax.xaxis.set_label_position('top')
def initialize_city_limits(self, color='w', edgecolor='k', lw=.5, **kwargs):
""" Add city limits to axes. """
self.citylimits.plot(ax=self.ax, color=color, edgecolor=edgecolor, lw=lw, **kwargs)
def initialize_zip_codes(self, **kwargs):
""" Add zipcodes to axes. """
# build shader
shader = self.build_shader(0)
# shade zipcode polygons
shader.plot(column='VALUE', cmap=plt.cm.Greys, vmin=0, vmax=0, ax=self.ax, **kwargs)
self.shade(0)
# set date marker
if self.timeline:
self.tax.plot(self.timeseries.columns[0], .5, '.k', markersize=10)
def build_shader(self, index):
# get color vector
colors = self.timeseries.iloc[:, index].rename('VALUE')
# join with zipdata
shader = self.ziplimits.join(colors, on='zip', how='left')
return shader
def shade(self, index):
# build shader
shader = self.build_shader(index)
# shade zipcodes
colors = self.cmap(np.ma.masked_invalid(self.norm(shader.VALUE.values)))
self.ax.collections[-1].set_facecolors(colors)
def set_title(self, index):
date = self.timeseries.columns[index].strftime('%b-%Y')
self.ax.set_title(date)
def update(self, index):
self.shade(index)
if self.timeline:
self.mark_time(index)
def mark_time(self, index):
date = self.timeseries.columns[index]
self.tax.lines[0].set_data(date, 0.5)
def fix_date(self, date):
index = self.timeseries.columns.get_loc(date).start
self.update(index)
def animate(self, filepath, fps=12, dpi=150):
vid = FuncAnimation(self.fig, self.update, frames=np.arange(self.num_frames))
vid.save(filepath, fps=fps, dpi=dpi) | pt | 0.133473 | 2.35726 | 2 |
w1data/metadata.py | swork/w1-datalogger | 0 | 13603 | <reponame>swork/w1-datalogger
import logging, sys
logger = logging.getLogger(__name__)
def measurement_for_skey(sensor_key, metadata):
    """Look up the measurement name configured for *sensor_key*.

    Expects metadata shaped like
    {'collector': {'sensors': {<sensor_key>: {'name': ...}}}}.
    """
    sensors = metadata['collector']['sensors']
    return sensors[sensor_key]['name']
| import logging, sys
logger = logging.getLogger(__name__)
def measurement_for_skey(sensor_key, metadata):
# logger.debug("sensor_key:{} metadata:{}".format(sensor_key, metadata))
return metadata['collector']['sensors'][sensor_key]['name'] | pt | 0.18319 | 2.562252 | 3 |
easyvista/setup.py | GreyNoise-Intelligence/insightconnect-plugins | 0 | 13604 | # GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name="easyvista-rapid7-plugin",
version="1.0.0",
description="EasyVista Service Manager platform supports even the most complex requirements, while bringing a new level of simplicity, agility, and mobility required to make cloud based IT Service Management (ITSM) software easy to use and easy to deliver. Using the EasyVista plugin for Rapid7 InsightConnect, users can manage the creation, update, search and closure of incident, service request, problem or event tickets",
author="rapid7",
author_email="",
url="",
packages=find_packages(),
install_requires=['insightconnect-plugin-runtime'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/icon_easyvista']
)
| # GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name="easyvista-rapid7-plugin",
version="1.0.0",
description="EasyVista Service Manager platform supports even the most complex requirements, while bringing a new level of simplicity, agility, and mobility required to make cloud based IT Service Management (ITSM) software easy to use and easy to deliver. Using the EasyVista plugin for Rapid7 InsightConnect, users can manage the creation, update, search and closure of incident, service request, problem or event tickets",
author="rapid7",
author_email="",
url="",
packages=find_packages(),
install_requires=['insightconnect-plugin-runtime'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/icon_easyvista']
)
| pt | 0.354318 | 1.406335 | 1 |
dbms/tests/integration/helpers/test_tools.py | qqiangwu/ClickHouse | 4 | 13605 | <filename>dbms/tests/integration/helpers/test_tools.py
import difflib
import time
class TSV:
    """Helper to get pretty diffs between expected and actual tab-separated value files"""
    def __init__(self, contents):
        # NOTE: Python 2 code -- `file` here is the py2 built-in file type.
        # Accepts either an open file object or a raw string; blank lines are dropped.
        raw_lines = contents.readlines() if isinstance(contents, file) else contents.splitlines(True)
        self.lines = [l.strip() for l in raw_lines if l.strip()]
    def __eq__(self, other):
        return self.lines == other.lines
    def __ne__(self, other):
        return self.lines != other.lines
    def diff(self, other, n1=None, n2=None):
        # Unified diff of the two line lists; [2:] drops the ---/+++ header lines.
        return list(line.rstrip() for line in difflib.unified_diff(self.lines, other.lines, fromfile=n1, tofile=n2))[2:]
    def __str__(self):
        return '\n'.join(self.lines)
    @staticmethod
    def toMat(contents):
        # Parse raw TSV text into a matrix (list of row lists), skipping blank lines.
        return [line.split("\t") for line in contents.split("\n") if line.strip()]
def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_time=0.5, stdin=None, timeout=None, settings=None, user=None, ignore_error=False):
    """Poll `instance.query(query)` until its TSV output equals `expectation`.

    Retries up to `retry_count` times, sleeping `sleep_time` seconds between
    attempts; on final mismatch raises AssertionError with a unified diff.
    NOTE: Python 2 code (`xrange`, print statement). The stdin/timeout/
    settings/user/ignore_error parameters are accepted but unused here --
    presumably kept for signature compatibility with similar helpers; verify.
    """
    expectation_tsv = TSV(expectation)
    for i in xrange(retry_count):
        try:
            if TSV(instance.query(query)) == expectation_tsv:
                break
            time.sleep(sleep_time)
        except Exception as ex:
            print "assert_eq_with_retry retry {} exception {}".format(i + 1, ex)
            time.sleep(sleep_time)
    else:
        # for/else: runs only if the loop exhausted without a matching break.
        # Re-query once more and fail with a readable diff.
        val = TSV(instance.query(query))
        if expectation_tsv != val:
            raise AssertionError("'{}' != '{}'\n{}".format(expectation_tsv, val, '\n'.join(expectation_tsv.diff(val, n1="expectation", n2="query"))))
| <filename>dbms/tests/integration/helpers/test_tools.py
import difflib
import time
class TSV:
"""Helper to get pretty diffs between expected and actual tab-separated value files"""
def __init__(self, contents):
raw_lines = contents.readlines() if isinstance(contents, file) else contents.splitlines(True)
self.lines = [l.strip() for l in raw_lines if l.strip()]
def __eq__(self, other):
return self.lines == other.lines
def __ne__(self, other):
return self.lines != other.lines
def diff(self, other, n1=None, n2=None):
return list(line.rstrip() for line in difflib.unified_diff(self.lines, other.lines, fromfile=n1, tofile=n2))[2:]
def __str__(self):
return '\n'.join(self.lines)
@staticmethod
def toMat(contents):
return [line.split("\t") for line in contents.split("\n") if line.strip()]
def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_time=0.5, stdin=None, timeout=None, settings=None, user=None, ignore_error=False):
expectation_tsv = TSV(expectation)
for i in xrange(retry_count):
try:
if TSV(instance.query(query)) == expectation_tsv:
break
time.sleep(sleep_time)
except Exception as ex:
print "assert_eq_with_retry retry {} exception {}".format(i + 1, ex)
time.sleep(sleep_time)
else:
val = TSV(instance.query(query))
if expectation_tsv != val:
raise AssertionError("'{}' != '{}'\n{}".format(expectation_tsv, val, '\n'.join(expectation_tsv.diff(val, n1="expectation", n2="query"))))
| it | 0.24537 | 2.223975 | 2 |
env/Lib/site-packages/Algorithmia/acl.py | Vivek-Kamboj/Sargam | 142 | 13606 | class Acl(object):
def __init__(self, read_acl):
self.read_acl = read_acl
@staticmethod
def from_acl_response(acl_response):
'''Takes JSON response from API and converts to ACL object'''
if 'read' in acl_response:
read_acl = AclType.from_acl_response(acl_response['read'])
return Acl(read_acl)
else:
raise ValueError('Response does not contain read ACL')
def to_api_param(self):
read_acl_string = self.read_acl.acl_string
if read_acl_string is None:
return {'read':[]}
return {'read':[read_acl_string]}
class AclInner(object):
    """Value object pairing an ACL pseudonym with its API string form."""

    def __init__(self, pseudonym, acl_string):
        self.pseudonym = pseudonym
        self.acl_string = acl_string

    def __repr__(self):
        return 'AclType(pseudonym={0},acl_string={1})'.format(self.pseudonym, self.acl_string)
class AclType(object):
    """Known ACL levels, each mapped to the string understood by the API."""
    public = AclInner('public','user://*')
    my_algos = AclInner('my_algos','algo://.my/*')
    private = AclInner('private',None) # Really is an empty list
    default = my_algos
    types = (public, my_algos, private)
    @staticmethod
    def from_acl_response(acl_list):
        # An empty list from the API means private; otherwise match the
        # first string against the known ACL types.
        if len(acl_list) == 0:
            return AclType.private
        else:
            acl_string = acl_list[0]
            for t in AclType.types:
                if t.acl_string == acl_string:
                    return t
            # for/else: reached only if no type matched (no break/return fired).
            else:
                raise ValueError('Invalid acl string %s' % (acl_list[0]))
class ReadAcl(object):
    """Convenience pre-built Acl instances for the common permission levels."""
    public = Acl(AclType.public)
    private = Acl(AclType.private)
    my_algos = Acl(AclType.my_algos)
| class Acl(object):
def __init__(self, read_acl):
self.read_acl = read_acl
@staticmethod
def from_acl_response(acl_response):
'''Takes JSON response from API and converts to ACL object'''
if 'read' in acl_response:
read_acl = AclType.from_acl_response(acl_response['read'])
return Acl(read_acl)
else:
raise ValueError('Response does not contain read ACL')
def to_api_param(self):
read_acl_string = self.read_acl.acl_string
if read_acl_string is None:
return {'read':[]}
return {'read':[read_acl_string]}
class AclInner(object):
def __init__(self, pseudonym, acl_string):
self.pseudonym = pseudonym
self.acl_string = acl_string
def __repr__(self):
return 'AclType(pseudonym=%s,acl_string=%s)' % (self.pseudonym, self.acl_string)
class AclType(object):
public = AclInner('public','user://*')
my_algos = AclInner('my_algos','algo://.my/*')
private = AclInner('private',None) # Really is an empty list
default = my_algos
types = (public, my_algos, private)
@staticmethod
def from_acl_response(acl_list):
if len(acl_list) == 0:
return AclType.private
else:
acl_string = acl_list[0]
for t in AclType.types:
if t.acl_string == acl_string:
return t
else:
raise ValueError('Invalid acl string %s' % (acl_list[0]))
class ReadAcl(object):
public = Acl(AclType.public)
private = Acl(AclType.private)
my_algos = Acl(AclType.my_algos)
| pt | 0.27484 | 2.891967 | 3 |
nlp/name_extractor.py | Karthik-Venkatesh/ATOM | 1 | 13607 | <reponame>Karthik-Venkatesh/ATOM
#
# name_extractor.py
# ATOM
#
# Created by <NAME>.
# Updated copyright on 16/1/19 5:54 PM.
#
# Copyright © 2019 <NAME>. All rights reserved.
#
# Reference
# Question link: https://stackoverflow.com/questions/20290870/improving-the-extraction-of-human-names-with-nltk
# Answer link: https://stackoverflow.com/a/49500219/5019015
import nltk
from nltk.corpus import wordnet
class NameExtractor:
    """Extracts person names from free text using NLTK's named-entity chunker."""

    @staticmethod
    def download_required_packages():
        """Download the NLTK corpora/models required by get_human_names()."""
        nltk.download('punkt')
        nltk.download('averaged_perceptron_tagger')
        nltk.download('maxent_ne_chunker')
        nltk.download('words')
        nltk.download('wordnet')

    @staticmethod
    def get_human_names(text):
        """Return a list of candidate person names found in *text*.

        Tokenizes, POS-tags and NE-chunks the text, collects PERSON subtrees,
        then drops candidates whose first dictionary word (WordNet synset hit)
        appears in the candidate, to reduce false positives.
        """
        tokens = nltk.tokenize.word_tokenize(text)
        pos = nltk.pos_tag(tokens)
        sent = nltk.ne_chunk(pos, binary=False)
        person_list = []
        person = []
        name = ""
        for subtree in sent.subtrees(filter=lambda t: t.label() == 'PERSON'):
            for leaf in subtree.leaves():
                person.append(leaf[0])
            if len(person) > 0:
                for part in person:
                    name += part + ' '
                if name[:-1] not in person_list:
                    person_list.append(name[:-1])
                name = ''
                person = []
        person_names = person_list
        # BUGFIX: iterate over a copy. person_names aliases person_list, so
        # removing from it while iterating the same list silently skipped the
        # element immediately after each removal.
        for person in person_list[:]:
            person_split = person.split(" ")
            for name in person_split:
                if wordnet.synsets(name):
                    if name in person:
                        person_names.remove(person)
                    break
        return person_names

    @staticmethod
    def extract_names(text: str):
        """Public entry point: extract person names from *text*."""
        names = NameExtractor.get_human_names(text)
        return names
| #
# name_extractor.py
# ATOM
#
# Created by <NAME>.
# Updated copyright on 16/1/19 5:54 PM.
#
# Copyright © 2019 <NAME>. All rights reserved.
#
# Reference
# Question link: https://stackoverflow.com/questions/20290870/improving-the-extraction-of-human-names-with-nltk
# Answer link: https://stackoverflow.com/a/49500219/5019015
import nltk
from nltk.corpus import wordnet
class NameExtractor:
@staticmethod
def download_required_packages():
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('wordnet')
@staticmethod
def get_human_names(text):
tokens = nltk.tokenize.word_tokenize(text)
pos = nltk.pos_tag(tokens)
sent = nltk.ne_chunk(pos, binary=False)
person_list = []
person = []
name = ""
for subtree in sent.subtrees(filter=lambda t: t.label() == 'PERSON'):
for leaf in subtree.leaves():
person.append(leaf[0])
if len(person) > 0:
for part in person:
name += part + ' '
if name[:-1] not in person_list:
person_list.append(name[:-1])
name = ''
person = []
person_names = person_list
for person in person_list:
person_split = person.split(" ")
for name in person_split:
if wordnet.synsets(name):
if name in person:
person_names.remove(person)
break
return person_names
@staticmethod
def extract_names(text: str):
names = NameExtractor.get_human_names(text)
return names | en | 0.11516 | 3.166434 | 3 |
tfx/dsl/components/base/node_registry.py | johnPertoft/tfx | 0 | 13608 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Node registry."""
import threading
from typing import Any, FrozenSet
# To resolve circular dependency caused by type annotations.
base_node = Any # base_node.py imports this module.
class _NodeRegistry(threading.local):
"""Stores registered nodes in the local thread."""
def __init__(self):
super().__init__()
self._nodes = set()
def register(self, node: 'base_node.BaseNode'):
self._nodes.add(node)
def registered_nodes(self):
return self._nodes
# Module-level singleton; threading.local gives each thread its own node set.
_node_registry = _NodeRegistry()
def register_node(node: 'base_node.BaseNode'):
  """Register a node in the local thread."""
  _node_registry.register(node)
def registered_nodes() -> FrozenSet['base_node.BaseNode']:
  """Get registered nodes in the local thread."""
  # Returned frozenset is a snapshot; later registrations are not reflected.
  return frozenset(_node_registry.registered_nodes())
| # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Node registry."""
import threading
from typing import Any, FrozenSet
# To resolve circular dependency caused by type annotations.
base_node = Any # base_node.py imports this module.
class _NodeRegistry(threading.local):
"""Stores registered nodes in the local thread."""
def __init__(self):
super().__init__()
self._nodes = set()
def register(self, node: 'base_node.BaseNode'):
self._nodes.add(node)
def registered_nodes(self):
return self._nodes
_node_registry = _NodeRegistry()
def register_node(node: 'base_node.BaseNode'):
"""Register a node in the local thread."""
_node_registry.register(node)
def registered_nodes() -> FrozenSet['base_node.BaseNode']:
"""Get registered nodes in the local thread."""
return frozenset(_node_registry.registered_nodes())
| pt | 0.211989 | 2.334363 | 2 |
tests/unit/api/test_api.py | Mattlk13/datadogpy | 0 | 13609 | # stdlib
from copy import deepcopy
from functools import wraps
import os
import tempfile
from time import time
# 3p
import mock
# datadog
from datadog import initialize, api, util
from datadog.api import (
Distribution,
Metric,
ServiceCheck
)
from datadog.api.exceptions import ApiError, ApiNotInitialized
from datadog.util.compat import is_p3k
from tests.unit.api.helper import (
DatadogAPIWithInitialization,
DatadogAPINoInitialization,
MyCreatable,
MyUpdatable,
MyDeletable,
MyGetable,
MyListable,
MyListableSubResource,
MyAddableSubResource,
MyUpdatableSubResource,
MyDeletableSubResource,
MyActionable,
API_KEY,
APP_KEY,
API_HOST,
HOST_NAME,
FAKE_PROXY
)
from tests.util.contextmanagers import EnvVars
class TestInitialization(DatadogAPINoInitialization):
def test_no_initialization_fails(self):
"""
Raise ApiNotInitialized exception when `initialize` has not ran or no API key was set.
"""
self.assertRaises(ApiNotInitialized, MyCreatable.create)
# No API key => only stats in statsd mode should work
initialize()
api._api_key = None
self.assertRaises(ApiNotInitialized, MyCreatable.create)
# Finally, initialize with an API key
initialize(api_key=API_KEY, api_host=API_HOST)
MyCreatable.create()
self.assertEqual(self.request_mock.call_count(), 1)
@mock.patch('datadog.util.config.get_config_path')
def test_get_hostname(self, mock_config_path):
"""
API hostname parameter fallback with Datadog Agent hostname when available.
"""
# Generate a fake agent config
tmpfilepath = os.path.join(tempfile.gettempdir(), "tmp-agentconfig")
with open(tmpfilepath, "wb") as f:
if is_p3k():
f.write(bytes("[Main]\n", 'UTF-8'))
f.write(bytes("hostname: {0}\n".format(HOST_NAME), 'UTF-8'))
else:
f.write("[Main]\n")
f.write("hostname: {0}\n".format(HOST_NAME))
# Mock get_config_path to return this fake agent config
mock_config_path.return_value = tmpfilepath
initialize()
self.assertEqual(api._host_name, HOST_NAME, api._host_name)
def test_request_parameters(self):
"""
API parameters are set with `initialize` method.
"""
# Test API, application keys, API host, and some HTTP client options
initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST)
# Make a simple API call
MyCreatable.create()
_, options = self.request_mock.call_args()
# Assert `requests` parameters
self.assertIn('params', options)
self.assertIn('api_key', options['params'])
self.assertEqual(options['params']['api_key'], API_KEY)
self.assertIn('application_key', options['params'])
self.assertEqual(options['params']['application_key'], APP_KEY)
self.assertIn('headers', options)
self.assertEqual(options['headers'], {'Content-Type': 'application/json'})
def test_initialize_options(self):
"""
HTTP client and API options are set with `initialize` method.
"""
initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST,
proxies=FAKE_PROXY, cacert=False)
# Make a simple API call
MyCreatable.create()
_, options = self.request_mock.call_args()
# Assert `requests` parameters
self.assertIn('proxies', options)
self.assertEqual(options['proxies'], FAKE_PROXY)
self.assertIn('verify', options)
self.assertEqual(options['verify'], False)
# Arm the `requests` to raise
self.arm_requests_to_raise()
# No exception should be raised (mute=True by default)
MyCreatable.create()
# Repeat with mute to False
initialize(api_key=API_KEY, mute=False)
self.assertRaises(ApiError, MyCreatable.create)
def test_return_raw_response(self):
# Test default initialization sets return_raw_response to False
initialize()
assert not api._return_raw_response
# Assert that we can set this to True
initialize(return_raw_response=True)
assert api._return_raw_response
# Assert we get multiple fields back when set to True
initialize(api_key="<KEY>", app_key="123456", return_raw_response=True)
data, raw = api.Monitor.get_all()
def test_default_values(self):
with EnvVars(ignore=[
"DATADOG_API_KEY",
"DATADOG_APP_KEY",
"DD_API_KEY",
"DD_APP_KEY"
]):
initialize()
self.assertIsNone(api._api_key)
self.assertIsNone(api._application_key)
self.assertEqual(api._api_host, "https://api.datadoghq.com")
self.assertEqual(api._host_name, util.hostname.get_hostname())
def test_env_var_values(self):
with EnvVars(
env_vars={
"DATADOG_API_KEY": "API_KEY_ENV",
"DATADOG_APP_KEY": "APP_KEY_ENV",
"DATADOG_HOST": "HOST_ENV",
}
):
initialize()
self.assertEqual(api._api_key, "API_KEY_ENV")
self.assertEqual(api._application_key, "APP_KEY_ENV")
self.assertEqual(api._api_host, "HOST_ENV")
self.assertEqual(api._host_name, util.hostname.get_hostname())
del os.environ["DATADOG_API_KEY"]
del os.environ["DATADOG_APP_KEY"]
del os.environ["DATADOG_HOST"]
with EnvVars(env_vars={
"DD_API_KEY": "API_KEY_ENV_DD",
"DD_APP_KEY": "APP_KEY_ENV_DD",
}):
api._api_key = None
api._application_key = None
initialize()
self.assertEqual(api._api_key, "API_KEY_ENV_DD")
self.assertEqual(api._application_key, "APP_KEY_ENV_DD")
def test_function_param_value(self):
initialize(api_key="API_KEY", app_key="APP_KEY", api_host="HOST", host_name="HOSTNAME")
self.assertEqual(api._api_key, "API_KEY")
self.assertEqual(api._application_key, "APP_KEY")
self.assertEqual(api._api_host, "HOST")
self.assertEqual(api._host_name, "HOSTNAME")
def test_precedence(self):
# Initialize first with env vars
with EnvVars(env_vars={
"DD_API_KEY": "API_KEY_ENV_DD",
"DD_APP_KEY": "APP_KEY_ENV_DD",
}):
os.environ["DATADOG_API_KEY"] = "API_KEY_ENV"
os.environ["DATADOG_APP_KEY"] = "APP_KEY_ENV"
os.environ["DATADOG_HOST"] = "HOST_ENV"
initialize()
self.assertEqual(api._api_key, "API_KEY_ENV")
self.assertEqual(api._application_key, "APP_KEY_ENV")
self.assertEqual(api._api_host, "HOST_ENV")
self.assertEqual(api._host_name, util.hostname.get_hostname())
# Initialize again to check given parameters take precedence over already set value and env vars
initialize(api_key="API_KEY", app_key="APP_KEY", api_host="HOST", host_name="HOSTNAME")
self.assertEqual(api._api_key, "API_KEY")
self.assertEqual(api._application_key, "APP_KEY")
self.assertEqual(api._api_host, "HOST")
self.assertEqual(api._host_name, "HOSTNAME")
# Initialize again without specifying attributes to check that already initialized value takes precedence
initialize()
self.assertEqual(api._api_key, "API_KEY")
self.assertEqual(api._application_key, "APP_KEY")
self.assertEqual(api._api_host, "HOST")
self.assertEqual(api._host_name, "HOSTNAME")
del os.environ["DATADOG_API_KEY"]
del os.environ["DATADOG_APP_KEY"]
del os.environ["DATADOG_HOST"]
class TestResources(DatadogAPIWithInitialization):
def test_creatable(self):
"""
Creatable resource logic.
"""
MyCreatable.create(mydata="val")
self.request_called_with('POST', API_HOST + "/api/v1/creatables", data={'mydata': "val"})
MyCreatable.create(mydata="val", attach_host_name=True)
self.request_called_with('POST', API_HOST + "/api/v1/creatables",
data={'mydata': "val", 'host': api._host_name})
def test_getable(self):
"""
Getable resource logic.
"""
getable_object_id = 123
MyGetable.get(getable_object_id, otherparam="val")
self.request_called_with('GET', API_HOST + "/api/v1/getables/" + str(getable_object_id),
params={'otherparam': "val"})
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
def test_listable(self):
"""
Listable resource logic.
"""
MyListable.get_all(otherparam="val")
self.request_called_with('GET', API_HOST + "/api/v1/listables", params={'otherparam': "val"})
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
def test_updatable(self):
"""
Updatable resource logic.
"""
updatable_object_id = 123
MyUpdatable.update(updatable_object_id, params={'myparam': "val1"}, mydata="val2")
self.request_called_with('PUT', API_HOST + "/api/v1/updatables/" + str(updatable_object_id),
params={'myparam': "val1"}, data={'mydata': "val2"})
def test_detalable(self):
"""
Deletable resource logic.
"""
deletable_object_id = 123
MyDeletable.delete(deletable_object_id, otherparam="val")
self.request_called_with('DELETE', API_HOST + "/api/v1/deletables/" + str(deletable_object_id),
params={'otherparam': "val"})
def test_listable_sub_resources(self):
"""
Listable sub-resources logic.
"""
resource_id = 123
MyListableSubResource.get_items(resource_id, otherparam="val")
self.request_called_with(
'GET',
API_HOST + '/api/v1/resource_name/{0}/sub_resource_name'.format(resource_id),
params={'otherparam': "val"}
)
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
def test_addable_sub_resources(self):
"""
Addable sub-resources logic.
"""
resource_id = 123
MyAddableSubResource.add_items(resource_id, params={'myparam': 'val1'}, mydata='val2')
self.request_called_with(
'POST',
API_HOST + '/api/v1/resource_name/{0}/sub_resource_name'.format(resource_id),
params={'myparam': 'val1'},
data={'mydata': 'val2'}
)
def test_updatable_sub_resources(self):
"""
Updatable sub-resources logic.
"""
resource_id = 123
MyUpdatableSubResource.update_items(resource_id, params={'myparam': 'val1'}, mydata='val2')
self.request_called_with(
'PUT',
API_HOST + '/api/v1/resource_name/{0}/sub_resource_name'.format(resource_id),
params={'myparam': 'val1'},
data={'mydata': 'val2'}
)
def test_deletable_sub_resources(self):
"""
Deletable sub-resources logic.
"""
resource_id = 123
MyDeletableSubResource.delete_items(resource_id, params={'myparam': 'val1'}, mydata='val2')
self.request_called_with(
'DELETE',
API_HOST + '/api/v1/resource_name/{0}/sub_resource_name'.format(resource_id),
params={'myparam': 'val1'},
data={'mydata': 'val2'}
)
def test_actionable(self):
"""
Actionable resource logic.
"""
actionable_object_id = 123
MyActionable.trigger_class_action(
'POST',
'actionname',
id=actionable_object_id,
params={'myparam': 'val1'},
mydata='val',
mydata2='val2'
)
self.request_called_with(
'POST',
API_HOST + '/api/v1/actionables/{0}/actionname'.format(str(actionable_object_id)),
params={'myparam': 'val1'},
data={'mydata': 'val', 'mydata2': 'val2'}
)
MyActionable.trigger_class_action(
'POST',
'actionname',
id=actionable_object_id,
mydata='val',
mydata2='val2'
)
self.request_called_with(
'POST',
API_HOST +'/api/v1/actionables/{0}/actionname'.format(str(actionable_object_id)),
params={},
data={'mydata': 'val', 'mydata2': 'val2'}
)
MyActionable.trigger_class_action(
'GET',
'actionname',
id=actionable_object_id,
params={'param1': 'val1', 'param2': 'val2'}
)
self.request_called_with(
'GET',
API_HOST + '/api/v1/actionables/{0}/actionname'.format(str(actionable_object_id)),
params={'param1': 'val1', 'param2': 'val2'}
)
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
MyActionable.trigger_action(
'POST',
'actionname',
id=actionable_object_id,
mydata="val"
)
self.request_called_with(
'POST',
API_HOST + '/api/v1/actionname/{0}'.format(actionable_object_id),
data={'mydata': "val"}
)
MyActionable.trigger_action(
'GET',
'actionname',
id=actionable_object_id,
)
self.request_called_with(
'GET',
API_HOST + '/api/v1/actionname/{0}'.format(actionable_object_id)
)
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
class TestMetricResource(DatadogAPIWithInitialization):
def submit_and_assess_metric_payload(self, serie, attach_host_name=True):
"""
Helper to assess the metric payload format.
"""
now = time()
if isinstance(serie, dict):
Metric.send(attach_host_name=attach_host_name, **deepcopy(serie))
serie = [serie]
else:
Metric.send(deepcopy(serie), attach_host_name=attach_host_name)
payload = self.get_request_data()
for i, metric in enumerate(payload['series']):
if attach_host_name:
self.assertEqual(set(metric.keys()), set(['metric', 'points', 'host']))
self.assertEqual(metric['host'], api._host_name)
else:
self.assertEqual(set(metric.keys()), set(['metric', 'points']))
self.assertEqual(metric['metric'], serie[i]['metric'])
# points is a list of 1 point
self.assertTrue(isinstance(metric['points'], list))
self.assertEqual(len(metric['points']), 1)
# it consists of a [time, value] pair
self.assertEqual(len(metric['points'][0]), 2)
# its value == value we sent
self.assertEqual(metric['points'][0][1], float(serie[i]['points']))
# it's time not so far from current time
assert now - 1 < metric['points'][0][0] < now + 1
def submit_and_assess_dist_payload(self, serie, attach_host_name=True):
"""
Helper to assess the metric payload format.
"""
now = time()
if isinstance(serie, dict):
Distribution.send(attach_host_name=attach_host_name, **deepcopy(serie))
serie = [serie]
else:
Distribution.send(deepcopy(serie), attach_host_name=attach_host_name)
payload = self.get_request_data()
for i, metric in enumerate(payload['series']):
if attach_host_name:
self.assertEqual(set(metric.keys()), set(['metric', 'points', 'host']))
self.assertEqual(metric['host'], api._host_name)
else:
self.assertEqual(set(metric.keys()), set(['metric', 'points']))
self.assertEqual(metric['metric'], serie[i]['metric'])
# points is a list of 1 point
self.assertTrue(isinstance(metric['points'], list))
self.assertEqual(len(metric['points']), 1)
# it consists of a [time, value] pair
self.assertEqual(len(metric['points'][0]), 2)
# its value == value we sent
self.assertEqual(metric['points'][0][1], serie[i]['points'][0][1])
# it's time not so far from current time
assert now - 1 < metric['points'][0][0] < now + 1
def test_metric_submit_query_switch(self):
"""
Endpoints are different for submission and queries.
"""
Metric.send(points=(123, 456))
self.request_called_with('POST', API_HOST + "/api/v1/series",
data={'series': [{'points': [[123, 456.0]], 'host': api._host_name}]})
Metric.query(start="val1", end="val2")
self.request_called_with('GET', API_HOST + "/api/v1/query",
params={'from': "val1", 'to': "val2"})
def test_points_submission(self):
"""
Assess the data payload format, when submitting a single or multiple points.
"""
# Single point
serie = dict(metric='metric.1', points=13)
self.submit_and_assess_metric_payload(serie)
# Multiple point
serie = [dict(metric='metric.1', points=13),
dict(metric='metric.2', points=19)]
self.submit_and_assess_metric_payload(serie)
# Single point no hostname
serie = dict(metric='metric.1', points=13)
self.submit_and_assess_metric_payload(serie, attach_host_name=False)
# Multiple point no hostname
serie = [dict(metric='metric.1', points=13),
dict(metric='metric.2', points=19)]
self.submit_and_assess_metric_payload(serie, attach_host_name=False)
def test_dist_points_submission(self):
"""
Assess the distribution data payload format, when submitting a single or multiple points.
"""
# Single point
serie = dict(metric='metric.1', points=[[time(), [13]]])
self.submit_and_assess_dist_payload(serie)
# Multiple point
serie = [dict(metric='metric.1', points=[[time(), [13]]]),
dict(metric='metric.2', points=[[time(), [19]]])]
self.submit_and_assess_dist_payload(serie)
# Single point no hostname
serie = dict(metric='metric.1', points=[[time(), [13]]])
self.submit_and_assess_dist_payload(serie, attach_host_name=False)
# Multiple point no hostname
serie = [dict(metric='metric.1', points=[[time(), [13]]]),
dict(metric='metric.2', points=[[time(), [19]]])]
self.submit_and_assess_dist_payload(serie, attach_host_name=False)
def test_data_type_support(self):
"""
`Metric` API supports `real` numerical data types.
"""
from decimal import Decimal
from fractions import Fraction
m_long = int(1) # long in Python 3.x
if not is_p3k():
m_long = long(1)
supported_data_types = [1, 1.0, m_long, Decimal(1), Fraction(1, 2)]
for point in supported_data_types:
serie = dict(metric='metric.numerical', points=point)
self.submit_and_assess_metric_payload(serie)
class TestServiceCheckResource(DatadogAPIWithInitialization):
def test_service_check_supports_none_parameters(self):
"""
ServiceCheck should support none parameters
```
$ dog service_check check check_pg host0 1
```
resulted in `RuntimeError: dictionary changed size during iteration`
"""
ServiceCheck.check(
check='check_pg', host_name='host0', status=1, message=None,
timestamp=None, tags=None)
| # stdlib
from copy import deepcopy
from functools import wraps
import os
import tempfile
from time import time
# 3p
import mock
# datadog
from datadog import initialize, api, util
from datadog.api import (
Distribution,
Metric,
ServiceCheck
)
from datadog.api.exceptions import ApiError, ApiNotInitialized
from datadog.util.compat import is_p3k
from tests.unit.api.helper import (
DatadogAPIWithInitialization,
DatadogAPINoInitialization,
MyCreatable,
MyUpdatable,
MyDeletable,
MyGetable,
MyListable,
MyListableSubResource,
MyAddableSubResource,
MyUpdatableSubResource,
MyDeletableSubResource,
MyActionable,
API_KEY,
APP_KEY,
API_HOST,
HOST_NAME,
FAKE_PROXY
)
from tests.util.contextmanagers import EnvVars
class TestInitialization(DatadogAPINoInitialization):
def test_no_initialization_fails(self):
"""
Raise ApiNotInitialized exception when `initialize` has not ran or no API key was set.
"""
self.assertRaises(ApiNotInitialized, MyCreatable.create)
# No API key => only stats in statsd mode should work
initialize()
api._api_key = None
self.assertRaises(ApiNotInitialized, MyCreatable.create)
# Finally, initialize with an API key
initialize(api_key=API_KEY, api_host=API_HOST)
MyCreatable.create()
self.assertEqual(self.request_mock.call_count(), 1)
@mock.patch('datadog.util.config.get_config_path')
def test_get_hostname(self, mock_config_path):
"""
API hostname parameter fallback with Datadog Agent hostname when available.
"""
# Generate a fake agent config
tmpfilepath = os.path.join(tempfile.gettempdir(), "tmp-agentconfig")
with open(tmpfilepath, "wb") as f:
if is_p3k():
f.write(bytes("[Main]\n", 'UTF-8'))
f.write(bytes("hostname: {0}\n".format(HOST_NAME), 'UTF-8'))
else:
f.write("[Main]\n")
f.write("hostname: {0}\n".format(HOST_NAME))
# Mock get_config_path to return this fake agent config
mock_config_path.return_value = tmpfilepath
initialize()
self.assertEqual(api._host_name, HOST_NAME, api._host_name)
def test_request_parameters(self):
"""
API parameters are set with `initialize` method.
"""
# Test API, application keys, API host, and some HTTP client options
initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST)
# Make a simple API call
MyCreatable.create()
_, options = self.request_mock.call_args()
# Assert `requests` parameters
self.assertIn('params', options)
self.assertIn('api_key', options['params'])
self.assertEqual(options['params']['api_key'], API_KEY)
self.assertIn('application_key', options['params'])
self.assertEqual(options['params']['application_key'], APP_KEY)
self.assertIn('headers', options)
self.assertEqual(options['headers'], {'Content-Type': 'application/json'})
def test_initialize_options(self):
"""
HTTP client and API options are set with `initialize` method.
"""
initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST,
proxies=FAKE_PROXY, cacert=False)
# Make a simple API call
MyCreatable.create()
_, options = self.request_mock.call_args()
# Assert `requests` parameters
self.assertIn('proxies', options)
self.assertEqual(options['proxies'], FAKE_PROXY)
self.assertIn('verify', options)
self.assertEqual(options['verify'], False)
# Arm the `requests` to raise
self.arm_requests_to_raise()
# No exception should be raised (mute=True by default)
MyCreatable.create()
# Repeat with mute to False
initialize(api_key=API_KEY, mute=False)
self.assertRaises(ApiError, MyCreatable.create)
def test_return_raw_response(self):
# Test default initialization sets return_raw_response to False
initialize()
assert not api._return_raw_response
# Assert that we can set this to True
initialize(return_raw_response=True)
assert api._return_raw_response
# Assert we get multiple fields back when set to True
initialize(api_key="<KEY>", app_key="123456", return_raw_response=True)
data, raw = api.Monitor.get_all()
def test_default_values(self):
with EnvVars(ignore=[
"DATADOG_API_KEY",
"DATADOG_APP_KEY",
"DD_API_KEY",
"DD_APP_KEY"
]):
initialize()
self.assertIsNone(api._api_key)
self.assertIsNone(api._application_key)
self.assertEqual(api._api_host, "https://api.datadoghq.com")
self.assertEqual(api._host_name, util.hostname.get_hostname())
def test_env_var_values(self):
with EnvVars(
env_vars={
"DATADOG_API_KEY": "API_KEY_ENV",
"DATADOG_APP_KEY": "APP_KEY_ENV",
"DATADOG_HOST": "HOST_ENV",
}
):
initialize()
self.assertEqual(api._api_key, "API_KEY_ENV")
self.assertEqual(api._application_key, "APP_KEY_ENV")
self.assertEqual(api._api_host, "HOST_ENV")
self.assertEqual(api._host_name, util.hostname.get_hostname())
del os.environ["DATADOG_API_KEY"]
del os.environ["DATADOG_APP_KEY"]
del os.environ["DATADOG_HOST"]
with EnvVars(env_vars={
"DD_API_KEY": "API_KEY_ENV_DD",
"DD_APP_KEY": "APP_KEY_ENV_DD",
}):
api._api_key = None
api._application_key = None
initialize()
self.assertEqual(api._api_key, "API_KEY_ENV_DD")
self.assertEqual(api._application_key, "APP_KEY_ENV_DD")
def test_function_param_value(self):
initialize(api_key="API_KEY", app_key="APP_KEY", api_host="HOST", host_name="HOSTNAME")
self.assertEqual(api._api_key, "API_KEY")
self.assertEqual(api._application_key, "APP_KEY")
self.assertEqual(api._api_host, "HOST")
self.assertEqual(api._host_name, "HOSTNAME")
def test_precedence(self):
# Initialize first with env vars
with EnvVars(env_vars={
"DD_API_KEY": "API_KEY_ENV_DD",
"DD_APP_KEY": "APP_KEY_ENV_DD",
}):
os.environ["DATADOG_API_KEY"] = "API_KEY_ENV"
os.environ["DATADOG_APP_KEY"] = "APP_KEY_ENV"
os.environ["DATADOG_HOST"] = "HOST_ENV"
initialize()
self.assertEqual(api._api_key, "API_KEY_ENV")
self.assertEqual(api._application_key, "APP_KEY_ENV")
self.assertEqual(api._api_host, "HOST_ENV")
self.assertEqual(api._host_name, util.hostname.get_hostname())
# Initialize again to check given parameters take precedence over already set value and env vars
initialize(api_key="API_KEY", app_key="APP_KEY", api_host="HOST", host_name="HOSTNAME")
self.assertEqual(api._api_key, "API_KEY")
self.assertEqual(api._application_key, "APP_KEY")
self.assertEqual(api._api_host, "HOST")
self.assertEqual(api._host_name, "HOSTNAME")
# Initialize again without specifying attributes to check that already initialized value takes precedence
initialize()
self.assertEqual(api._api_key, "API_KEY")
self.assertEqual(api._application_key, "APP_KEY")
self.assertEqual(api._api_host, "HOST")
self.assertEqual(api._host_name, "HOSTNAME")
del os.environ["DATADOG_API_KEY"]
del os.environ["DATADOG_APP_KEY"]
del os.environ["DATADOG_HOST"]
class TestResources(DatadogAPIWithInitialization):
def test_creatable(self):
"""
Creatable resource logic.
"""
MyCreatable.create(mydata="val")
self.request_called_with('POST', API_HOST + "/api/v1/creatables", data={'mydata': "val"})
MyCreatable.create(mydata="val", attach_host_name=True)
self.request_called_with('POST', API_HOST + "/api/v1/creatables",
data={'mydata': "val", 'host': api._host_name})
def test_getable(self):
"""
Getable resource logic.
"""
getable_object_id = 123
MyGetable.get(getable_object_id, otherparam="val")
self.request_called_with('GET', API_HOST + "/api/v1/getables/" + str(getable_object_id),
params={'otherparam': "val"})
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
def test_listable(self):
"""
Listable resource logic.
"""
MyListable.get_all(otherparam="val")
self.request_called_with('GET', API_HOST + "/api/v1/listables", params={'otherparam': "val"})
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
def test_updatable(self):
"""
Updatable resource logic.
"""
updatable_object_id = 123
MyUpdatable.update(updatable_object_id, params={'myparam': "val1"}, mydata="val2")
self.request_called_with('PUT', API_HOST + "/api/v1/updatables/" + str(updatable_object_id),
params={'myparam': "val1"}, data={'mydata': "val2"})
def test_detalable(self):
"""
Deletable resource logic.
"""
deletable_object_id = 123
MyDeletable.delete(deletable_object_id, otherparam="val")
self.request_called_with('DELETE', API_HOST + "/api/v1/deletables/" + str(deletable_object_id),
params={'otherparam': "val"})
def test_listable_sub_resources(self):
"""
Listable sub-resources logic.
"""
resource_id = 123
MyListableSubResource.get_items(resource_id, otherparam="val")
self.request_called_with(
'GET',
API_HOST + '/api/v1/resource_name/{0}/sub_resource_name'.format(resource_id),
params={'otherparam': "val"}
)
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
def test_addable_sub_resources(self):
"""
Addable sub-resources logic.
"""
resource_id = 123
MyAddableSubResource.add_items(resource_id, params={'myparam': 'val1'}, mydata='val2')
self.request_called_with(
'POST',
API_HOST + '/api/v1/resource_name/{0}/sub_resource_name'.format(resource_id),
params={'myparam': 'val1'},
data={'mydata': 'val2'}
)
def test_updatable_sub_resources(self):
"""
Updatable sub-resources logic.
"""
resource_id = 123
MyUpdatableSubResource.update_items(resource_id, params={'myparam': 'val1'}, mydata='val2')
self.request_called_with(
'PUT',
API_HOST + '/api/v1/resource_name/{0}/sub_resource_name'.format(resource_id),
params={'myparam': 'val1'},
data={'mydata': 'val2'}
)
def test_deletable_sub_resources(self):
"""
Deletable sub-resources logic.
"""
resource_id = 123
MyDeletableSubResource.delete_items(resource_id, params={'myparam': 'val1'}, mydata='val2')
self.request_called_with(
'DELETE',
API_HOST + '/api/v1/resource_name/{0}/sub_resource_name'.format(resource_id),
params={'myparam': 'val1'},
data={'mydata': 'val2'}
)
def test_actionable(self):
"""
Actionable resource logic.
"""
actionable_object_id = 123
MyActionable.trigger_class_action(
'POST',
'actionname',
id=actionable_object_id,
params={'myparam': 'val1'},
mydata='val',
mydata2='val2'
)
self.request_called_with(
'POST',
API_HOST + '/api/v1/actionables/{0}/actionname'.format(str(actionable_object_id)),
params={'myparam': 'val1'},
data={'mydata': 'val', 'mydata2': 'val2'}
)
MyActionable.trigger_class_action(
'POST',
'actionname',
id=actionable_object_id,
mydata='val',
mydata2='val2'
)
self.request_called_with(
'POST',
API_HOST +'/api/v1/actionables/{0}/actionname'.format(str(actionable_object_id)),
params={},
data={'mydata': 'val', 'mydata2': 'val2'}
)
MyActionable.trigger_class_action(
'GET',
'actionname',
id=actionable_object_id,
params={'param1': 'val1', 'param2': 'val2'}
)
self.request_called_with(
'GET',
API_HOST + '/api/v1/actionables/{0}/actionname'.format(str(actionable_object_id)),
params={'param1': 'val1', 'param2': 'val2'}
)
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
MyActionable.trigger_action(
'POST',
'actionname',
id=actionable_object_id,
mydata="val"
)
self.request_called_with(
'POST',
API_HOST + '/api/v1/actionname/{0}'.format(actionable_object_id),
data={'mydata': "val"}
)
MyActionable.trigger_action(
'GET',
'actionname',
id=actionable_object_id,
)
self.request_called_with(
'GET',
API_HOST + '/api/v1/actionname/{0}'.format(actionable_object_id)
)
_, kwargs = self.request_mock.call_args()
self.assertIsNone(kwargs["data"])
class TestMetricResource(DatadogAPIWithInitialization):
def submit_and_assess_metric_payload(self, serie, attach_host_name=True):
"""
Helper to assess the metric payload format.
"""
now = time()
if isinstance(serie, dict):
Metric.send(attach_host_name=attach_host_name, **deepcopy(serie))
serie = [serie]
else:
Metric.send(deepcopy(serie), attach_host_name=attach_host_name)
payload = self.get_request_data()
for i, metric in enumerate(payload['series']):
if attach_host_name:
self.assertEqual(set(metric.keys()), set(['metric', 'points', 'host']))
self.assertEqual(metric['host'], api._host_name)
else:
self.assertEqual(set(metric.keys()), set(['metric', 'points']))
self.assertEqual(metric['metric'], serie[i]['metric'])
# points is a list of 1 point
self.assertTrue(isinstance(metric['points'], list))
self.assertEqual(len(metric['points']), 1)
# it consists of a [time, value] pair
self.assertEqual(len(metric['points'][0]), 2)
# its value == value we sent
self.assertEqual(metric['points'][0][1], float(serie[i]['points']))
# it's time not so far from current time
assert now - 1 < metric['points'][0][0] < now + 1
def submit_and_assess_dist_payload(self, serie, attach_host_name=True):
"""
Helper to assess the metric payload format.
"""
now = time()
if isinstance(serie, dict):
Distribution.send(attach_host_name=attach_host_name, **deepcopy(serie))
serie = [serie]
else:
Distribution.send(deepcopy(serie), attach_host_name=attach_host_name)
payload = self.get_request_data()
for i, metric in enumerate(payload['series']):
if attach_host_name:
self.assertEqual(set(metric.keys()), set(['metric', 'points', 'host']))
self.assertEqual(metric['host'], api._host_name)
else:
self.assertEqual(set(metric.keys()), set(['metric', 'points']))
self.assertEqual(metric['metric'], serie[i]['metric'])
# points is a list of 1 point
self.assertTrue(isinstance(metric['points'], list))
self.assertEqual(len(metric['points']), 1)
# it consists of a [time, value] pair
self.assertEqual(len(metric['points'][0]), 2)
# its value == value we sent
self.assertEqual(metric['points'][0][1], serie[i]['points'][0][1])
# it's time not so far from current time
assert now - 1 < metric['points'][0][0] < now + 1
def test_metric_submit_query_switch(self):
"""
Endpoints are different for submission and queries.
"""
Metric.send(points=(123, 456))
self.request_called_with('POST', API_HOST + "/api/v1/series",
data={'series': [{'points': [[123, 456.0]], 'host': api._host_name}]})
Metric.query(start="val1", end="val2")
self.request_called_with('GET', API_HOST + "/api/v1/query",
params={'from': "val1", 'to': "val2"})
def test_points_submission(self):
"""
Assess the data payload format, when submitting a single or multiple points.
"""
# Single point
serie = dict(metric='metric.1', points=13)
self.submit_and_assess_metric_payload(serie)
# Multiple point
serie = [dict(metric='metric.1', points=13),
dict(metric='metric.2', points=19)]
self.submit_and_assess_metric_payload(serie)
# Single point no hostname
serie = dict(metric='metric.1', points=13)
self.submit_and_assess_metric_payload(serie, attach_host_name=False)
# Multiple point no hostname
serie = [dict(metric='metric.1', points=13),
dict(metric='metric.2', points=19)]
self.submit_and_assess_metric_payload(serie, attach_host_name=False)
def test_dist_points_submission(self):
"""
Assess the distribution data payload format, when submitting a single or multiple points.
"""
# Single point
serie = dict(metric='metric.1', points=[[time(), [13]]])
self.submit_and_assess_dist_payload(serie)
# Multiple point
serie = [dict(metric='metric.1', points=[[time(), [13]]]),
dict(metric='metric.2', points=[[time(), [19]]])]
self.submit_and_assess_dist_payload(serie)
# Single point no hostname
serie = dict(metric='metric.1', points=[[time(), [13]]])
self.submit_and_assess_dist_payload(serie, attach_host_name=False)
# Multiple point no hostname
serie = [dict(metric='metric.1', points=[[time(), [13]]]),
dict(metric='metric.2', points=[[time(), [19]]])]
self.submit_and_assess_dist_payload(serie, attach_host_name=False)
def test_data_type_support(self):
"""
`Metric` API supports `real` numerical data types.
"""
from decimal import Decimal
from fractions import Fraction
m_long = int(1) # long in Python 3.x
if not is_p3k():
m_long = long(1)
supported_data_types = [1, 1.0, m_long, Decimal(1), Fraction(1, 2)]
for point in supported_data_types:
serie = dict(metric='metric.numerical', points=point)
self.submit_and_assess_metric_payload(serie)
class TestServiceCheckResource(DatadogAPIWithInitialization):
def test_service_check_supports_none_parameters(self):
"""
ServiceCheck should support none parameters
```
$ dog service_check check check_pg host0 1
```
resulted in `RuntimeError: dictionary changed size during iteration`
"""
ServiceCheck.check(
check='check_pg', host_name='host0', status=1, message=None,
timestamp=None, tags=None)
| pt | 0.196853 | 1.979633 | 2 |
keras_train.py | jmeisele/mlflow_demo | 0 | 13610 | <gh_stars>0
"""
Author: <NAME>
Email: <EMAIL>
Date: August 1, 2020
"""
import argparse
import keras
import tensorflow as tf
import cloudpickle
parser = argparse.ArgumentParser(
description='Train a Keras feed-forward network for MNIST classification')
parser.add_argument('--batch-size', '-b', type=int, default=128)
parser.add_argument('--epochs', '-e', type=int, default=1)
parser.add_argument('--learning_rate', '-l', type=float, default=0.05)
parser.add_argument('--num-hidden-units', '-n', type=float, default=512)
parser.add_argument('--dropout', '-d', type=float, default=0.25)
parser.add_argument('--momentum', '-m', type=float, default=0.85)
args = parser.parse_args()
mnist = keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_test = X_train / 255.0, X_test / 255.0
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=X_train[0].shape),
keras.layers.Dense(args.num_hidden_units, activation=tf.nn.relu),
keras.layers.Dropout(args.dropout),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
optimizer = keras.optimizers.SGD(lr=args.learning_rate, momentum=args.momentum)
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size)
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)
| """
Author: <NAME>
Email: <EMAIL>
Date: August 1, 2020
"""
import argparse
import keras
import tensorflow as tf
import cloudpickle
parser = argparse.ArgumentParser(
description='Train a Keras feed-forward network for MNIST classification')
parser.add_argument('--batch-size', '-b', type=int, default=128)
parser.add_argument('--epochs', '-e', type=int, default=1)
parser.add_argument('--learning_rate', '-l', type=float, default=0.05)
parser.add_argument('--num-hidden-units', '-n', type=float, default=512)
parser.add_argument('--dropout', '-d', type=float, default=0.25)
parser.add_argument('--momentum', '-m', type=float, default=0.85)
args = parser.parse_args()
mnist = keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_test = X_train / 255.0, X_test / 255.0
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=X_train[0].shape),
keras.layers.Dense(args.num_hidden_units, activation=tf.nn.relu),
keras.layers.Dropout(args.dropout),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
optimizer = keras.optimizers.SGD(lr=args.learning_rate, momentum=args.momentum)
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size)
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) | en | 0.339593 | 2.710444 | 3 |
4USCityEmotion/get_flickr_photos.py | HCH2CHO/EmotionMap | 3 | 13611 | <filename>4USCityEmotion/get_flickr_photos.py<gh_stars>1-10
# coding:utf-8
# version:python3.5.1
# author:kyh
import flickrapi
import datetime
import psycopg2
import time
# flickr照片类
class flickr_photo(object):
def __init__(self, photo_id, photo_city, photo_url):
self.id = photo_id
self.city = photo_city
self.url = photo_url
# 将照片插入数据库
def insert_db(self, db_connection, db_cursor):
try:
sql_command_insert = "INSERT INTO photo(id,url,city) VALUES({0},'{1}','{2}')".format(self.id,
self.url,
self.city
)
db_cursor.execute(sql_command_insert)
db_connection.commit()
return True
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
db_connection.rollback()
return False
# 连接数据库
def db_connect():
try:
connection = psycopg2.connect(database="PlaceEmotion", user="postgres",
password="<PASSWORD>", host="127.0.0.1", port="5432")
cursor = connection.cursor()
print("Database Connection has been opened completely!")
return connection, cursor
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
# 查询需要挖掘数据的地点
def query_location(db_connection, db_cursor):
sql_command_select = "SELECT id, city_name, lat, lon FROM location WHERE start_query='FALSE' LIMIT 1"
db_cursor.execute(sql_command_select)
db_connection.commit()
location = db_cursor.fetchone()
# 如果存在这样的地点,记录经纬度进行挖掘
if location is not None:
location_id = location[0]
city = location[1]
lat = location[2]
lon = location[3]
sql_command_update = "UPDATE location SET start_query='TRUE' WHERE id ='{0}'".format(location_id)
db_cursor.execute(sql_command_update)
db_connection.commit()
return city, lat, lon
# 不存在这样的地点,说明已经全部挖掘完毕
else:
return None, None, None
# flickr api信息
def query_api(db_connection, db_cursor):
sql_command_select = "SELECT key, secret FROM API WHERE type = 'flickr' AND start_use = FALSE LIMIT 1"
db_cursor.execute(sql_command_select)
db_connection.commit()
api = db_cursor.fetchone()
# 如果存在这样的API,记录API进行挖掘
if api is not None:
key = api[0]
secret = api[1]
sql_command_update = "UPDATE API SET start_use='TRUE' WHERE key='{0}'".format(key)
db_cursor.execute(sql_command_update)
db_connection.commit()
api_key = u'{0}'.format(key)
api_secret = u'{0}'.format(secret)
flickr = flickrapi.FlickrAPI(api_key, api_secret, cache=True)
print("API:", api_key, api_secret)
return flickr, key
# 不存在这样的API,说明已经全部挖掘完毕
else:
return None, None
# 计算时间
def compute_time(db_connection, db_cursor, location, latitude, longitude, flickr_api):
DATE=datetime.date(2012,1,1)
while(True):
DATE2=DATE+datetime.timedelta(days=10)
datemin ="{0}-{1}-{2}".format(DATE.year,DATE.month,DATE.day)
datemax ="{0}-{1}-{2}".format(DATE2.year,DATE2.month,DATE2.day)
DATE=DATE+datetime.timedelta(days=10)
#print(datemin,datemax)
get_photo_from_location(db_connection, db_cursor, location, latitude, longitude, datemin, datemax, flickr_api)
if DATE.year==2018 and DATE.month==11:
break
# 获取照片
def get_photo_from_location(db_connection, db_cursor, location, latitude, longitude, datemin, datemax, flickr):
# 获取所有图片
try:
time.sleep(2)
#latitude = 48.8584
#longitude = 2.2945
photos = flickr.walk(lat=latitude, lon=longitude, radius=1,
min_taken_date=datemin, max_taken_date=datemax, per_page=500, extras='url_c')
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
# 获取每一张图片
try:
for photo_url in photos:
url = photo_url.get('url_c')
print(url)
# 如果url不为空,将该图片插入数据库
if url is not None:
photo_id = int(photo_url.get('id'))
photo = flickr_photo(photo_id, location, url)
if photo.insert_db(db_connection, db_cursor):
print("Success! Photo id:" + str(photo_id) + "\tPhoto url:" + url)
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
def release_api(db_connection, db_cursor, api_key):
try:
sql_command_update = "UPDATE API SET start_use = FALSE WHERE key = '{0}'".format(api_key)
db_cursor.execute(sql_command_update)
db_connection.commit()
except Exception as e:
db_connection.rollback()
# 关闭数据库
def close_connection(connection):
try:
connection.close()
print("Database Connection has been closed completely!")
return True
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
# 主操作步骤
if __name__ == '__main__':
db_connection, db_cursor = db_connect()
flickr, api_key = query_api(db_connection, db_cursor)
location, lat, lon= query_location(db_connection, db_cursor)
while location is not None:
compute_time(db_connection, db_cursor, location, lat, lon, flickr)
location, lat, lon= query_location(db_connection, db_cursor)
print("All locations have been recorded!")
release_api(db_connection, db_cursor, api_key)
close_connection(db_connection)
| <filename>4USCityEmotion/get_flickr_photos.py<gh_stars>1-10
# coding:utf-8
# version:python3.5.1
# author:kyh
import flickrapi
import datetime
import psycopg2
import time
# flickr照片类
class flickr_photo(object):
def __init__(self, photo_id, photo_city, photo_url):
self.id = photo_id
self.city = photo_city
self.url = photo_url
# 将照片插入数据库
def insert_db(self, db_connection, db_cursor):
try:
sql_command_insert = "INSERT INTO photo(id,url,city) VALUES({0},'{1}','{2}')".format(self.id,
self.url,
self.city
)
db_cursor.execute(sql_command_insert)
db_connection.commit()
return True
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
db_connection.rollback()
return False
# 连接数据库
def db_connect():
try:
connection = psycopg2.connect(database="PlaceEmotion", user="postgres",
password="<PASSWORD>", host="127.0.0.1", port="5432")
cursor = connection.cursor()
print("Database Connection has been opened completely!")
return connection, cursor
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
# 查询需要挖掘数据的地点
def query_location(db_connection, db_cursor):
sql_command_select = "SELECT id, city_name, lat, lon FROM location WHERE start_query='FALSE' LIMIT 1"
db_cursor.execute(sql_command_select)
db_connection.commit()
location = db_cursor.fetchone()
# 如果存在这样的地点,记录经纬度进行挖掘
if location is not None:
location_id = location[0]
city = location[1]
lat = location[2]
lon = location[3]
sql_command_update = "UPDATE location SET start_query='TRUE' WHERE id ='{0}'".format(location_id)
db_cursor.execute(sql_command_update)
db_connection.commit()
return city, lat, lon
# 不存在这样的地点,说明已经全部挖掘完毕
else:
return None, None, None
# flickr api信息
def query_api(db_connection, db_cursor):
sql_command_select = "SELECT key, secret FROM API WHERE type = 'flickr' AND start_use = FALSE LIMIT 1"
db_cursor.execute(sql_command_select)
db_connection.commit()
api = db_cursor.fetchone()
# 如果存在这样的API,记录API进行挖掘
if api is not None:
key = api[0]
secret = api[1]
sql_command_update = "UPDATE API SET start_use='TRUE' WHERE key='{0}'".format(key)
db_cursor.execute(sql_command_update)
db_connection.commit()
api_key = u'{0}'.format(key)
api_secret = u'{0}'.format(secret)
flickr = flickrapi.FlickrAPI(api_key, api_secret, cache=True)
print("API:", api_key, api_secret)
return flickr, key
# 不存在这样的API,说明已经全部挖掘完毕
else:
return None, None
# 计算时间
def compute_time(db_connection, db_cursor, location, latitude, longitude, flickr_api):
DATE=datetime.date(2012,1,1)
while(True):
DATE2=DATE+datetime.timedelta(days=10)
datemin ="{0}-{1}-{2}".format(DATE.year,DATE.month,DATE.day)
datemax ="{0}-{1}-{2}".format(DATE2.year,DATE2.month,DATE2.day)
DATE=DATE+datetime.timedelta(days=10)
#print(datemin,datemax)
get_photo_from_location(db_connection, db_cursor, location, latitude, longitude, datemin, datemax, flickr_api)
if DATE.year==2018 and DATE.month==11:
break
# 获取照片
def get_photo_from_location(db_connection, db_cursor, location, latitude, longitude, datemin, datemax, flickr):
# 获取所有图片
try:
time.sleep(2)
#latitude = 48.8584
#longitude = 2.2945
photos = flickr.walk(lat=latitude, lon=longitude, radius=1,
min_taken_date=datemin, max_taken_date=datemax, per_page=500, extras='url_c')
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
# 获取每一张图片
try:
for photo_url in photos:
url = photo_url.get('url_c')
print(url)
# 如果url不为空,将该图片插入数据库
if url is not None:
photo_id = int(photo_url.get('id'))
photo = flickr_photo(photo_id, location, url)
if photo.insert_db(db_connection, db_cursor):
print("Success! Photo id:" + str(photo_id) + "\tPhoto url:" + url)
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
def release_api(db_connection, db_cursor, api_key):
try:
sql_command_update = "UPDATE API SET start_use = FALSE WHERE key = '{0}'".format(api_key)
db_cursor.execute(sql_command_update)
db_connection.commit()
except Exception as e:
db_connection.rollback()
# 关闭数据库
def close_connection(connection):
try:
connection.close()
print("Database Connection has been closed completely!")
return True
except Exception as e:
with open('log.txt','a') as log:
log.writelines(str(e))
# 主操作步骤
if __name__ == '__main__':
db_connection, db_cursor = db_connect()
flickr, api_key = query_api(db_connection, db_cursor)
location, lat, lon= query_location(db_connection, db_cursor)
while location is not None:
compute_time(db_connection, db_cursor, location, lat, lon, flickr)
location, lat, lon= query_location(db_connection, db_cursor)
print("All locations have been recorded!")
release_api(db_connection, db_cursor, api_key)
close_connection(db_connection)
| zh | 0.991937 | 2.829997 | 3 |
Lianjia/LianjiaErShouFang.py | Detailscool/YHSpider | 1 | 13612 | <reponame>Detailscool/YHSpider
# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import sys
import csv
reload(sys)
sys.setdefaultencoding('utf-8')
def not_empty(str):
return str and str.strip()
if __name__ == '__main__':
url_main = 'http://gz.lianjia.com'
f = open(u'广州二手房.csv', 'wb')
f.write(unicode('\xEF\xBB\xBF', 'utf-8')) # 文件头
writer = csv.writer(f)
writer.writerow(['区域', '小区名称', '户型', '面积', '价格(万)', '单价(元/平米)',
'性质', '朝向', '装修', '是否有电梯', '楼层', '建筑年代', '楼型'])
res = requests.get(url_main+'ershoufang')
res = res.text.encode(res.encoding).decode('utf-8')
soup = BeautifulSoup(res, 'html.parser')
# print soup.prettify()
districts = soup.find(name='div', attrs={'data-role':'ershoufang'}) # <div data-role="ershoufang">
# soup.select()
for district in districts.find_all(name='a'):
print district['title']
district_name = district.text # '东城', '西城', '朝阳', '海淀'......
url = '%s%s' % (url_main, district['href'])
# print url
res = requests.get(url)
res = res.text.encode(res.encoding).decode('utf-8')
soup = BeautifulSoup(res,'html.parser')
# print soup.prettify()
page = soup.find('div', {'class':'page-box house-lst-page-box'})
if not page: # 平谷区没有房源,直接返回
continue
total_pages = dict(eval(page['page-data']))['totalPage'] # 总页数
# print total_pages
for j in range(1, total_pages+1):
url_page = '%spg%d/' % (url, j)
res = requests.get(url_page)
res = res.text.encode(res.encoding).decode('utf-8')
soup = BeautifulSoup(res, 'html.parser')
# print soup.prettify()
sells = soup.find(name='ul', attrs={'class':'sellListContent', 'log-mod':'list'})
if not sells:
continue
# <a class="title" data-bl="list" data-el="ershoufang" data-log_index="1" href="XX" target="_blank">
titles = soup.find_all(name='a', attrs={'class':'title', 'data-bl':'list', 'data-el':'ershoufang'})
# <a data-el="region" data-log_index="1" href="X" target="_blank">
regions = sells.find_all(name='a', attrs={'data-el':'region'})
infos = sells.find_all(name='div', class_='houseInfo') # <div class="houseInfo">
infos2 = sells.find_all(name='div', class_='positionInfo') # <div class="positionInfo">
prices = sells.find_all(name='div', class_='totalPrice') # <div class="totalPrice">
unit_prices = sells.find_all(name='div', class_='unitPrice') # <div class="unitPrice" data-hid="X" data-price="X" data-rid="X">
subways = sells.find_all(name='span', class_='subway') # <span class="subway">
taxs = sells.find_all(name='span', class_='taxfree') # <span class="taxfree">
N = max(len(titles), len(regions), len(prices), len(unit_prices), len(subways), len(taxs), len(infos), len(infos2))
# for title, region, price, unit_price, subway, tax, info, info2 in zip(titles, regions, prices, unit_prices, subways, taxs, infos, infos2):
for i in range(N):
room_type = area = orientation = decoration = elevator = floor = year = slab_tower = None
title = titles[i] if len(titles) > i else None
region = regions[i] if len(regions) > i else None
price = prices[i] if len(prices) > i else None
unit_price = unit_prices[i] if len(unit_prices) > i else None
subway = subways[i] if len(subways) > i else None
tax = taxs[i] if len(taxs) > i else None
info = infos[i] if len(infos) > i else None
info2 = infos2[i] if len(infos2) > i else None
if title:
print 'Title: ', title.text
if region:
region = region.text
if price:
price = price.text
price = price[:price.find('万')]
if unit_price:
unit_price = unit_price.span.text.strip()
unit_price = unit_price[:unit_price.find('元/平米')]
if unit_price.find('单价') != -1:
unit_price = unit_price[2:]
if subway:
subway = subway.text.strip()
if tax:
tax = tax.text.strip()
if info:
info = info.text.split('|')
room_type = info[1].strip() # 几室几厅
area = info[2].strip() # 房屋面积
area = area[:area.find('平米')]
orientation = info[3].strip().replace(' ', '') # 朝向
decoration = '-'
if len(info) > 4: # 如果是车位,则该项为空
decoration = info[4].strip() # 装修类型:简装、中装、精装、豪装、其他
elevator = '无'
if len(info) > 5:
elevator = info[5].strip() # 是否有电梯:有、无
if info2:
info2 = filter(not_empty, info2.text.split(' '))
floor = info2[0].strip()
info2 = info2[1]
year = info2[:info2.find('年')]
slab_tower = info2[info2.find('建')+1:]
print district_name, region, room_type, area, price, unit_price, tax, orientation, decoration, elevator, floor, year, slab_tower
writer.writerow([district_name, region, room_type, area, price, unit_price, tax, orientation, decoration, elevator, floor, year, slab_tower])
# break
# break
# break
f.close()
| # -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import sys
import csv
reload(sys)
sys.setdefaultencoding('utf-8')
def not_empty(str):
return str and str.strip()
if __name__ == '__main__':
url_main = 'http://gz.lianjia.com'
f = open(u'广州二手房.csv', 'wb')
f.write(unicode('\xEF\xBB\xBF', 'utf-8')) # 文件头
writer = csv.writer(f)
writer.writerow(['区域', '小区名称', '户型', '面积', '价格(万)', '单价(元/平米)',
'性质', '朝向', '装修', '是否有电梯', '楼层', '建筑年代', '楼型'])
res = requests.get(url_main+'ershoufang')
res = res.text.encode(res.encoding).decode('utf-8')
soup = BeautifulSoup(res, 'html.parser')
# print soup.prettify()
districts = soup.find(name='div', attrs={'data-role':'ershoufang'}) # <div data-role="ershoufang">
# soup.select()
for district in districts.find_all(name='a'):
print district['title']
district_name = district.text # '东城', '西城', '朝阳', '海淀'......
url = '%s%s' % (url_main, district['href'])
# print url
res = requests.get(url)
res = res.text.encode(res.encoding).decode('utf-8')
soup = BeautifulSoup(res,'html.parser')
# print soup.prettify()
page = soup.find('div', {'class':'page-box house-lst-page-box'})
if not page: # 平谷区没有房源,直接返回
continue
total_pages = dict(eval(page['page-data']))['totalPage'] # 总页数
# print total_pages
for j in range(1, total_pages+1):
url_page = '%spg%d/' % (url, j)
res = requests.get(url_page)
res = res.text.encode(res.encoding).decode('utf-8')
soup = BeautifulSoup(res, 'html.parser')
# print soup.prettify()
sells = soup.find(name='ul', attrs={'class':'sellListContent', 'log-mod':'list'})
if not sells:
continue
# <a class="title" data-bl="list" data-el="ershoufang" data-log_index="1" href="XX" target="_blank">
titles = soup.find_all(name='a', attrs={'class':'title', 'data-bl':'list', 'data-el':'ershoufang'})
# <a data-el="region" data-log_index="1" href="X" target="_blank">
regions = sells.find_all(name='a', attrs={'data-el':'region'})
infos = sells.find_all(name='div', class_='houseInfo') # <div class="houseInfo">
infos2 = sells.find_all(name='div', class_='positionInfo') # <div class="positionInfo">
prices = sells.find_all(name='div', class_='totalPrice') # <div class="totalPrice">
unit_prices = sells.find_all(name='div', class_='unitPrice') # <div class="unitPrice" data-hid="X" data-price="X" data-rid="X">
subways = sells.find_all(name='span', class_='subway') # <span class="subway">
taxs = sells.find_all(name='span', class_='taxfree') # <span class="taxfree">
N = max(len(titles), len(regions), len(prices), len(unit_prices), len(subways), len(taxs), len(infos), len(infos2))
# for title, region, price, unit_price, subway, tax, info, info2 in zip(titles, regions, prices, unit_prices, subways, taxs, infos, infos2):
for i in range(N):
room_type = area = orientation = decoration = elevator = floor = year = slab_tower = None
title = titles[i] if len(titles) > i else None
region = regions[i] if len(regions) > i else None
price = prices[i] if len(prices) > i else None
unit_price = unit_prices[i] if len(unit_prices) > i else None
subway = subways[i] if len(subways) > i else None
tax = taxs[i] if len(taxs) > i else None
info = infos[i] if len(infos) > i else None
info2 = infos2[i] if len(infos2) > i else None
if title:
print 'Title: ', title.text
if region:
region = region.text
if price:
price = price.text
price = price[:price.find('万')]
if unit_price:
unit_price = unit_price.span.text.strip()
unit_price = unit_price[:unit_price.find('元/平米')]
if unit_price.find('单价') != -1:
unit_price = unit_price[2:]
if subway:
subway = subway.text.strip()
if tax:
tax = tax.text.strip()
if info:
info = info.text.split('|')
room_type = info[1].strip() # 几室几厅
area = info[2].strip() # 房屋面积
area = area[:area.find('平米')]
orientation = info[3].strip().replace(' ', '') # 朝向
decoration = '-'
if len(info) > 4: # 如果是车位,则该项为空
decoration = info[4].strip() # 装修类型:简装、中装、精装、豪装、其他
elevator = '无'
if len(info) > 5:
elevator = info[5].strip() # 是否有电梯:有、无
if info2:
info2 = filter(not_empty, info2.text.split(' '))
floor = info2[0].strip()
info2 = info2[1]
year = info2[:info2.find('年')]
slab_tower = info2[info2.find('建')+1:]
print district_name, region, room_type, area, price, unit_price, tax, orientation, decoration, elevator, floor, year, slab_tower
writer.writerow([district_name, region, room_type, area, price, unit_price, tax, orientation, decoration, elevator, floor, year, slab_tower])
# break
# break
# break
f.close() | ja | 0.283466 | 2.845413 | 3 |
test/test_main.py | bluesheeptoken/PyGolf | 7 | 13613 | <reponame>bluesheeptoken/PyGolf<gh_stars>1-10
import argparse
import tempfile
import unittest
from pygolf.__main__ import get_arguments_warning, read_input_code, shorten
class TestMain(unittest.TestCase):
def test_reduce(self):
self.assertEqual(shorten("print( 1 + 2 )"), "print(1+2)")
self.assertEqual(shorten("not valid code"), None)
def test_read_input_code(self):
name_space = argparse.Namespace()
name_space.code = None
name_space.clipboard = None
name_space.input_file = None
name_space.code = "print('code')"
self.assertEqual(read_input_code(name_space), "print('code')")
name_space.code = None
with tempfile.NamedTemporaryFile("w+") as fp:
fp.write("print('input_file')")
fp.flush()
name_space.input_file = fp.name
self.assertEqual(read_input_code(name_space), "print('input_file')")
name_space.input_file = None
def test_get_arguments_warning(self):
name_space = argparse.Namespace()
name_space.input_file = None
name_space.output_file = "path"
self.assertEqual(len(list(get_arguments_warning(name_space))), 1)
| import argparse
import tempfile
import unittest
from pygolf.__main__ import get_arguments_warning, read_input_code, shorten
class TestMain(unittest.TestCase):
def test_reduce(self):
self.assertEqual(shorten("print( 1 + 2 )"), "print(1+2)")
self.assertEqual(shorten("not valid code"), None)
def test_read_input_code(self):
name_space = argparse.Namespace()
name_space.code = None
name_space.clipboard = None
name_space.input_file = None
name_space.code = "print('code')"
self.assertEqual(read_input_code(name_space), "print('code')")
name_space.code = None
with tempfile.NamedTemporaryFile("w+") as fp:
fp.write("print('input_file')")
fp.flush()
name_space.input_file = fp.name
self.assertEqual(read_input_code(name_space), "print('input_file')")
name_space.input_file = None
def test_get_arguments_warning(self):
name_space = argparse.Namespace()
name_space.input_file = None
name_space.output_file = "path"
self.assertEqual(len(list(get_arguments_warning(name_space))), 1) | none | 1 | 3.155287 | 3 |
Session_01/koch.py | UP-RS-ESP/GEW-DAP05-2018 | 2 | 13614 | <gh_stars>1-10
import sys
import numpy as np
from matplotlib import pyplot as pl
def koch(x0, y0, rho, phi, order):
global xr, yr
x1, y1 = x0 + rho * np.cos(phi), y0 + rho * np.sin(phi)
if order:
x, y = x0, y0
for angle in [0, np.pi/3, 5*np.pi/3, 0]:
x, y = koch(x, y, rho / 3.0, phi + angle, order - 1)
else:
xr.append(x1)
yr.append(y1)
return (x1, y1)
xr = [1,]
yr = [1,]
koch(xr[0], yr[0], 1, 0, 5)
pl.plot(xr, yr, 'r.-', lw = 0.5)
ax = pl.gca()
ax.set_aspect('equal')
pl.grid()
pl.show()
| import sys
import numpy as np
from matplotlib import pyplot as pl
def koch(x0, y0, rho, phi, order):
global xr, yr
x1, y1 = x0 + rho * np.cos(phi), y0 + rho * np.sin(phi)
if order:
x, y = x0, y0
for angle in [0, np.pi/3, 5*np.pi/3, 0]:
x, y = koch(x, y, rho / 3.0, phi + angle, order - 1)
else:
xr.append(x1)
yr.append(y1)
return (x1, y1)
xr = [1,]
yr = [1,]
koch(xr[0], yr[0], 1, 0, 5)
pl.plot(xr, yr, 'r.-', lw = 0.5)
ax = pl.gca()
ax.set_aspect('equal')
pl.grid()
pl.show() | none | 1 | 2.910056 | 3 |
core/views.py | xuhang57/atmosphere | 0 | 13615 | # -*- coding: utf-8 -*-
"""
Core views to provide custom operations
"""
import uuid
from datetime import datetime
from django.http import HttpResponseRedirect
from threepio import logger
from atmosphere import settings
from django_cyverse_auth.decorators import atmo_login_required
from django_cyverse_auth.models import Token as AuthToken
from core.models import AtmosphereUser as DjangoUser
@atmo_login_required
def emulate_request(request, username=None):
try:
logger.info("Emulate attempt: %s wants to be %s"
% (request.user, username))
logger.info(request.session.__dict__)
if not username and 'emulator' in request.session:
logger.info("Clearing emulation attributes from user")
username = request.session['emulator']
orig_token = request.session['emulator_token']
request.session['username'] = username
request.session['token'] = orig_token
del request.session['emulator']
del request.session['emulator_token']
# Allow user to fall through on line below
return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v1/profile")
try:
user = DjangoUser.objects.get(username=username)
except DjangoUser.DoesNotExist:
logger.info("Emulate attempt failed. User <%s> does not exist"
% username)
return HttpResponseRedirect(
settings.REDIRECT_URL +
"/api/v1/profile")
logger.info("Emulate success, creating tokens for %s" % username)
token = AuthToken(
user=user,
key=str(uuid.uuid4()),
issuedTime=datetime.now(),
remote_ip=request.META['REMOTE_ADDR'],
api_server_url=settings.API_SERVER_URL
)
token.save()
# Keep original emulator+token if it exists, or use the last known username+token
if 'emulator' not in request.session:
original_emulator = request.session['username']
request.session['emulator'] = original_emulator
logger.info("Returning user %s - Emulated as user %s - to api profile "
% (original_emulator, username))
if 'emulator_token' not in request.session:
original_token = request.session['token']
request.session['emulator_token'] = original_token
# # Set the username to the user to be emulated
# # to whom the token also belongs
request.session['username'] = username
request.session['token'] = token.key
request.session.save()
logger.info(request.session.__dict__)
logger.info(request.user)
return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v1/profile")
except Exception as e:
logger.warn("Emulate request failed")
logger.exception(e)
return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v1/profile")
| # -*- coding: utf-8 -*-
"""
Core views to provide custom operations
"""
import uuid
from datetime import datetime
from django.http import HttpResponseRedirect
from threepio import logger
from atmosphere import settings
from django_cyverse_auth.decorators import atmo_login_required
from django_cyverse_auth.models import Token as AuthToken
from core.models import AtmosphereUser as DjangoUser
@atmo_login_required
def emulate_request(request, username=None):
try:
logger.info("Emulate attempt: %s wants to be %s"
% (request.user, username))
logger.info(request.session.__dict__)
if not username and 'emulator' in request.session:
logger.info("Clearing emulation attributes from user")
username = request.session['emulator']
orig_token = request.session['emulator_token']
request.session['username'] = username
request.session['token'] = orig_token
del request.session['emulator']
del request.session['emulator_token']
# Allow user to fall through on line below
return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v1/profile")
try:
user = DjangoUser.objects.get(username=username)
except DjangoUser.DoesNotExist:
logger.info("Emulate attempt failed. User <%s> does not exist"
% username)
return HttpResponseRedirect(
settings.REDIRECT_URL +
"/api/v1/profile")
logger.info("Emulate success, creating tokens for %s" % username)
token = AuthToken(
user=user,
key=str(uuid.uuid4()),
issuedTime=datetime.now(),
remote_ip=request.META['REMOTE_ADDR'],
api_server_url=settings.API_SERVER_URL
)
token.save()
# Keep original emulator+token if it exists, or use the last known username+token
if 'emulator' not in request.session:
original_emulator = request.session['username']
request.session['emulator'] = original_emulator
logger.info("Returning user %s - Emulated as user %s - to api profile "
% (original_emulator, username))
if 'emulator_token' not in request.session:
original_token = request.session['token']
request.session['emulator_token'] = original_token
# # Set the username to the user to be emulated
# # to whom the token also belongs
request.session['username'] = username
request.session['token'] = token.key
request.session.save()
logger.info(request.session.__dict__)
logger.info(request.user)
return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v1/profile")
except Exception as e:
logger.warn("Emulate request failed")
logger.exception(e)
return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v1/profile")
| pt | 0.259096 | 2.104286 | 2 |
indico/util/serializer.py | jgrigera/indico | 1 | 13616 | <gh_stars>1-10
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from enum import Enum
from indico.core.errors import IndicoError
from indico.core.logger import Logger
class Serializer(object):
__public__ = []
def to_serializable(self, attr='__public__', converters=None):
serializable = {}
if converters is None:
converters = {}
for k in getattr(self, attr):
try:
if isinstance(k, tuple):
k, name = k
else:
k, name = k, k
v = getattr(self, k)
if callable(v): # to make it generic, we can get rid of it by properties
v = v()
if isinstance(v, Serializer):
v = v.to_serializable()
elif isinstance(v, list):
v = [e.to_serializable() for e in v]
elif isinstance(v, dict):
v = dict((k, vv.to_serializable() if isinstance(vv, Serializer) else vv)
for k, vv in v.iteritems())
elif isinstance(v, Enum):
v = v.name
if type(v) in converters:
v = converters[type(v)](v)
serializable[name] = v
except Exception:
msg = 'Could not retrieve {}.{}.'.format(self.__class__.__name__, k)
Logger.get('Serializer{}'.format(self.__class__.__name__)).exception(msg)
raise IndicoError(msg)
return serializable
| # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from enum import Enum
from indico.core.errors import IndicoError
from indico.core.logger import Logger
class Serializer(object):
__public__ = []
def to_serializable(self, attr='__public__', converters=None):
serializable = {}
if converters is None:
converters = {}
for k in getattr(self, attr):
try:
if isinstance(k, tuple):
k, name = k
else:
k, name = k, k
v = getattr(self, k)
if callable(v): # to make it generic, we can get rid of it by properties
v = v()
if isinstance(v, Serializer):
v = v.to_serializable()
elif isinstance(v, list):
v = [e.to_serializable() for e in v]
elif isinstance(v, dict):
v = dict((k, vv.to_serializable() if isinstance(vv, Serializer) else vv)
for k, vv in v.iteritems())
elif isinstance(v, Enum):
v = v.name
if type(v) in converters:
v = converters[type(v)](v)
serializable[name] = v
except Exception:
msg = 'Could not retrieve {}.{}.'.format(self.__class__.__name__, k)
Logger.get('Serializer{}'.format(self.__class__.__name__)).exception(msg)
raise IndicoError(msg)
return serializable | pt | 0.139049 | 2.335764 | 2 |
step/lambdas/get_image_status.py | mbeacom/cloudendure-python | 7 | 13617 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Check the state of an AWS AMI."""
from __future__ import annotations
import json
from typing import Any, Dict
import boto3
print("Loading function get_image_status")
ec2_client = boto3.client("ec2")
# {
# "instance_id": "i-identifier",
# "kms_id": "KMS ID",
# "account": "account_number",
# "instance_status": "should be there if in loop"
# "migrated_ami_id": "ami-identifier"
# }
def lambda_handler(event: Dict[str, Any], context: Any) -> str:
    """Handle signaling and entry into the AWS Lambda."""
    # Log the raw invocation payload for traceability in CloudWatch.
    print("Received event: " + json.dumps(event, indent=2))
    ami_id: str = event["migrated_ami_id"]
    # Ask EC2 for the image and report its lifecycle state string.
    response: Dict[str, Any] = ec2_client.describe_images(ImageIds=[ami_id])
    images = response["Images"]
    return images[0]["State"]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Check the state of an AWS AMI."""
from __future__ import annotations
import json
from typing import Any, Dict
import boto3
print("Loading function get_image_status")
ec2_client = boto3.client("ec2")
# {
# "instance_id": "i-identifier",
# "kms_id": "KMS ID",
# "account": "account_number",
# "instance_status": "should be there if in loop"
# "migrated_ami_id": "ami-identifier"
# }
def lambda_handler(event: Dict[str, Any], context: Any) -> str:
    """Handle signaling and entry into the AWS Lambda.

    ``event`` must carry a ``migrated_ami_id`` key; ``context`` is the
    standard Lambda context object and is unused.  Returns the ``State``
    field (e.g. "pending"/"available") of the first image EC2 reports
    for that AMI id.
    """
    print("Received event: " + json.dumps(event, indent=2))
    migrated_ami_id: str = event["migrated_ami_id"]
    # describe_images returns {"Images": [...]}; a single specific AMI id
    # is expected to match exactly one entry -- TODO confirm id validity.
    ami_state: Dict[str, Any] = ec2_client.describe_images(ImageIds=[migrated_ami_id])
    return ami_state["Images"][0]["State"]
| it | 0.16749 | 2.432961 | 2 |
starfish/types.py | kne42/starfish | 0 | 13618 | # constants
from starfish.core.types import ( # noqa: F401
Axes,
Clip,
Coordinates,
CORE_DEPENDENCIES,
Features,
LOG,
OverlapStrategy,
PHYSICAL_COORDINATE_DIMENSION,
PhysicalCoordinateTypes,
STARFISH_EXTRAS_KEY,
TransformType,
)
from starfish.core.types import CoordinateValue, Number # noqa: F401
| # constants
from starfish.core.types import ( # noqa: F401
Axes,
Clip,
Coordinates,
CORE_DEPENDENCIES,
Features,
LOG,
OverlapStrategy,
PHYSICAL_COORDINATE_DIMENSION,
PhysicalCoordinateTypes,
STARFISH_EXTRAS_KEY,
TransformType,
)
from starfish.core.types import CoordinateValue, Number # noqa: F401
| fr | 0.188961 | 1.041555 | 1 |
python/alertsActor/rules/dangerKey.py | sdss/twistedAlertsActor | 0 | 13619 | <gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
#
# dangerKey.py
#
# Created by <NAME> on 10 April 2019
import re, time
from yaml import YAMLObject
from alertsActor import log
class diskCheck(YAMLObject):
    """Translate a disk-status keyword into an alert severity."""

    # Status enum (upper-cased) -> severity string reported to the actor.
    _SEVERITIES = {
        "OK": "ok",
        "WARNING": "warn",
        "SERIOUS": "serious",
        "CRITICAL": "critical",
    }

    def __init__(self):
        pass

    def __call__(self, keyState):
        """The keyval is an enum ('Ok','Warning','Serious','Critical')
        and the amount of free space (GB)
        """
        status = (keyState.keyword[0]).upper()
        # Anything unrecognised is reported as informational only.
        return self._SEVERITIES.get(status, "info")
class doNothing(object):
    """Pass-through checker that echoes the key's current severity.

    camcheck alerts can't check themselves; this dummy checker exists
    so they can be registered like any other monitored key.
    """

    def __init__(self):
        pass

    def __call__(self, keyState):
        # No evaluation logic: the severity was decided elsewhere.
        current = keyState.severity
        return current
class camCheck(YAMLObject):
    """evaluate a camCheck alert

    camCheck reports a *list* of currently-failing hardware keys.  This
    checker fans each reported key out into its own monitored alert
    (``camCheck.<key>``) and clears alerts whose key is no longer in the
    reported list.
    """
    def __init__(self):
        # NEVER GETS CALLED!!!! -_-
        # (presumably instances are built by the YAML loader, which
        # bypasses __init__ -- confirm)
        pass
    def generateCamCheckAlert(self, key, severity):
        """Create or refresh the monitored alert for one reported key."""
        inst = key[:3]   # e.g. "SP1"/"SP2"
        side = key[3]    # e.g. "R"/"B"
        key = "camCheck." + key
        instruments = ["boss"]
        # most keywords will be SP[12][RB]
        # check if they are and assign appropriate instruments
        if inst in ["SP1", "SP2"]:
            instruments.append("boss.{}".format(inst))
            if side in ["R", "B"]:
                instruments.append("boss.{}.{}".format(inst, side))
        if severity in ["critical", "serious"]:
            # Serious problems must be acknowledged by a human and emailed.
            selfClear = False
            addresses = self.emailAddresses
        else:
            selfClear = True
            addresses = None
        if key not in self.triggered:
            self.triggered.append(key)
        if key not in self.alertsActor.monitoring:
            # camCheck keys cannot re-check themselves; register them with
            # a pass-through checker and re-trigger them from here.
            dumbCheck = doNothing()
            self.alertsActor.addKey(key, severity=severity, checkAfter=120,
                                    selfClear=selfClear, checker=dumbCheck,
                                    keyword="'Reported by camCheck'",
                                    instruments=instruments, emailAddresses=addresses,
                                    emailDelay=0)
        if self.alertsActor.monitoring[key].active:
            # Already firing: just refresh its timestamp.
            self.alertsActor.monitoring[key].stampTime()
        else:
            self.alertsActor.monitoring[key].setActive(severity)
    def __call__(self, keyState):
        keyval = keyState.keyword
        if self.alertsActor is None:
            print("setting alertsActor for camCheck!!")
            self.alertsActor = keyState.alertsActorReference
            # do this only once hopefully
            for i in ["boss.SP1", "boss.SP2", "boss.SP1.R", "boss.SP2.R",
                      "boss.SP1.B", "boss.SP2.B"]:
                self.alertsActor.instrumentDown[i] = False
        # print("CAMCHECK, len {}, type {}, key: {}".format(len(keyval), type(keyval), keyval))
        log.info('CAMCHECK reported {}'.format(keyval))
        if type(keyval) == str:
            # could possibly try to fix this in hubModel casts, but easier here
            keyval = [keyval]
        if len(keyval) == 1 and keyval[0] == "None":  # this is a bug somewhere upstream
            keyval = []
        # Map each reported key to a severity by pattern.
        for k in keyval:
            if re.search(r"SP[12][RB][0-3]?CCDTemp", k):
                self.generateCamCheckAlert(k, "critical")
            elif re.search(r"SP[12]SecondaryDewarPress", k):
                self.generateCamCheckAlert(k, "critical")
            elif re.search(r"SP[12](DAQ|Mech|Micro)NotTalking", k):
                self.generateCamCheckAlert(k, "critical")
            elif re.search(r"DACS_SET", k):
                self.generateCamCheckAlert(k, "critical")
            elif re.search(r"SP[12]LN2Fill", k):
                self.generateCamCheckAlert(k, "serious")
            elif re.search(r"SP[12](Exec|Phase)Boot", k):
                self.generateCamCheckAlert(k, "serious")
            else:
                self.generateCamCheckAlert(k, "warn")
        # Clear previously-triggered keys that are no longer reported.
        for k in self.triggered:
            if k.split(".")[-1] not in keyval:  # b/c we know its camCheck already
                self.alertsActor.monitoring[k].severity = "ok"
                # now it can check itself and find out its cool
                # and then decide to disappear if its acknowledged, etc etc
                self.alertsActor.monitoring[k].checkKey()
                # NOTE(review): removing from self.triggered while iterating
                # over it can skip the element that follows a removal --
                # verify whether iterating a copy was intended.
                self.triggered.remove(k)
        # never flag camCheck, always monitored keys
        return "ok"
class heartbeatCheck(YAMLObject):
    """Grade a heartbeat by how long ago it was last heard from."""

    def __init__(self):
        pass

    def __call__(self, keyState):
        elapsed = time.time() - keyState.lastalive
        if elapsed < keyState.checkAfter:
            # Seen within the current check window.
            return "ok"
        if elapsed > 5 * keyState.checkAfter:
            # Missed several consecutive windows.
            return "critical"
        return keyState.defaultSeverity
class above(YAMLObject):
    """Flag the key when its value exceeds the danger threshold."""

    def __init__(self):
        pass

    def __call__(self, keyState):
        too_high = keyState.keyword > keyState.dangerVal
        return keyState.defaultSeverity if too_high else "ok"
class below(YAMLObject):
    """Flag the key when its value drops under the danger threshold."""

    def __init__(self):
        pass

    def __call__(self, keyState):
        too_low = keyState.keyword < keyState.dangerVal
        return keyState.defaultSeverity if too_low else "ok"
class neq(YAMLObject):
    """Flag the key when its value differs from the danger value.

    (Docstring fixed: it was copy-pasted from ``below`` and wrongly said
    "is the value too low"; this check fires on *inequality*.)
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        # Any deviation from the expected value raises the default severity.
        if keyState.keyword != keyState.dangerVal:
            return keyState.defaultSeverity
        else:
            return "ok"
class inList(YAMLObject):
    """Flag the key when any element of the keyword list is truthy."""

    def __init__(self):
        pass

    def __call__(self, keyState):
        # One flagged element is enough to raise the alert.
        if any(keyState.keyword):
            return keyState.defaultSeverity
        return "ok"
class firstElem(YAMLObject):
    """Flag the key when the first element of the keyword equals dangerVal.

    (Docstring fixed: it was copy-pasted from ``inList`` and wrongly said
    "is any value in the list True"; this check compares ``keyword[0]``
    against the danger value.)
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        if keyState.keyword[0] == keyState.dangerVal:
            return keyState.defaultSeverity
        else:
            return "ok"
class default(object):
"""check equality to a dangerval
"""
def __init__(self):
pass
def __call__(self, keyState):
if keyState.keyword == keyState.dangerVal:
return keyState.defaultSeverity
else:
return "ok"
| #!/usr/bin/env python
# encoding: utf-8
#
# dangerKey.py
#
# Created by <NAME> on 10 April 2019
import re, time
from yaml import YAMLObject
from alertsActor import log
class diskCheck(YAMLObject):
    """evaluate a disk keyword

    Maps the disk-status enum reported in the keyword to an alert
    severity string.
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        """The keyval is an enum ('Ok','Warning','Serious','Critical')
        and the amount of free space (GB)
        """
        keyval = keyState.keyword
        # Only the enum (first element) matters; compare case-insensitively.
        if (keyval[0]).upper() == 'OK':
            return "ok"
        elif (keyval[0]).upper() == 'WARNING':
            return "warn"
        elif (keyval[0]).upper() == 'SERIOUS':
            return "serious"
        elif (keyval[0]).upper() == 'CRITICAL':
            return "critical"
        else:
            # Unrecognised status: report informational only.
            return "info"
class doNothing(object):
    """camcheck alerts can't check themselves
    dummy class to facilitate that
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        # Pass-through: echo the severity camCheck already assigned.
        return keyState.severity
class camCheck(YAMLObject):
"""evaluate a camCheck alert
"""
def __init__(self):
# NEVER GETS CALLED!!!! -_-
pass
def generateCamCheckAlert(self, key, severity):
inst = key[:3]
side = key[3]
key = "camCheck." + key
instruments = ["boss"]
# most keywords will be SP[12][RB]
# check if they are and assign appropriate instruments
if inst in ["SP1", "SP2"]:
instruments.append("boss.{}".format(inst))
if side in ["R", "B"]:
instruments.append("boss.{}.{}".format(inst, side))
if severity in ["critical", "serious"]:
selfClear = False
addresses = self.emailAddresses
else:
selfClear = True
addresses = None
if key not in self.triggered:
self.triggered.append(key)
if key not in self.alertsActor.monitoring:
dumbCheck = doNothing()
self.alertsActor.addKey(key, severity=severity, checkAfter=120,
selfClear=selfClear, checker=dumbCheck,
keyword="'Reported by camCheck'",
instruments=instruments, emailAddresses=addresses,
emailDelay=0)
if self.alertsActor.monitoring[key].active:
self.alertsActor.monitoring[key].stampTime()
else:
self.alertsActor.monitoring[key].setActive(severity)
def __call__(self, keyState):
keyval = keyState.keyword
if self.alertsActor is None:
print("setting alertsActor for camCheck!!")
self.alertsActor = keyState.alertsActorReference
# do this only once hopefully
for i in ["boss.SP1", "boss.SP2", "boss.SP1.R", "boss.SP2.R",
"boss.SP1.B", "boss.SP2.B"]:
self.alertsActor.instrumentDown[i] = False
# print("CAMCHECK, len {}, type {}, key: {}".format(len(keyval), type(keyval), keyval))
log.info('CAMCHECK reported {}'.format(keyval))
if type(keyval) == str:
# could possibly try to fix this in hubModel casts, but easier here
keyval = [keyval]
if len(keyval) == 1 and keyval[0] == "None": # this is a bug somewhere upstream
keyval = []
for k in keyval:
if re.search(r"SP[12][RB][0-3]?CCDTemp", k):
self.generateCamCheckAlert(k, "critical")
elif re.search(r"SP[12]SecondaryDewarPress", k):
self.generateCamCheckAlert(k, "critical")
elif re.search(r"SP[12](DAQ|Mech|Micro)NotTalking", k):
self.generateCamCheckAlert(k, "critical")
elif re.search(r"DACS_SET", k):
self.generateCamCheckAlert(k, "critical")
elif re.search(r"SP[12]LN2Fill", k):
self.generateCamCheckAlert(k, "serious")
elif re.search(r"SP[12](Exec|Phase)Boot", k):
self.generateCamCheckAlert(k, "serious")
else:
self.generateCamCheckAlert(k, "warn")
for k in self.triggered:
if k.split(".")[-1] not in keyval: # b/c we know its camCheck already
self.alertsActor.monitoring[k].severity = "ok"
# now it can check itself and find out its cool
# and then decide to disappear if its acknowledged, etc etc
self.alertsActor.monitoring[k].checkKey()
self.triggered.remove(k)
# never flag camCheck, always monitored keys
return "ok"
class heartbeatCheck(YAMLObject):
    """check a heartbeat.

    Severity is graded by how long ago the actor was last heard from,
    in units of the configured ``checkAfter`` window.
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        if time.time() - keyState.lastalive < keyState.checkAfter:
            # Heard from within the current window.
            return "ok"
        elif time.time() - keyState.lastalive > 5*keyState.checkAfter:
            # Missed several consecutive windows.
            return "critical"
        else:
            return keyState.defaultSeverity
class above(YAMLObject):
    """literally: is the value too high
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        if keyState.keyword > keyState.dangerVal:
            return keyState.defaultSeverity
        else:
            return "ok"
class below(YAMLObject):
    """literally: is the value too low
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        if keyState.keyword < keyState.dangerVal:
            return keyState.defaultSeverity
        else:
            return "ok"
class neq(YAMLObject):
    """flag when the value differs from the danger value

    (Docstring fixed: it was copy-pasted from ``below``.)
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        if keyState.keyword != keyState.dangerVal:
            return keyState.defaultSeverity
        else:
            return "ok"
class inList(YAMLObject):
    """is any value in the list "True", e.g. flagged
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        # Truthy-filter the list: any flagged element raises the alert.
        if [k for k in keyState.keyword if k]:
            return keyState.defaultSeverity
        else:
            return "ok"
class firstElem(YAMLObject):
    """flag when the first element of the keyword equals the danger value

    (Docstring fixed: it was copy-pasted from ``inList``.)
    """
    def __init__(self):
        pass
    def __call__(self, keyState):
        if keyState.keyword[0] == keyState.dangerVal:
            return keyState.defaultSeverity
        else:
            return "ok"
class default(object):
"""check equality to a dangerval
"""
def __init__(self):
pass
def __call__(self, keyState):
if keyState.keyword == keyState.dangerVal:
return keyState.defaultSeverity
else:
return "ok" | pt | 0.124553 | 2.557649 | 3 |
jsonsubschema/old/_jsonschema.py | lukeenterprise/json-subschema | 1 | 13620 | '''
Created on June 24, 2019
@author: <NAME>
'''
import copy
import json
import sys
import math
import numbers
import intervals as I
from abc import ABC, abstractmethod
from greenery.lego import parse
from intervals import inf as infinity
import config
import _constants
from canoncalization import canoncalize_object
from _normalizer import lazy_normalize
from _utils import (
validate_schema,
print_db,
is_sub_interval_from_optional_ranges,
is_num,
is_list,
is_dict,
is_empty_dict_or_none,
is_dict_or_true,
one
)
class JSONschema(dict):
    """Base class for canonicalized schemas.

    A schema *is* a dict (JSON round-tripping stays trivial) with
    attribute-style access to its keywords.  Subclasses declare
    ``kw_defaults`` for the keywords they understand and implement
    ``_isUninhabited`` / ``_isSubtype``.
    """
    # keyword -> default value; filled into the dict by updateKeys()
    kw_defaults = {}
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # self.validate()
        self.updateKeys()
        # self.canoncalize()
        # A schema no value can satisfy aborts the whole program.
        if self.isUninhabited():
            sys.exit("Found an uninhabited type at: " + str(self))
    def __getattr__(self, name):
        # Attribute access falls back to the underlying dict entries.
        if name in self:
            return self[name]
        else:
            raise AttributeError("No such attribute: ", name)
    def __setattr__(self, name, value):
        self[name] = value
    def __delattr__(self, name):
        if name in self:
            del self[name]
        else:
            raise AttributeError("No such attribute: ", name)
    def validate(self):
        validate_schema(self)
    def updateKeys(self):
        """Install defaults for keywords the schema does not set."""
        for k, v in self.kw_defaults.items():
            # "items" would shadow dict.items(); store it as "items_".
            if k == "items":
                k = "items_"
            if k not in self.keys():
                self[k] = v
    def isBoolean(self):
        # Truthy iff the schema uses a boolean connector (anyOf/allOf/...).
        return self.keys() & _constants.Jconnectors
    def isUninhabited(self):
        return self._isUninhabited()
    def _isUninhabited(self):
        # Subclasses override; the base implementation returns None
        # (falsy), i.e. "inhabited".
        pass
    def meet(self, s2):
        pass
    def join(self, s2):
        pass
    def isSubtype(self, s2):
        """Return True iff every value of self is also a value of s2."""
        # {} and True are the top schema; anything is a subtype of them.
        if s2 == {} or s2 == True or self == s2:
            return True
        return self._isSubtype(s2)
    def isSubtype_handle_rhs(self, s2, isSubtype_cb):
        """Dispatch on the rhs: connectors here, primitives via callback.

        NOTE(review): can fall through and return None (e.g. rhs ``not``).
        """
        if s2.isBoolean():
            # TODO revisit all of this. They are wrong.
            if "anyOf" in s2:
                return any(self.isSubtype(s) for s in s2["anyOf"])
            elif "allOf" in s2:
                return all(self.isSubtype(s) for s in s2["allOf"])
            elif "oneOf" in s2:
                return one(self.isSubtype(s) for s in s2["oneOf"])
            elif "not" in s2:
                # TODO
                print("No handling of not yet.")
                return None
        else:
            print_db("cb on rhs")
            return isSubtype_cb(self, s2)
class JSONTypeString(JSONschema):
    """Schema node for the JSON "string" primitive type."""
    kw_defaults = {"minLength": 0, "maxLength": infinity, "pattern": ".*"}
    def __init__(self, s):
        super().__init__(s)
    def _isUninhabited(self):
        # No string can satisfy an empty length window.
        return self.minLength > self.maxLength
    def meet(self, s):
        pass
    def _isSubtype(self, s2):
        def _isStringSubtype(self, s2):
            if s2.type != "string":
                return False
            # Length windows must nest: ours inside s2's.
            is_sub_interval = is_sub_interval_from_optional_ranges(
                self.minLength, self.maxLength, s2.minLength, s2.maxLength)
            if not is_sub_interval:
                return False
            #
            # at this point, length is compatible,
            # so we should now worry about pattern only.
            if s2.pattern == None or s2.pattern == "":
                return True
            elif self.pattern == None or self.pattern == "":
                return False
            elif self.pattern == s2.pattern:
                return True
            else:
                # Regular-language containment via greenery:
                # L(self) subseteq L(s2) iff L(self) & complement(L(s2))
                # is the empty language.
                regex = parse(self.pattern)
                regex2 = parse(s2.pattern)
                result = regex & regex2.everythingbut()
                if result.empty():
                    return True
                else:
                    return False
        return super().isSubtype_handle_rhs(s2, _isStringSubtype)
def JSONNumericFactory(s):
    """Build the numeric schema wrapper appropriate for dict ``s``.

    A "number" whose ``multipleOf`` is integral is really an integer
    schema: its bounds are tightened to integers and a JSONTypeInteger
    is returned.  Otherwise a JSONTypeNumber is built; a declared
    "integer" goes straight to JSONTypeInteger.

    Fix: replaced ``!= None`` comparisons with ``is not None`` (PEP 8
    identity comparison for singletons); behavior is unchanged for the
    numeric values involved.
    """
    if s.get("type") == "number":
        if s.get("multipleOf") and float(s.get("multipleOf")).is_integer():
            s["type"] = "integer"
            if s.get("minimum") is not None:  # -I.inf:
                # Exclusive bound: floor now -- the +1 shift happens later
                # when the integer interval is built.  Inclusive bound:
                # round inwards with ceil.
                s["minimum"] = math.floor(s.get("minimum")) if s.get(
                    "exclusiveMinimum") else math.ceil(s.get("minimum"))
            if s.get("maximum") is not None:  # I.inf:
                s["maximum"] = math.ceil(s.get("maximum")) if s.get(
                    "exclusiveMaximum") else math.floor(s.get("maximum"))
            return JSONTypeInteger(s)
        else:
            return JSONTypeNumber(s)
    else:
        return JSONTypeInteger(s)
class JSONTypeInteger(JSONschema):
    """Schema node for the JSON "integer" primitive type."""
    kw_defaults = {"minimum": -infinity, "maximum": infinity,
                   "exclusiveMinimum": False, "exclusiveMaximum": False, "multipleOf": None}
    def __init__(self, s):
        super().__init__(s)
    def build_interval_draft4(self):
        # Integers never need open intervals: a draft-4 boolean
        # exclusive bound is folded into a closed one by shifting the
        # endpoint by 1.
        if self.exclusiveMinimum and self.exclusiveMaximum:
            self.interval = I.closed(self.minimum+1, self.maximum-1)
        elif self.exclusiveMinimum:
            self.interval = I.closed(self.minimum+1, self.maximum)
        elif self.exclusiveMaximum:
            self.interval = I.closed(self.minimum, self.maximum-1)
        else:
            self.interval = I.closed(self.minimum, self.maximum)
    def _isUninhabited(self):
        self.build_interval_draft4()
        # NOTE(review): ``self.multipleOf not in self.interval`` tests the
        # divisor itself against the bounds, not whether any *multiple*
        # of it lies inside (e.g. minimum=10, multipleOf=5 would be
        # declared uninhabited) -- verify intent.
        return self.interval.is_empty() or \
            (self.multipleOf != None and self.multipleOf not in self.interval)
    def meet(self, s):
        pass
    def _isSubtype(self, s2):
        def _isIntegerSubtype(self, s2):
            # An integer schema may be a subtype of integer or number.
            if s2.type not in ["integer", "number"]:
                return False
            #
            is_sub_interval = self.interval in s2.interval
            if not is_sub_interval:
                print_db("num__00")
                return False
            #
            # multipleOf compatibility: our divisor must be a multiple of
            # the rhs divisor, or the rhs imposes none (or the trivial 1).
            if (self.multipleOf == s2.multipleOf) \
                    or (self.multipleOf != None and s2.multipleOf == None) \
                    or (self.multipleOf != None and s2.multipleOf != None and self.multipleOf % s2.multipleOf == 0) \
                    or (self.multipleOf == None and s2.multipleOf == 1):
                print_db("num__02")
                return True
            # NOTE(review): falls through (returns None) when neither
            # branch above fires and the next condition is False.
            if self.multipleOf == None and s2.multipleOf != None:
                return False
        return super().isSubtype_handle_rhs(s2, _isIntegerSubtype)
class JSONTypeNumber(JSONschema):
    """Schema node for the JSON "number" primitive type."""
    kw_defaults = {"minimum": -infinity, "maximum": infinity,
                   "exclusiveMinimum": False, "exclusiveMaximum": False, "multipleOf": None}
    def __init__(self, s):
        super().__init__(s)
    def build_interval_draft4(self):
        # Reals keep genuinely open/half-open intervals for draft-4
        # boolean exclusive bounds.
        if self.exclusiveMinimum and self.exclusiveMaximum:
            self.interval = I.open(self.minimum, self.maximum)
        elif self.exclusiveMinimum:
            self.interval = I.openclosed(self.minimum, self.maximum)
        elif self.exclusiveMaximum:
            self.interval = I.closedopen(self.minimum, self.maximum)
        else:
            self.interval = I.closed(self.minimum, self.maximum)
    def _isUninhabited(self):
        self.build_interval_draft4()
        # NOTE(review): as for the integer node, this tests the divisor
        # itself against the bounds rather than whether a multiple fits
        # inside -- verify intent.
        return self.interval.is_empty() or \
            (self.multipleOf != None and self.multipleOf not in self.interval)
    def meet(self, s):
        pass
    def _isSubtype(self, s2):
        def _isNumberSubtype(self, s2):
            if s2.type != "number":
                return False
            #
            is_sub_interval = self.interval in s2.interval
            if not is_sub_interval:
                print_db("num__00")
                return False
            #
            # NOTE(review): dead branch -- s2.type == "integer" was
            # already rejected by the first check above.
            if self.type == "number" and s2.type == "integer":
                print_db("num__01")
                return False
            #
            # multipleOf compatibility: our divisor must be a multiple of
            # the rhs divisor, or the rhs imposes none (or the trivial 1).
            # NOTE(review): falls through (returns None) otherwise.
            if (self.multipleOf == s2.multipleOf) \
                    or (self.multipleOf != None and s2.multipleOf == None) \
                    or (self.multipleOf != None and s2.multipleOf != None and self.multipleOf % s2.multipleOf == 0) \
                    or (self.multipleOf == None and s2.multipleOf == 1):
                print_db("num__02")
                return True
        return super().isSubtype_handle_rhs(s2, _isNumberSubtype)
class JSONTypeBoolean(JSONschema):
    """Schema node for the JSON "boolean" primitive type."""
    kw_defaults = {}

    def __init__(self, s):
        super().__init__(s)

    def _isSubtype(self, s2):
        def _isBooleanSubtype(self, s2):
            # A boolean schema is a subtype only of another boolean schema.
            return s2.type == "boolean"
        return super().isSubtype_handle_rhs(s2, _isBooleanSubtype)
class JSONTypeNull(JSONschema):
    """Schema node for the JSON "null" primitive type."""
    kw_defaults = {}

    def __init__(self, s):
        super().__init__(s)

    def _isSubtype(self, s2):
        def _isNullSubtype(self, s2):
            # A null schema is a subtype only of another null schema.
            return s2.type == "null"
        return super().isSubtype_handle_rhs(s2, _isNullSubtype)
class JSONTypeObject(JSONschema):
    """Schema node for the JSON "object" type.

    NOTE(review): subtype logic is not implemented -- the inner checker
    returns None for any object/object comparison.
    """
    kw_defaults = {"properties": {}, "additionalProperties": {}, "required": [
    ], "minProperties": 0, "maxProperties": infinity, "dependencies": {}, "patternProperties": {}}
    def __init__(self, s):
        super().__init__(s)
    def meet(self, s2):
        pass
    def _isSubtype(self, s2):
        def _isObjectSubtype(self, s2):
            # Unimplemented placeholder.
            pass
        return super().isSubtype_handle_rhs(s2, _isObjectSubtype)
class JSONTypeArray(JSONschema):
    """Schema node for the JSON "array" type (draft-4 items/additionalItems)."""
    kw_defaults = {"minItems": 0, "maxItems": infinity,
                   "items": JSONTypeObject({}), "additionalItems": JSONTypeObject({}), "uniqueItems": False}
    def __init__(self, s):
        super().__init__(s)
    def _isUninhabited(self):
        # NOTE(review): ``self.items`` resolves to dict.items (the bound
        # method) -- the schema keyword is stored under ``items_`` by
        # updateKeys().  is_list(dict.items) is presumably always False,
        # so the second clause never fires; confirm whether ``items_``
        # was intended here.
        return (self.minItems > self.maxItems) or \
            (is_list(self.items) and self.additionalItems ==
             False and self.minItems > len(self.items))
    def meet(self, s2):
        pass
    def _isSubtype(self, s2):
        def _isArraySubtype(self, s2):
            print_db("in array subtype")
            if s2.type != "array":
                return False
            #
            # -- minItems and maxItems: our size window must nest in s2's.
            is_sub_interval = is_sub_interval_from_optional_ranges(
                self.minItems, self.maxItems, s2.minItems, s2.maxItems)
            # also takes care of {'items' = [..], 'additionalItems' = False}
            if not is_sub_interval:
                print_db("__01__")
                return False
            #
            # -- uniqueItems
            # TODO Double-check. Could be more subtle?
            # A possibly-duplicating array is not a subtype of a
            # unique-only array.
            if not self.uniqueItems and s2.uniqueItems:
                print_db("__02__")
                return False
            #
            # -- items: case split on dict-vs-list form on each side.
            # no need to check additionalItems
            if is_dict(self.items_):
                if is_dict(s2.items_):
                    # dict/dict: element schemas must be in subtype relation.
                    print_db(self.items_)
                    print_db(s2.items_)
                    if self.items_.isSubtype(s2.items_):
                        print_db("__05__")
                        return True
                    else:
                        print_db("__06__")
                        return False
                elif is_list(s2.items_):
                    # dict/list: our single element schema must fit every
                    # positional schema (and the rhs additionalItems).
                    if s2.additionalItems == False:
                        print_db("__07__")
                        return False
                    elif s2.additionalItems == True:
                        for i in s2.items_:
                            if not self.items_.isSubtype(i):
                                print_db("__08__")
                                return False
                        print_db("__09__")
                        return True
                    elif is_dict(s2.additionalItems):
                        for i in s2.items_:
                            if not self.items_.isSubtype(i):
                                print_db("__10__")
                                return False
                        if self.items_.isSubtype(s2.additionalItems):
                            print_db("__11__")
                            return True
                        else:
                            print_db("__12__")
                            return False
            #
            elif is_list(self.items_):
                print_db("lhs is list")
                if is_dict(s2.items_):
                    # list/dict: every positional schema (and, when it is a
                    # schema, our additionalItems) must fit s2's element
                    # schema.
                    if self.additionalItems == False:
                        for i in self.items_:
                            if not i.isSubtype(s2.items_):
                                print_db("__13__")
                                return False
                        print_db("__14__")
                        return True
                    elif self.additionalItems == True:
                        for i in self.items_:
                            if not i.isSubtype(s2.items_):
                                return False
                        return True
                    elif is_dict(self.additionalItems):
                        for i in self.items_:
                            if not i.isSubtype(s2.items_):
                                return False
                        if self.additionalItems.isSubtype(s2.items_):
                            return True
                        else:
                            return False
                # now lhs and rhs are lists
                elif is_list(s2.items_):
                    print_db("lhs & rhs are lists")
                    len1 = len(self.items_)
                    len2 = len(s2.items_)
                    # Pairwise positional check over the common prefix.
                    for i, j in zip(self.items_, s2.items_):
                        if not i.isSubtype(j):
                            return False
                    if len1 == len2:
                        print_db("len1 == len2")
                        # Same arity: compare the additionalItems policies.
                        if self.additionalItems == s2.additionalItems:
                            return True
                        elif self.additionalItems == True and s2.additionalItems == False:
                            return False
                        elif self.additionalItems == False and s2.additionalItems == True:
                            return True
                        else:
                            return self.additionalItems.isSubtype(s2.additionalItems)
                    elif len1 > len2:
                        # Our extra positional schemas must fit the rhs
                        # additionalItems schema.
                        diff = len1 - len2
                        for i in range(len1-diff, len1):
                            if not self.items_[i].isSubtype(s2.additionalItems):
                                print_db("9999")
                                return False
                        print_db("8888")
                        return True
                    else:  # len2 > len 1
                        # Our additionalItems must fit each of the rhs's
                        # extra positional schemas, then its additionalItems.
                        diff = len2 - len1
                        for i in range(len2 - diff, len2):
                            print_db("self.additionalItems",
                                     self.additionalItems)
                            print_db(i, s2.items_[i])
                            if not self.additionalItems.isSubtype(s2.items_[i]):
                                print_db("!!!")
                                return False
                        return self.additionalItems.isSubtype(s2.additionalItems)
        return super().isSubtype_handle_rhs(s2, _isArraySubtype)
class JSONanyOf(JSONschema):
    """Connector schema: ``anyOf``."""

    def meet(self, s):
        pass

    def _isSubtype(self, s2):
        def _isAnyofSubtype(self, s2):
            # (a | b) <: s2 holds iff each disjunct is itself <: s2.
            return all(disjunct.isSubtype(s2) for disjunct in self.anyOf)
        return super().isSubtype_handle_rhs(s2, _isAnyofSubtype)
class JSONallOf(JSONschema):
    """Connector schema: ``allOf``.

    Fix: the first parameter of ``_isSubtype`` was mis-named ``Self``
    (capitalized); renamed to the conventional ``self``.  Behavior is
    unchanged -- the instance is still bound positionally.

    NOTE(review): requiring *every* conjunct to be a subtype is
    sufficient but not necessary for allOf on the left-hand side.
    """
    def meet(self, s):
        pass
    def _isSubtype(self, s2):
        def _isAllOfSubtype(self, s2):
            for s in self.allOf:
                if not s.isSubtype(s2):
                    return False
            return True
        return super().isSubtype_handle_rhs(s2, _isAllOfSubtype)
class JSONoneOf(JSONschema):
    """Connector schema: ``oneOf`` (unsupported on the left-hand side)."""

    def meet(self, s):
        pass

    def _isSubtype(self, s2):
        # Deliberately abort: the algorithm cannot yet decide oneOf on
        # the lhs.  (Message text preserved verbatim.)
        sys.exit("onOf on the lhs is not supported yet.")
class JSONnot(JSONschema):
    """Connector schema: ``not``.

    NOTE(review): subtype logic is unimplemented -- ``_isSubtype``
    returns None for any comparison.
    """
    def meet(self, s):
        pass
    def _isSubtype(self, s):
        pass
# Dispatch table: JSON primitive type name -> schema wrapper constructor.
# "integer" and "number" share JSONNumericFactory, which picks between
# JSONTypeInteger and JSONTypeNumber from the keywords present.
typeToConstructor = {
    "string": JSONTypeString,
    "integer": JSONNumericFactory,
    "number": JSONNumericFactory,
    "boolean": JSONTypeBoolean,
    "null": JSONTypeNull,
    "array": JSONTypeArray,
    "object": JSONTypeObject
}
# Dispatch table for the boolean schema connectors.
boolToConstructor = {
    "anyOf": JSONanyOf,
    "allOf": JSONallOf,
    "oneOf": JSONoneOf,
    "not": JSONnot
}
class JSONSchemaSubtypeFactory(json.JSONDecoder):
    """JSON decoder that canonicalizes every decoded object on the fly.

    Use as ``json.load(f, cls=JSONSchemaSubtypeFactory)``: each JSON
    object literal is passed through ``canoncalize_object`` as soon as
    the decoder builds it (bottom-up).
    """
    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(
            self, object_hook=self.object_hook, *args, **kwargs)
    def object_hook(self, d):
        # Called by the decoder for every decoded JSON object.
        print_db("object before canon.", d)
        # return JSONSchemaSubtypeFactory.canoncalize_object(d)
        return canoncalize_object(d)
# @staticmethod
# def canoncalize_object(d):
# validate_schema(d)
# if d == {}:
# return d
# t = d.get("type")
# if isinstance(t, list):
# return JSONSchemaSubtypeFactory.canoncalize_list_of_types(d)
# elif isinstance(t, str):
# return JSONSchemaSubtypeFactory.canoncalize_single_type(d)
# else:
# connectors = set(d.keys()) & set(_constants.Jconnectors)
# if connectors:
# return JSONSchemaSubtypeFactory.canoncalize_connectors(d)
# else:
# d["type"] = _constants.Jtypes
# return JSONSchemaSubtypeFactory.canoncalize_list_of_types(d)
# @staticmethod
# def canoncalize_list_of_types(d):
# t = d.get("type")
# choices = []
# for t_i in t:
# if t_i in typeToConstructor.keys():
# s_i = copy.deepcopy(d)
# s_i["type"] = t_i
# s_i = JSONSchemaSubtypeFactory.canoncalize_single_type(s_i)
# choices.append(s_i)
# else:
# print("Unknown schema type {} at:".format(t))
# print(d)
# print("Exiting...")
# sys.exit(1)
# d = {"anyOf": choices}
# # TODO do we need to return JSONanyOf ?
# return boolToConstructor.get("anyOf")(d)
# @staticmethod
# def canoncalize_single_type(d):
# t = d.get("type")
# # check type is known
# if t in typeToConstructor.keys():
# # remove irrelevant keywords
# tmp = copy.deepcopy(d)
# for k in tmp.keys():
# if k not in _constants.Jcommonkw and k not in _constants.JtypesToKeywords.get(t):
# d.pop(k)
# return typeToConstructor[t](d)
# else:
# print("Unknown schema type {} at:".format(t))
# print(d)
# print("Exiting...")
# sys.exit(1)
# @staticmethod
# def canoncalize_connectors(d):
# # TODO
# connectors = set(d.keys()) & set(_constants.Jconnectors)
# if len(connectors) == 1:
# return boolToConstructor[connectors.pop()](d)
# elif len(connectors) > 1:
# return boolToConstructor["allOf"]({"allOf": list({k: v} for k, v in d.items())})
# else:
# print("Something went wrong")
class JSONSubtypeChecker:
    """Canonicalize two plain-JSON schemas and test ``s1 <: s2``."""

    # Scalar and list values pass through canonicalization unchanged.
    _PASSTHROUGH = (str, numbers.Number, bool, type(None), list)

    def __init__(self, s1, s2):
        # validate_schema(s1)
        # validate_schema(s2)
        self.s1 = self.canoncalize_json(s1)
        self.s2 = self.canoncalize_json(s2)

    def canoncalize_json(self, obj):
        # Only JSON objects (dicts) need canonicalization; everything
        # else is returned as-is (and any other type yields None).
        if isinstance(obj, self._PASSTHROUGH):
            return obj
        if isinstance(obj, dict):
            return canoncalize_object(obj)

    def isSubtype(self):
        return self.s1.isSubtype(self.s2)
if __name__ == "__main__":
    # CLI entry point: compare the two schema files given as argv[1]/argv[2].
    s1_file = sys.argv[1]
    s2_file = sys.argv[2]
    print("Loading json schemas from:\n{}\n{}\n".format(s1_file, s2_file))
    #######################################
    # Usage scenario 1: canonicalize while decoding, via the custom decoder.
    with open(s1_file, 'r') as f1:
        s1 = json.load(f1, cls=JSONSchemaSubtypeFactory)
    with open(s2_file, 'r') as f2:
        s2 = json.load(f2, cls=JSONSchemaSubtypeFactory)
    print(s1)
    print(s2)
    print("Usage scenario 1:", s1.isSubtype(s2))
    #######################################
    # Usage scenario 2: load plain JSON and canonicalize inside the checker.
    with open(s1_file, 'r') as f1:
        s1 = json.load(f1)
    with open(s2_file, 'r') as f2:
        s2 = json.load(f2)
    print(s1)
    print(s2)
print("Usage scenario 2:", JSONSubtypeChecker(s1, s2).isSubtype()) | '''
Created on June 24, 2019
@author: <NAME>
'''
import copy
import json
import sys
import math
import numbers
import intervals as I
from abc import ABC, abstractmethod
from greenery.lego import parse
from intervals import inf as infinity
import config
import _constants
from canoncalization import canoncalize_object
from _normalizer import lazy_normalize
from _utils import (
validate_schema,
print_db,
is_sub_interval_from_optional_ranges,
is_num,
is_list,
is_dict,
is_empty_dict_or_none,
is_dict_or_true,
one
)
class JSONschema(dict):
kw_defaults = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.validate()
self.updateKeys()
# self.canoncalize()
if self.isUninhabited():
sys.exit("Found an uninhabited type at: " + str(self))
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: ", name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: ", name)
def validate(self):
validate_schema(self)
def updateKeys(self):
for k, v in self.kw_defaults.items():
if k == "items":
k = "items_"
if k not in self.keys():
self[k] = v
def isBoolean(self):
return self.keys() & _constants.Jconnectors
def isUninhabited(self):
return self._isUninhabited()
def _isUninhabited(self):
pass
def meet(self, s2):
pass
def join(self, s2):
pass
def isSubtype(self, s2):
if s2 == {} or s2 == True or self == s2:
return True
return self._isSubtype(s2)
def isSubtype_handle_rhs(self, s2, isSubtype_cb):
if s2.isBoolean():
# TODO revisit all of this. They are wrong.
if "anyOf" in s2:
return any(self.isSubtype(s) for s in s2["anyOf"])
elif "allOf" in s2:
return all(self.isSubtype(s) for s in s2["allOf"])
elif "oneOf" in s2:
return one(self.isSubtype(s) for s in s2["oneOf"])
elif "not" in s2:
# TODO
print("No handling of not yet.")
return None
else:
print_db("cb on rhs")
return isSubtype_cb(self, s2)
class JSONTypeString(JSONschema):
kw_defaults = {"minLength": 0, "maxLength": infinity, "pattern": ".*"}
def __init__(self, s):
super().__init__(s)
def _isUninhabited(self):
return self.minLength > self.maxLength
def meet(self, s):
pass
def _isSubtype(self, s2):
def _isStringSubtype(self, s2):
if s2.type != "string":
return False
is_sub_interval = is_sub_interval_from_optional_ranges(
self.minLength, self.maxLength, s2.minLength, s2.maxLength)
if not is_sub_interval:
return False
#
# at this point, length is compatible,
# so we should now worry about pattern only.
if s2.pattern == None or s2.pattern == "":
return True
elif self.pattern == None or self.pattern == "":
return False
elif self.pattern == s2.pattern:
return True
else:
regex = parse(self.pattern)
regex2 = parse(s2.pattern)
result = regex & regex2.everythingbut()
if result.empty():
return True
else:
return False
return super().isSubtype_handle_rhs(s2, _isStringSubtype)
def JSONNumericFactory(s):
if s.get("type") == "number":
if s.get("multipleOf") and float(s.get("multipleOf")).is_integer():
s["type"] = "integer"
if s.get("minimum") != None: # -I.inf:
s["minimum"] = math.floor(s.get("minimum")) if s.get(
"exclusiveMinimum") else math.ceil(s.get("minimum"))
if s.get("maximum") != None: # I.inf:
s["maximum"] = math.ceil(s.get("maximum")) if s.get(
"exclusiveMaximum") else math.floor(s.get("maximum"))
return JSONTypeInteger(s)
else:
return JSONTypeNumber(s)
else:
return JSONTypeInteger(s)
class JSONTypeInteger(JSONschema):
kw_defaults = {"minimum": -infinity, "maximum": infinity,
"exclusiveMinimum": False, "exclusiveMaximum": False, "multipleOf": None}
def __init__(self, s):
super().__init__(s)
def build_interval_draft4(self):
if self.exclusiveMinimum and self.exclusiveMaximum:
self.interval = I.closed(self.minimum+1, self.maximum-1)
elif self.exclusiveMinimum:
self.interval = I.closed(self.minimum+1, self.maximum)
elif self.exclusiveMaximum:
self.interval = I.closed(self.minimum, self.maximum-1)
else:
self.interval = I.closed(self.minimum, self.maximum)
def _isUninhabited(self):
self.build_interval_draft4()
return self.interval.is_empty() or \
(self.multipleOf != None and self.multipleOf not in self.interval)
def meet(self, s):
pass
def _isSubtype(self, s2):
def _isIntegerSubtype(self, s2):
if s2.type not in ["integer", "number"]:
return False
#
is_sub_interval = self.interval in s2.interval
if not is_sub_interval:
print_db("num__00")
return False
#
if (self.multipleOf == s2.multipleOf) \
or (self.multipleOf != None and s2.multipleOf == None) \
or (self.multipleOf != None and s2.multipleOf != None and self.multipleOf % s2.multipleOf == 0) \
or (self.multipleOf == None and s2.multipleOf == 1):
print_db("num__02")
return True
if self.multipleOf == None and s2.multipleOf != None:
return False
return super().isSubtype_handle_rhs(s2, _isIntegerSubtype)
class JSONTypeNumber(JSONschema):
kw_defaults = {"minimum": -infinity, "maximum": infinity,
"exclusiveMinimum": False, "exclusiveMaximum": False, "multipleOf": None}
def __init__(self, s):
super().__init__(s)
def build_interval_draft4(self):
if self.exclusiveMinimum and self.exclusiveMaximum:
self.interval = I.open(self.minimum, self.maximum)
elif self.exclusiveMinimum:
self.interval = I.openclosed(self.minimum, self.maximum)
elif self.exclusiveMaximum:
self.interval = I.closedopen(self.minimum, self.maximum)
else:
self.interval = I.closed(self.minimum, self.maximum)
def _isUninhabited(self):
self.build_interval_draft4()
return self.interval.is_empty() or \
(self.multipleOf != None and self.multipleOf not in self.interval)
def meet(self, s):
pass
def _isSubtype(self, s2):
def _isNumberSubtype(self, s2):
if s2.type != "number":
return False
#
is_sub_interval = self.interval in s2.interval
if not is_sub_interval:
print_db("num__00")
return False
#
if self.type == "number" and s2.type == "integer":
print_db("num__01")
return False
#
if (self.multipleOf == s2.multipleOf) \
or (self.multipleOf != None and s2.multipleOf == None) \
or (self.multipleOf != None and s2.multipleOf != None and self.multipleOf % s2.multipleOf == 0) \
or (self.multipleOf == None and s2.multipleOf == 1):
print_db("num__02")
return True
return super().isSubtype_handle_rhs(s2, _isNumberSubtype)
class JSONTypeBoolean(JSONschema):
kw_defaults = {}
def __init__(self, s):
super().__init__(s)
def _isSubtype(self, s2):
def _isBooleanSubtype(self, s2):
if s2.type == "boolean":
return True
else:
return False
return super().isSubtype_handle_rhs(s2, _isBooleanSubtype)
class JSONTypeNull(JSONschema):
kw_defaults = {}
def __init__(self, s):
super().__init__(s)
def _isSubtype(self, s2):
def _isNullSubtype(self, s2):
if s2.type == "null":
return True
else:
return False
return super().isSubtype_handle_rhs(s2, _isNullSubtype)
class JSONTypeObject(JSONschema):
kw_defaults = {"properties": {}, "additionalProperties": {}, "required": [
], "minProperties": 0, "maxProperties": infinity, "dependencies": {}, "patternProperties": {}}
def __init__(self, s):
super().__init__(s)
def meet(self, s2):
pass
def _isSubtype(self, s2):
def _isObjectSubtype(self, s2):
pass
return super().isSubtype_handle_rhs(s2, _isObjectSubtype)
class JSONTypeArray(JSONschema):
kw_defaults = {"minItems": 0, "maxItems": infinity,
"items": JSONTypeObject({}), "additionalItems": JSONTypeObject({}), "uniqueItems": False}
def __init__(self, s):
super().__init__(s)
def _isUninhabited(self):
return (self.minItems > self.maxItems) or \
(is_list(self.items) and self.additionalItems ==
False and self.minItems > len(self.items))
def meet(self, s2):
pass
def _isSubtype(self, s2):
def _isArraySubtype(self, s2):
print_db("in array subtype")
if s2.type != "array":
return False
#
#
# self = JsonArray(self)
# s2 = JsonArray(s2)
#
# uninhabited = handle_uninhabited_types(self, s2)
# if uninhabited != None:
# return uninhabited
#
# -- minItems and maxItems
is_sub_interval = is_sub_interval_from_optional_ranges(
self.minItems, self.maxItems, s2.minItems, s2.maxItems)
# also takes care of {'items' = [..], 'additionalItems' = False}
if not is_sub_interval:
print_db("__01__")
return False
#
# -- uniqueItemsue
# TODO Double-check. Could be more subtle?
if not self.uniqueItems and s2.uniqueItems:
print_db("__02__")
return False
#
# -- items = {not empty}
# no need to check additionalItems
if is_dict(self.items_):
if is_dict(s2.items_):
print_db(self.items_)
print_db(s2.items_)
# if subschemachecker.Checker.is_subtype(self.items_, s2.items_):
if self.items_.isSubtype(s2.items_):
print_db("__05__")
return True
else:
print_db("__06__")
return False
elif is_list(s2.items_):
if s2.additionalItems == False:
print_db("__07__")
return False
elif s2.additionalItems == True:
for i in s2.items_:
# if not subschemachecker.Checker.is_subtype(self.items_, i):
if not self.items_.isSubtype(i):
print_db("__08__")
return False
print_db("__09__")
return True
elif is_dict(s2.additionalItems):
for i in s2.items_:
# if not subschemachecker.Checker.is_subtype(self.items_, i):
if not self.items_.isSubtype(i):
print_db("__10__")
return False
# if subschemachecker.Checker.is_subtype(self.items_, s2.additionalItems):
if self.items_.isSubtype(s2.additionalItems):
print_db("__11__")
return True
else:
print_db("__12__")
return False
#
elif is_list(self.items_):
print_db("lhs is list")
if is_dict(s2.items_):
if self.additionalItems == False:
for i in self.items_:
# if not subschemachecker.Checker.is_subtype(i, s2.items_):
if not i.isSubtype(s2.items_):
print_db("__13__")
return False
print_db("__14__")
return True
elif self.additionalItems == True:
for i in self.items_:
# if not subschemachecker.Checker.is_subtype(i, s2.items_):
if not i.isSubtype(s2.items_):
return False
return True
elif is_dict(self.additionalItems):
for i in self.items_:
# if not subschemachecker.Checker.is_subtype(i, s2.items_):
if not i.isSubtype(s2.items_):
return False
# if subschemachecker.Checker.is_subtype(self.additionalItems, s2.items_):
if self.additionalItems.isSubtype(s2.items_):
return True
else:
return False
# now lhs and rhs are lists
elif is_list(s2.items_):
print_db("lhs & rhs are lists")
len1 = len(self.items_)
len2 = len(s2.items_)
for i, j in zip(self.items_, s2.items_):
# if not subschemachecker.Checker.is_subtype(i, j):
if not i.isSubtype(j):
return False
if len1 == len2:
print_db("len1 == len2")
if self.additionalItems == s2.additionalItems:
return True
elif self.additionalItems == True and s2.additionalItems == False:
return False
elif self.additionalItems == False and s2.additionalItems == True:
return True
else:
# return subschemachecker.Checker.is_subtype(self.additionalItems, s2.additionalItems)
return self.additionalItems.isSubtype(s2.additionalItems)
elif len1 > len2:
diff = len1 - len2
for i in range(len1-diff, len1):
# if not subschemachecker.Checker.is_subtype(self.items_[i], s2.additionalItems):
if not self.items_[i].isSubtype(s2.additionalItems):
print_db("9999")
return False
print_db("8888")
return True
else: # len2 > len 1
# if self.additionalItems:
diff = len2 - len1
for i in range(len2 - diff, len2):
print_db("self.additionalItems",
self.additionalItems)
print_db(i, s2.items_[i])
# if not subschemachecker.Checker.is_subtype(self.additionalItems, s2.items_[i]):
if not self.additionalItems.isSubtype(s2.items_[i]):
print_db("!!!")
return False
# return subschemachecker.Checker.is_subtype(self.additionalItems, s2.additionalItems)
return self.additionalItems.isSubtype(s2.additionalItems)
return super().isSubtype_handle_rhs(s2, _isArraySubtype)
class JSONanyOf(JSONschema):
def meet(self, s):
pass
def _isSubtype(self, s2):
def _isAnyofSubtype(self, s2):
for s in self.anyOf:
if not s.isSubtype(s2):
return False
return True
return super().isSubtype_handle_rhs(s2, _isAnyofSubtype)
class JSONallOf(JSONschema):
def meet(self, s):
pass
def _isSubtype(Self, s2):
def _isAllOfSubtype(self, s2):
for s in self.allOf:
if not s.isSubtype(s2):
return False
return True
return super().isSubtype_handle_rhs(s2, _isAllOfSubtype)
class JSONoneOf(JSONschema):
def meet(self, s):
pass
def _isSubtype(self, s2):
sys.exit("onOf on the lhs is not supported yet.")
class JSONnot(JSONschema):
def meet(self, s):
pass
def _isSubtype(self, s):
pass
typeToConstructor = {
"string": JSONTypeString,
"integer": JSONNumericFactory,
"number": JSONNumericFactory,
"boolean": JSONTypeBoolean,
"null": JSONTypeNull,
"array": JSONTypeArray,
"object": JSONTypeObject
}
boolToConstructor = {
"anyOf": JSONanyOf,
"allOf": JSONallOf,
"oneOf": JSONoneOf,
"not": JSONnot
}
class JSONSchemaSubtypeFactory(json.JSONDecoder):
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(
self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, d):
print_db("object before canon.", d)
# return JSONSchemaSubtypeFactory.canoncalize_object(d)
return canoncalize_object(d)
# @staticmethod
# def canoncalize_object(d):
# validate_schema(d)
# if d == {}:
# return d
# t = d.get("type")
# if isinstance(t, list):
# return JSONSchemaSubtypeFactory.canoncalize_list_of_types(d)
# elif isinstance(t, str):
# return JSONSchemaSubtypeFactory.canoncalize_single_type(d)
# else:
# connectors = set(d.keys()) & set(_constants.Jconnectors)
# if connectors:
# return JSONSchemaSubtypeFactory.canoncalize_connectors(d)
# else:
# d["type"] = _constants.Jtypes
# return JSONSchemaSubtypeFactory.canoncalize_list_of_types(d)
# @staticmethod
# def canoncalize_list_of_types(d):
# t = d.get("type")
# choices = []
# for t_i in t:
# if t_i in typeToConstructor.keys():
# s_i = copy.deepcopy(d)
# s_i["type"] = t_i
# s_i = JSONSchemaSubtypeFactory.canoncalize_single_type(s_i)
# choices.append(s_i)
# else:
# print("Unknown schema type {} at:".format(t))
# print(d)
# print("Exiting...")
# sys.exit(1)
# d = {"anyOf": choices}
# # TODO do we need to return JSONanyOf ?
# return boolToConstructor.get("anyOf")(d)
# @staticmethod
# def canoncalize_single_type(d):
# t = d.get("type")
# # check type is known
# if t in typeToConstructor.keys():
# # remove irrelevant keywords
# tmp = copy.deepcopy(d)
# for k in tmp.keys():
# if k not in _constants.Jcommonkw and k not in _constants.JtypesToKeywords.get(t):
# d.pop(k)
# return typeToConstructor[t](d)
# else:
# print("Unknown schema type {} at:".format(t))
# print(d)
# print("Exiting...")
# sys.exit(1)
# @staticmethod
# def canoncalize_connectors(d):
# # TODO
# connectors = set(d.keys()) & set(_constants.Jconnectors)
# if len(connectors) == 1:
# return boolToConstructor[connectors.pop()](d)
# elif len(connectors) > 1:
# return boolToConstructor["allOf"]({"allOf": list({k: v} for k, v in d.items())})
# else:
# print("Something went wrong")
class JSONSubtypeChecker:
def __init__(self, s1, s2):
# validate_schema(s1)
# validate_schema(s2)
self.s1 = self.canoncalize_json(s1)
self.s2 = self.canoncalize_json(s2)
def canoncalize_json(self, obj):
if isinstance(obj, str) or isinstance(obj, numbers.Number) or isinstance(obj, bool) or isinstance(obj, type(None)) or isinstance(obj, list):
return obj
elif isinstance(obj, dict):
# return JSONSchemaSubtypeFactory.canoncalize_object(obj)
return canoncalize_object(obj)
def isSubtype(self):
return self.s1.isSubtype(self.s2)
if __name__ == "__main__":
s1_file = sys.argv[1]
s2_file = sys.argv[2]
print("Loading json schemas from:\n{}\n{}\n".format(s1_file, s2_file))
#######################################
with open(s1_file, 'r') as f1:
s1 = json.load(f1, cls=JSONSchemaSubtypeFactory)
with open(s2_file, 'r') as f2:
s2 = json.load(f2, cls=JSONSchemaSubtypeFactory)
print(s1)
print(s2)
print("Usage scenario 1:", s1.isSubtype(s2))
#######################################
with open(s1_file, 'r') as f1:
s1 = json.load(f1)
with open(s2_file, 'r') as f2:
s2 = json.load(f2)
print(s1)
print(s2)
print("Usage scenario 2:", JSONSubtypeChecker(s1, s2).isSubtype()) | pt | 0.127366 | 2.256122 | 2 |
eruditio/shared_apps/django_community/utils.py | genghisu/eruditio | 0 | 13621 | """
Various utilities functions used by django_community and
other apps to perform authentication related tasks.
"""
import hashlib, re
import django.forms as forms
from django.core.exceptions import ObjectDoesNotExist
from django.forms import ValidationError
import django.http as http
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth import logout as auth_logout
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django_community.models import UserOpenID, UserProfile
def openid_logout(request):
"""
Clears session which effectively logs out the current
OpenId user.
"""
request.session.flush()
def handle_logout(request):
"""
Log out.
"""
auth_logout(request)
def get_logged_user(request):
"""
Returns the current user who is logged in, checks for openid user first,
then for regular user, return None if no user is currently logged in
"""
if settings.OPENID_ENABLED and hasattr(request, 'openid'):
user = UserOpenID.objects.get_for_openid(request, request.openid)
if not user:
user = request.user
return user
def handle_login(request, data):
"""
Logs the user in based on form data from django_community.LoginForm.
"""
user = authenticate(username = data.get('username', None),
password = data.get('password', None))
user_object = User.objects.get(username = data.get('username', None))
if user is not None:
login(request, user)
return user
def handle_signup(request, data):
"""
Signs a user up based on form data from django_community.SignupForm.
"""
from django.contrib.auth.models import get_hexdigest
username = data.get('username', None)
email = data.get('email', None)
password = data.get('password', None)
try:
user = User.objects.get(username = username, email = email)
except ObjectDoesNotExist:
user = User(username = username, email = email)
user.save()
user.set_password(password)
user_profile = UserProfile.objects.get_user_profile(user)
user = authenticate(username = username, password = password)
login(request, user)
return user
def get_or_create_from_openid(openid):
"""
Returns an User with the given openid or
creates a new user and associates openid with that user.
"""
try:
user = User.objects.get(username = openid)
except ObjectDoesNotExist:
password = hashlib.sha256(openid).hexdigest()
user = User(username = openid, email = '', password = password)
user.save()
user.display_name = "%s_%s" % ('user', str(user.id))
user.save()
return user
def generate_random_user_name():
"""
Generates a random user name user_{user_id}_{salt}
to be used for creating new users.
"""
import random
current_users = User.objects.all().order_by('-id')
if current_users:
next_id = current_users[0].id + 1
else:
next_id = 1
random_salt = random.randint(1, 5000)
return 'user_%s_%s' % (str(next_id), str(random_salt))
def create_user_from_openid(request, openid):
"""
Creates a new User object associated with the given
openid.
"""
from django_community.config import OPENID_FIELD_MAPPING
from django_utils.request_helpers import get_ip
username = generate_random_user_name()
profile_attributes = {}
for attribute in OPENID_FIELD_MAPPING.keys():
mapped_attribute = OPENID_FIELD_MAPPING[attribute]
if openid.sreg and openid.sreg.get(attribute, ''):
profile_attributes[mapped_attribute] = openid.sreg.get(attribute, '')
new_user = User(username = username)
new_user.save()
new_openid = UserOpenID(openid = openid.openid, user = new_user)
new_openid.save()
new_user_profile = UserProfile.objects.get_user_profile(new_user)
for filled_attribute in profile_attributes.keys():
setattr(new_user, filled_attribute, profile_attributes[filled_attribute])
new_user_profile.save()
return new_user
def get_anon_user(request):
"""
Returns an anonmymous user corresponding to this IP address if one exists.
Else create an anonymous user and return it.
"""
try:
anon_user = User.objects.get(username = generate_anon_user_name(request))
except ObjectDoesNotExist:
anon_user = create_anon_user(request)
return anon_user
def create_anon_user(request):
"""
Creates a new anonymous user based on the ip provided by the request
object.
"""
anon_user_name = generate_anon_user_name(request)
anon_user = User(username = anon_user_name)
anon_user.save()
user_profile = UserProfile(user = anon_user, display_name = 'anonymous')
user_profile.save()
return anon_user
def generate_anon_user_name(request):
"""
Generate an anonymous user name based on and ip address.
"""
from django_utils.request_helpers import get_ip
ip = get_ip(request)
return "anon_user_%s" % (str(ip))
def is_anon_user(user):
"""
Determine if an user is anonymous or not.
"""
return user.username[0:10] == 'anon_user_'
def is_random(name):
"""
Determine if a user has a randomly generated display name.
"""
if len(name.split('_')) and name.startswith('user'):
return True
else:
return False
def process_ax_data(user, ax_data):
"""
Process OpenID AX data.
"""
import django_openidconsumer.config
emails = ax_data.get(django_openidconsumer.config.URI_GROUPS.get('email').get('type_uri', ''), '')
display_names = ax_data.get(django_openidconsumer.config.URI_GROUPS.get('alias').get('type_uri', ''), '')
if emails and not user.email.strip():
user.email = emails[0]
user.save()
if not user.profile.display_name.strip() or is_random(user.profile.display_name):
if display_names:
user.profile.display_name = display_names[0]
elif emails:
user.profile.display_name = emails[0].split('@')[0]
user.profile.save() | """
Various utilities functions used by django_community and
other apps to perform authentication related tasks.
"""
import hashlib, re
import django.forms as forms
from django.core.exceptions import ObjectDoesNotExist
from django.forms import ValidationError
import django.http as http
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth import logout as auth_logout
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django_community.models import UserOpenID, UserProfile
def openid_logout(request):
"""
Clears session which effectively logs out the current
OpenId user.
"""
request.session.flush()
def handle_logout(request):
"""
Log out.
"""
auth_logout(request)
def get_logged_user(request):
"""
Returns the current user who is logged in, checks for openid user first,
then for regular user, return None if no user is currently logged in
"""
if settings.OPENID_ENABLED and hasattr(request, 'openid'):
user = UserOpenID.objects.get_for_openid(request, request.openid)
if not user:
user = request.user
return user
def handle_login(request, data):
"""
Logs the user in based on form data from django_community.LoginForm.
"""
user = authenticate(username = data.get('username', None),
password = data.get('password', None))
user_object = User.objects.get(username = data.get('username', None))
if user is not None:
login(request, user)
return user
def handle_signup(request, data):
"""
Signs a user up based on form data from django_community.SignupForm.
"""
from django.contrib.auth.models import get_hexdigest
username = data.get('username', None)
email = data.get('email', None)
password = data.get('password', None)
try:
user = User.objects.get(username = username, email = email)
except ObjectDoesNotExist:
user = User(username = username, email = email)
user.save()
user.set_password(password)
user_profile = UserProfile.objects.get_user_profile(user)
user = authenticate(username = username, password = password)
login(request, user)
return user
def get_or_create_from_openid(openid):
"""
Returns an User with the given openid or
creates a new user and associates openid with that user.
"""
try:
user = User.objects.get(username = openid)
except ObjectDoesNotExist:
password = hashlib.sha256(openid).hexdigest()
user = User(username = openid, email = '', password = password)
user.save()
user.display_name = "%s_%s" % ('user', str(user.id))
user.save()
return user
def generate_random_user_name():
"""
Generates a random user name user_{user_id}_{salt}
to be used for creating new users.
"""
import random
current_users = User.objects.all().order_by('-id')
if current_users:
next_id = current_users[0].id + 1
else:
next_id = 1
random_salt = random.randint(1, 5000)
return 'user_%s_%s' % (str(next_id), str(random_salt))
def create_user_from_openid(request, openid):
"""
Creates a new User object associated with the given
openid.
"""
from django_community.config import OPENID_FIELD_MAPPING
from django_utils.request_helpers import get_ip
username = generate_random_user_name()
profile_attributes = {}
for attribute in OPENID_FIELD_MAPPING.keys():
mapped_attribute = OPENID_FIELD_MAPPING[attribute]
if openid.sreg and openid.sreg.get(attribute, ''):
profile_attributes[mapped_attribute] = openid.sreg.get(attribute, '')
new_user = User(username = username)
new_user.save()
new_openid = UserOpenID(openid = openid.openid, user = new_user)
new_openid.save()
new_user_profile = UserProfile.objects.get_user_profile(new_user)
for filled_attribute in profile_attributes.keys():
setattr(new_user, filled_attribute, profile_attributes[filled_attribute])
new_user_profile.save()
return new_user
def get_anon_user(request):
"""
Returns an anonmymous user corresponding to this IP address if one exists.
Else create an anonymous user and return it.
"""
try:
anon_user = User.objects.get(username = generate_anon_user_name(request))
except ObjectDoesNotExist:
anon_user = create_anon_user(request)
return anon_user
def create_anon_user(request):
"""
Creates a new anonymous user based on the ip provided by the request
object.
"""
anon_user_name = generate_anon_user_name(request)
anon_user = User(username = anon_user_name)
anon_user.save()
user_profile = UserProfile(user = anon_user, display_name = 'anonymous')
user_profile.save()
return anon_user
def generate_anon_user_name(request):
"""
Generate an anonymous user name based on and ip address.
"""
from django_utils.request_helpers import get_ip
ip = get_ip(request)
return "anon_user_%s" % (str(ip))
def is_anon_user(user):
"""
Determine if an user is anonymous or not.
"""
return user.username[0:10] == 'anon_user_'
def is_random(name):
"""
Determine if a user has a randomly generated display name.
"""
if len(name.split('_')) and name.startswith('user'):
return True
else:
return False
def process_ax_data(user, ax_data):
"""
Process OpenID AX data.
"""
import django_openidconsumer.config
emails = ax_data.get(django_openidconsumer.config.URI_GROUPS.get('email').get('type_uri', ''), '')
display_names = ax_data.get(django_openidconsumer.config.URI_GROUPS.get('alias').get('type_uri', ''), '')
if emails and not user.email.strip():
user.email = emails[0]
user.save()
if not user.profile.display_name.strip() or is_random(user.profile.display_name):
if display_names:
user.profile.display_name = display_names[0]
elif emails:
user.profile.display_name = emails[0].split('@')[0]
user.profile.save() | pt | 0.189038 | 2.444992 | 2 |
launch/test_motion.launch.py | RoboJackets/robocup-software | 200 | 13622 | import os
from pathlib import Path
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable, Shutdown
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
def generate_launch_description():
bringup_dir = Path(get_package_share_directory('rj_robocup'))
launch_dir = bringup_dir / 'launch'
stdout_linebuf_envvar = SetEnvironmentVariable(
'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')
grsim = Node(package='rj_robocup', executable='grSim', arguments=[])
radio = Node(package='rj_robocup',
executable='sim_radio_node',
output='screen',
on_exit=Shutdown())
control = Node(package='rj_robocup',
executable='control_node',
output='screen',
on_exit=Shutdown())
config_server = Node(package='rj_robocup',
executable='config_server',
output='screen',
on_exit=Shutdown())
vision_receiver_launch_path = str(launch_dir / "vision_receiver.launch.py")
vision_receiver = IncludeLaunchDescription(
PythonLaunchDescriptionSource(vision_receiver_launch_path))
ref_receiver = Node(package='rj_robocup',
executable='internal_referee_node',
output='screen',
on_exit=Shutdown())
vision_filter_launch_path = str(launch_dir / "vision_filter.launch.py")
vision_filter = IncludeLaunchDescription(
PythonLaunchDescriptionSource(vision_filter_launch_path))
return LaunchDescription([
grsim, stdout_linebuf_envvar, config_server, radio, control,
vision_receiver, vision_filter, ref_receiver
])
| import os
from pathlib import Path
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable, Shutdown
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
def generate_launch_description():
bringup_dir = Path(get_package_share_directory('rj_robocup'))
launch_dir = bringup_dir / 'launch'
stdout_linebuf_envvar = SetEnvironmentVariable(
'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')
grsim = Node(package='rj_robocup', executable='grSim', arguments=[])
radio = Node(package='rj_robocup',
executable='sim_radio_node',
output='screen',
on_exit=Shutdown())
control = Node(package='rj_robocup',
executable='control_node',
output='screen',
on_exit=Shutdown())
config_server = Node(package='rj_robocup',
executable='config_server',
output='screen',
on_exit=Shutdown())
vision_receiver_launch_path = str(launch_dir / "vision_receiver.launch.py")
vision_receiver = IncludeLaunchDescription(
PythonLaunchDescriptionSource(vision_receiver_launch_path))
ref_receiver = Node(package='rj_robocup',
executable='internal_referee_node',
output='screen',
on_exit=Shutdown())
vision_filter_launch_path = str(launch_dir / "vision_filter.launch.py")
vision_filter = IncludeLaunchDescription(
PythonLaunchDescriptionSource(vision_filter_launch_path))
return LaunchDescription([
grsim, stdout_linebuf_envvar, config_server, radio, control,
vision_receiver, vision_filter, ref_receiver
])
| none | 1 | 2.225633 | 2 |
demo.py | nikp29/eDensiometer | 2 | 13623 | # A Rapid Proof of Concept for the eDensiometer
# Copyright 2018, <NAME>. All Rights Reserved. Created with contributions from <NAME>.
# Imports
from PIL import Image
from pprint import pprint
import numpy as np
import time as time_
def millis(): # from https://stackoverflow.com/questions/5998245/get-current-time-in-milliseconds-in-python/6000198#6000198
return int(round(time_.time() * 1000))
start = millis()
# Constants
# BRIGHT_CUTOFF = 175
RED_CUTOFF = 200
GREEN_CUTOFF = 150
BLUE_CUTOFF = 200
# Pull from test.jpg image in local directory
temp = np.asarray(Image.open('test.jpg'))
print(temp.shape)
# Variable Initialization
result = np.zeros((temp.shape[0], temp.shape[1], temp.shape[2]))
temp_bright = np.zeros((temp.shape[0], temp.shape[1]))
count_total = 0
count_open = 0
# Cycle through image
for row in range(0, temp.shape[0]):
for element in range(0, temp.shape[1]):
count_total += 1
temp_bright[row, element] = (int(temp[row][element][0]) + int(temp[row][element][1]) + int(temp[row][element][2]))/3
# bright = temp_bright[row][element] > BRIGHT_CUTOFF
red_enough = temp[row][element][0] > RED_CUTOFF
green_enough = temp[row][element][1] > GREEN_CUTOFF
blue_enough = temp[row][element][2] > BLUE_CUTOFF
if red_enough and green_enough and blue_enough:
# print(temp[row, element])
count_open += 1
result[row, element] = [255, 255, 255]
# Save filtered image as final.jpg
final = Image.fromarray(result.astype('uint8'), 'RGB')
final.save('final.jpg')
# Return/Print Percent Coverage
percent_open = count_open/count_total
percent_cover = 1 - percent_open
end = millis()
print("Percent Open: " + str(percent_open))
print("Percent Cover: " + str(percent_cover))
runtime = end-start
print("Runtime in MS: " + str(runtime)) | # A Rapid Proof of Concept for the eDensiometer
# Copyright 2018, <NAME>. All Rights Reserved. Created with contributions from <NAME>.
# Imports
from PIL import Image
from pprint import pprint
import numpy as np
import time as time_
def millis(): # from https://stackoverflow.com/questions/5998245/get-current-time-in-milliseconds-in-python/6000198#6000198
return int(round(time_.time() * 1000))
start = millis()
# Constants
# BRIGHT_CUTOFF = 175
RED_CUTOFF = 200
GREEN_CUTOFF = 150
BLUE_CUTOFF = 200
# Pull from test.jpg image in local directory
temp = np.asarray(Image.open('test.jpg'))
print(temp.shape)
# Variable Initialization
result = np.zeros((temp.shape[0], temp.shape[1], temp.shape[2]))
temp_bright = np.zeros((temp.shape[0], temp.shape[1]))
count_total = 0
count_open = 0
# Cycle through image
for row in range(0, temp.shape[0]):
for element in range(0, temp.shape[1]):
count_total += 1
temp_bright[row, element] = (int(temp[row][element][0]) + int(temp[row][element][1]) + int(temp[row][element][2]))/3
# bright = temp_bright[row][element] > BRIGHT_CUTOFF
red_enough = temp[row][element][0] > RED_CUTOFF
green_enough = temp[row][element][1] > GREEN_CUTOFF
blue_enough = temp[row][element][2] > BLUE_CUTOFF
if red_enough and green_enough and blue_enough:
# print(temp[row, element])
count_open += 1
result[row, element] = [255, 255, 255]
# Save filtered image as final.jpg
final = Image.fromarray(result.astype('uint8'), 'RGB')
final.save('final.jpg')
# Return/Print Percent Coverage
percent_open = count_open/count_total
percent_cover = 1 - percent_open
end = millis()
print("Percent Open: " + str(percent_open))
print("Percent Cover: " + str(percent_cover))
runtime = end-start
print("Runtime in MS: " + str(runtime)) | pt | 0.149441 | 2.750566 | 3 |
chart/script/provenance_ycsb_thruput.py | RUAN0007/nusthesis | 0 | 13624 | <reponame>RUAN0007/nusthesis
import sys
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
import config
def main():
if 1 < len(sys.argv) :
diagram_path = sys.argv[1]
else:
diagram_path = ""
curDir = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.join(curDir, "data", "provenance", "ycsb_thruput")
x_axis, series_names, series = config.parse(data_path)
# print x_axis
# print series_names
# print series
blk_sizes = x_axis
xlabels = [str(int(x)/100) for x in blk_sizes]
series_count = len(series_names)
width, offsets = config.compute_width_offsets(series_count)
f, (ax) = plt.subplots()
# # f.set_size_inches(, 4)
for i, series_name in enumerate(series_names):
series_data = series[series_name]
series_offsets = [offsets[i]] * len(series_data)
base_xticks = range(len(series_data))
xticks = config.sum_list(base_xticks, series_offsets)
# print xticks
# print series_name
# print series_data
ax.bar(xticks, series_data, width=width, color=config.colors[series_name], edgecolor='black',align='center', label=series_name)
# ax.set_title("Throughput")
ax.set(xlabel=r'# of txns per block (x100)', ylabel='tps')
ax.set_xticks(base_xticks)
ax.set_xticklabels(xlabels)
ax.set_ylim([0, 2500])
handles, labels = ax.get_legend_handles_labels()
f.legend(handles, labels,
loc='upper center', ncol=1, bbox_to_anchor=(0.47, 0.90),
columnspacing=1, handletextpad=1, fontsize=20)
if diagram_path == "":
plt.tight_layout()
plt.show()
else:
f.tight_layout()
f.savefig(diagram_path, bbox_inches='tight')
if __name__ == "__main__":
sys.exit(main()) | import sys
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
import config
def main():
if 1 < len(sys.argv) :
diagram_path = sys.argv[1]
else:
diagram_path = ""
curDir = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.join(curDir, "data", "provenance", "ycsb_thruput")
x_axis, series_names, series = config.parse(data_path)
# print x_axis
# print series_names
# print series
blk_sizes = x_axis
xlabels = [str(int(x)/100) for x in blk_sizes]
series_count = len(series_names)
width, offsets = config.compute_width_offsets(series_count)
f, (ax) = plt.subplots()
# # f.set_size_inches(, 4)
for i, series_name in enumerate(series_names):
series_data = series[series_name]
series_offsets = [offsets[i]] * len(series_data)
base_xticks = range(len(series_data))
xticks = config.sum_list(base_xticks, series_offsets)
# print xticks
# print series_name
# print series_data
ax.bar(xticks, series_data, width=width, color=config.colors[series_name], edgecolor='black',align='center', label=series_name)
# ax.set_title("Throughput")
ax.set(xlabel=r'# of txns per block (x100)', ylabel='tps')
ax.set_xticks(base_xticks)
ax.set_xticklabels(xlabels)
ax.set_ylim([0, 2500])
handles, labels = ax.get_legend_handles_labels()
f.legend(handles, labels,
loc='upper center', ncol=1, bbox_to_anchor=(0.47, 0.90),
columnspacing=1, handletextpad=1, fontsize=20)
if diagram_path == "":
plt.tight_layout()
plt.show()
else:
f.tight_layout()
f.savefig(diagram_path, bbox_inches='tight')
if __name__ == "__main__":
sys.exit(main()) | it | 0.12075 | 2.477667 | 2 |
core/handlers/filters_chat.py | Smashulica/nebula8 | 0 | 13625 | from core.utilities.functions import delete_message
from core.utilities.message import message
from core.database.repository.group import GroupRepository
"""
This function allows you to terminate the type
of file that contains a message on telegram and filter it
"""
def init(update, context):
    """Inspect an incoming Telegram document and enforce per-group file filters.

    For file types with a DB-backed toggle (``*_filter`` columns), the message
    is deleted and an explanatory notice is posted when the toggle is 1.  The
    remaining MIME types are currently only logged (no enforcement yet).
    """
    apk = 'application/vnd.android.package-archive'
    doc = 'application/msword'
    docx = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
    exe = 'application/x-ms-dos-executable'
    gif = 'video/mp4'  # Telegram delivers animated GIFs as mp4 documents
    jpg = 'image/jpeg'
    mp3 = 'audio/mpeg'
    pdf = 'application/pdf'
    py = 'text/x-python'
    svg = 'image/svg+xml'
    txt = 'text/plain'
    targz = 'application/x-compressed-tar'
    wav = 'audio/x-wav'
    xml = 'application/xml'
    filezip = 'application/zip'
    msg = update.effective_message
    chat = update.effective_message.chat_id
    group = GroupRepository().getById(chat)
    if msg.document is None:
        return

    mime = msg.document.mime_type
    # MIME types -> (group settings column, human readable label); doc and docx
    # share one toggle.  The original code had an operator-precedence bug here
    # (`a == doc or a == docx and toggle` deleted every .doc regardless of the
    # group setting); the membership test below fixes that.
    enforced = [
        ((apk,), 'apk_filter', 'APK'),
        ((doc, docx), 'docx_filter', 'DOC/DOCX'),
        ((exe,), 'exe_filter', 'EXE'),
        ((gif,), 'gif_filter', 'GIF'),
        ((jpg,), 'jpg_filter', 'JPG'),
        ((targz,), 'targz_filter', 'TARGZ'),
        ((filezip,), 'zip_filter', 'ZIP'),
    ]
    for mimes, toggle, label in enforced:
        if mime in mimes and group[toggle] == 1:
            delete_message(update, context)
            message(update, context,
                    "#Automatic Filter Handler: <b>No " + label + " Allowed!</b>")

    # Not yet enforced: only logged for now (matches the original behaviour).
    logged_only = {wav: "WAV", xml: "XML", mp3: "MP3",
                   pdf: "PDF", py: "PY", svg: "SVG", txt: "TXT"}
    if mime in logged_only:
        print("NO " + logged_only[mime] + " ALLOWED")
from core.utilities.message import message
from core.database.repository.group import GroupRepository
"""
This function allows you to terminate the type
of file that contains a message on telegram and filter it
"""
def init(update, context):
apk = 'application/vnd.android.package-archive'
doc = 'application/msword'
docx = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
exe = 'application/x-ms-dos-executable'
gif = 'video/mp4'
jpg = 'image/jpeg'
mp3 = 'audio/mpeg'
pdf = 'application/pdf'
py = 'text/x-python'
svg = 'image/svg+xml'
txt = 'text/plain'
targz = 'application/x-compressed-tar'
wav = 'audio/x-wav'
xml = 'application/xml'
filezip = 'application/zip'
msg = update.effective_message
chat = update.effective_message.chat_id
group = GroupRepository().getById(chat)
if msg.document is not None:
#No APK Allowed
if msg.document.mime_type == apk and group['apk_filter'] == 1:
delete_message(update,context)
message(update, context, "#Automatic Filter Handler: <b>No APK Allowed!</b>")
#No DOC/DOCX Allowed
if msg.document.mime_type == doc or msg.document.mime_type == docx and group['docx_filter'] == 1:
delete_message(update,context)
message(update, context, "#Automatic Filter Handler: <b>No DOC/DOCX Allowed!</b>")
#No EXE Allowed
if msg.document.mime_type == exe and group['exe_filter'] == 1:
delete_message(update,context)
message(update, context, "#Automatic Filter Handler: <b>No EXE Allowed!</b>")
#No GIF Allowed
if msg.document.mime_type == gif and group['gif_filter'] == 1:
delete_message(update,context)
message(update, context, "#Automatic Filter Handler: <b>No GIF Allowed!</b>")
#No JPG Allowed
if msg.document.mime_type == jpg and group['jpg_filter'] == 1:
delete_message(update,context)
message(update, context, "#Automatic Filter Handler: <b>No JPG Allowed!</b>")
#No TARGZ Allowed
if msg.document.mime_type == targz and group['targz_filter'] == 1:
delete_message(update,context)
message(update, context, "#Automatic Filter Handler: <b>No TARGZ Allowed!</b>")
#No ZIP Allowed
if msg.document.mime_type == filezip and group['zip_filter'] == 1:
delete_message(update,context)
message(update, context, "#Automatic Filter Handler: <b>No ZIP Allowed!</b>")
if msg.document.mime_type == wav:
print("NO WAV ALLOWED")
if msg.document.mime_type == xml:
print("NO XML ALLOWED")
if msg.document.mime_type == mp3:
print("NO MP3 ALLOWED")
if msg.document.mime_type == pdf:
print("NO PDF ALLOWED")
if msg.document.mime_type == py:
print("NO PY ALLOWED")
if msg.document.mime_type == svg:
print("NO SVG ALLOWED")
if msg.document.mime_type == txt:
print("NO TXT ALLOWED") | pt | 0.306846 | 2.730032 | 3 |
mipsplusplus/parser.py | alexsocha/mipsplusplus | 1 | 13626 | <reponame>alexsocha/mipsplusplus
from mipsplusplus import utils
from mipsplusplus import operations
OPERATOR_ORDERING = [
['addressof', 'not', 'neg'],
['*', '/', '%'],
['+', '-'],
['<<', '>>', '<<<', '>>>'],
['<', '>', '<=', '>='],
['==', '!='],
['and', 'or', 'xor', 'nor'],
['as']
]
EXPR_OPERATORS = set([op for ops in OPERATOR_ORDERING for op in ops] + ['(', ')'])
def splitExpression(expression):
  """Tokenize an infix expression string into operand/operator tokens.

  Quoted text ('...' or "..."), square-bracketed text ([...]) and the argument
  list of a system function such as alloc(...) are absorbed into a single
  token.  Alphanumeric operators (e.g. 'and') are only recognised when
  surrounded by spaces; symbol operators need no surrounding whitespace.
  A trailing minus before an operand is folded into it ('-','8' -> '-8').
  """
  squareBracketDepth = 0   # depth of [...] nesting (kept inside one token)
  isSingleQuote = False    # currently inside '...' ?
  isDoubleQuote = False    # currently inside "..." ?
  funcBracketDepth = 0     # depth inside a system-function call like alloc(...)
  # Split expression on whitespace or single operators,
  # given it isn't in single quotes, double quotes, square brackets,
  # or within a a function such as alloc(...)
  tokenList = ['']
  tokenIdx = 0
  i = 0
  while i < len(expression):
    char = expression[i]
    if char == '\'': isSingleQuote = not isSingleQuote
    if char == '"': isDoubleQuote = not isDoubleQuote
    if isSingleQuote == False and isDoubleQuote == False:
      if funcBracketDepth == 0:
        if char == '[': squareBracketDepth += 1
        elif char == ']': squareBracketDepth -= 1
        elif char == '(':
          # An opening paren directly after a system-function name starts a
          # region that is copied verbatim into the current token.
          isSysFunc = False
          for func in utils.SYS_FUNCTIONS:
            if tokenList[tokenIdx] == func: isSysFunc = True
          if isSysFunc: funcBracketDepth += 1
      if funcBracketDepth == 0 and squareBracketDepth == 0:
        # Find the longest operator starting at position i; alphanumeric
        # operators are matched with surrounding spaces so identifiers
        # containing them (e.g. 'sand') are not split.
        nextOperator = None
        for op in EXPR_OPERATORS:
          spacedOp = ' {} '.format(op) if op.isalnum() else op
          if expression[i:].startswith(spacedOp):
            if nextOperator is None or len(spacedOp) > len(nextOperator):
              nextOperator = spacedOp
        if char.isspace() or nextOperator is not None:
          # Close the current token, emit the operator (if any), then skip
          # any following whitespace before resuming the scan.
          if tokenList[tokenIdx] != '':
            tokenList += ['']
            tokenIdx += 1
          if nextOperator is not None:
            tokenList[tokenIdx] += nextOperator.strip()
            tokenList += ['']
            tokenIdx += 1
            i += len(nextOperator)-1
          i += 1
          while i < len(expression):
            if not expression[i].isspace(): break
            else: i += 1
          continue
      else:
        # Inside a system-function call: only track paren depth.
        if char == '(': funcBracketDepth += 1
        elif char == ')': funcBracketDepth -= 1
    tokenList[tokenIdx] += char
    i += 1
  if len(tokenList) > 0 and tokenList[-1] == '':
    tokenList = tokenList[:-1]
  # Convert minus sign to negative e.g. ['+', '-', '8'] => ['+', '-8']
  newTokenList = []
  tokenIdx = 0
  while tokenIdx < len(tokenList):
    if tokenList[tokenIdx] == '-' and tokenIdx < len(tokenList)-1:
      if tokenIdx == 0 or tokenList[tokenIdx-1] in EXPR_OPERATORS:
        newTokenList.append('-' + tokenList[tokenIdx+1])
        tokenIdx += 2
        continue
    newTokenList.append(tokenList[tokenIdx])
    tokenIdx += 1
  return newTokenList
def infixToPostfix(tokenList, getToken = lambda item: item):
  """Convert an infix token list to postfix (RPN) using shunting-yard.

  getToken extracts the comparable token from each list item, so items may
  be richer objects than plain strings.  All operators are treated as
  left-associative.
  """
  # Precedence per operator: earlier groups in OPERATOR_ORDERING bind tighter.
  priorities = {
    op: len(OPERATOR_ORDERING) - level
    for level, ops in enumerate(OPERATOR_ORDERING)
    for op in ops
  }
  output = []
  pending = []  # operator stack
  for item in tokenList:
    token = getToken(item)
    if token == '(':
      pending.append(item)
    elif token == ')':
      # Flush operators back to the matching open paren, then discard it.
      while pending and getToken(pending[-1]) != '(':
        output.append(pending.pop())
      pending.pop()
    elif token in EXPR_OPERATORS:
      # Left-associative: pop operators of equal or higher precedence first.
      while (pending and getToken(pending[-1]) != '('
             and priorities[token] <= priorities[getToken(pending[-1])]):
        output.append(pending.pop())
      pending.append(item)
    else:
      output.append(item)
  while pending:
    output.append(pending.pop())
  return output
def isInBrackets(string, idx, b1='(', b2=')'):
  """Return True when string[idx] lies inside a b1..b2 bracket pair.

  The opening bracket itself counts as "inside"; the closing bracket does
  not.  Returns None when idx is out of range (original behaviour).
  """
  depth = 0
  for pos, ch in enumerate(string):
    if ch == b1: depth += 1
    if ch == b2: depth -= 1
    if pos == idx:
      return depth > 0
def isTopLevel(string, idx):
  """True when string[idx] is outside (), [], and both quoting styles.

  Returns None when idx is out of range, mirroring isInBrackets.
  """
  for opener, closer in (('(', ')'), ('[', ']')):
    if isInBrackets(string, idx, opener, closer):
      return False
  inSingle = False
  inDouble = False
  for pos, ch in enumerate(string):
    if ch == "'": inSingle = not inSingle
    if ch == '"': inDouble = not inDouble
    if pos == idx:
      return not (inSingle or inDouble)
| from mipsplusplus import utils
from mipsplusplus import operations
OPERATOR_ORDERING = [
['addressof', 'not', 'neg'],
['*', '/', '%'],
['+', '-'],
['<<', '>>', '<<<', '>>>'],
['<', '>', '<=', '>='],
['==', '!='],
['and', 'or', 'xor', 'nor'],
['as']
]
EXPR_OPERATORS = set([op for ops in OPERATOR_ORDERING for op in ops] + ['(', ')'])
def splitExpression(expression):
squareBracketDepth = 0
isSingleQuote = False
isDoubleQuote = False
funcBracketDepth = 0
# Split expression on whitespace or single operators,
# given it isn't in single quotes, double quotes, square brackets,
# or within a a function such as alloc(...)
tokenList = ['']
tokenIdx = 0
i = 0
while i < len(expression):
char = expression[i]
if char == '\'': isSingleQuote = not isSingleQuote
if char == '"': isDoubleQuote = not isDoubleQuote
if isSingleQuote == False and isDoubleQuote == False:
if funcBracketDepth == 0:
if char == '[': squareBracketDepth += 1
elif char == ']': squareBracketDepth -= 1
elif char == '(':
isSysFunc = False
for func in utils.SYS_FUNCTIONS:
if tokenList[tokenIdx] == func: isSysFunc = True
if isSysFunc: funcBracketDepth += 1
if funcBracketDepth == 0 and squareBracketDepth == 0:
nextOperator = None
for op in EXPR_OPERATORS:
spacedOp = ' {} '.format(op) if op.isalnum() else op
if expression[i:].startswith(spacedOp):
if nextOperator is None or len(spacedOp) > len(nextOperator):
nextOperator = spacedOp
if char.isspace() or nextOperator is not None:
if tokenList[tokenIdx] != '':
tokenList += ['']
tokenIdx += 1
if nextOperator is not None:
tokenList[tokenIdx] += nextOperator.strip()
tokenList += ['']
tokenIdx += 1
i += len(nextOperator)-1
i += 1
while i < len(expression):
if not expression[i].isspace(): break
else: i += 1
continue
else:
if char == '(': funcBracketDepth += 1
elif char == ')': funcBracketDepth -= 1
tokenList[tokenIdx] += char
i += 1
if len(tokenList) > 0 and tokenList[-1] == '':
tokenList = tokenList[:-1]
# Convert minus sign to negative e.g. ['+', '-', '8'] => ['+', '-8']
newTokenList = []
tokenIdx = 0
while tokenIdx < len(tokenList):
if tokenList[tokenIdx] == '-' and tokenIdx < len(tokenList)-1:
if tokenIdx == 0 or tokenList[tokenIdx-1] in EXPR_OPERATORS:
newTokenList.append('-' + tokenList[tokenIdx+1])
tokenIdx += 2
continue
newTokenList.append(tokenList[tokenIdx])
tokenIdx += 1
return newTokenList
def infixToPostfix(tokenList, getToken = lambda item: item):
# Get priorities from ordering
priorities = {}
for (level, ops) in enumerate(OPERATOR_ORDERING):
priorities = {**priorities, **{op: len(OPERATOR_ORDERING)-level for op in ops}}
# Convert expression to reverse polish (postfix) notation
stack = []
output = []
for item in tokenList:
token = getToken(item)
if token not in EXPR_OPERATORS:
output.append(item)
elif token == '(':
stack.append(item)
elif token == ')':
while stack and getToken(stack[-1]) != '(':
output.append(stack.pop())
stack.pop()
else:
while stack and getToken(stack[-1]) != '(' and priorities[token] <= priorities[getToken(stack[-1])]:
output.append(stack.pop())
stack.append(item)
while stack: output.append(stack.pop())
return output
def isInBrackets(string, idx, b1='(', b2=')'):
bracketTeir = 0
for i, ch in enumerate(string):
if ch == b1: bracketTeir += 1
if ch == b2: bracketTeir -= 1
if i == idx: return bracketTeir > 0
def isTopLevel(string, idx):
if isInBrackets(string, idx, '(', ')'): return False
if isInBrackets(string, idx, '[', ']'): return False
isSingleQuote = False
isDoubleQuote = False
for i, ch in enumerate(string):
if ch == '\'': isSingleQuote = not isSingleQuote
if ch == '"': isDoubleQuote = not isDoubleQuote
if i == idx: return not isSingleQuote and not isDoubleQuote | pt | 0.127046 | 2.824782 | 3 |
learn-to-code-with-python/10-Lists-Iteration/iterate-in-reverse-with-the-reversed-function.py | MaciejZurek/python_practicing | 0 | 13627 | the_simpsons = ["Homer", "Marge", "Bart", "Lisa", "Maggie"]
# Two ways to walk the list (defined above) in reverse: a reversed slice copy
# vs. the lazy reversed() iterator.
print(the_simpsons[::-1])
for char in the_simpsons[::-1]:
    print(f"{char} has a total of {len(char)} characters.")
print(reversed(the_simpsons))
print(type(reversed(the_simpsons))) # lazy <list_reverseiterator> object
for char in reversed(the_simpsons): # yields one element per iteration instead of copying the whole list; good for large lists
    print(f"{char} has a total of {len(char)} characters.")
| the_simpsons = ["Homer", "Marge", "Bart", "Lisa", "Maggie"]
print(the_simpsons[::-1])
for char in the_simpsons[::-1]:
print(f"{char} has a total of {len(char)} characters.")
print(reversed(the_simpsons))
print(type(reversed(the_simpsons))) # generator object
for char in reversed(the_simpsons): # laduje za kazda iteracja jeden element listy, a nie cala liste od razu, dobre przy duzych listach
print(f"{char} has a total of {len(char)} characters.")
| it | 0.166832 | 4.275119 | 4 |
ghiaseddin/scripts/download-dataset-lfw10.py | yassersouri/ghiaseddin | 44 | 13628 | <filename>ghiaseddin/scripts/download-dataset-lfw10.py<gh_stars>10-100
from subprocess import call
import os
import sys
# Make the package root importable so `settings` resolves.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import settings

# Local archive location and remote source of the LFW10 dataset.
data_zip_path = os.path.join(settings.lfw10_root, "LFW10.zip")
data_url = "http://cvit.iiit.ac.in/images/Projects/relativeParts/LFW10.zip"
# Downloading the data zip and extracting it
call(["wget",
      "--continue", # do not download things again
      "--tries=0", # try many times to finish the download
      "--output-document=%s" % data_zip_path, # save it to the appropriate place
      data_url])
# Pass an argument list instead of shell=True so paths containing spaces or
# shell metacharacters cannot break (or inject into) the command line.
call(["unzip", "-d", settings.lfw10_root, data_zip_path])
| <filename>ghiaseddin/scripts/download-dataset-lfw10.py<gh_stars>10-100
from subprocess import call
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import settings
data_zip_path = os.path.join(settings.lfw10_root, "LFW10.zip")
data_url = "http://cvit.iiit.ac.in/images/Projects/relativeParts/LFW10.zip"
# Downloading the data zip and extracting it
call(["wget",
"--continue", # do not download things again
"--tries=0", # try many times to finish the download
"--output-document=%s" % data_zip_path, # save it to the appropriate place
data_url])
call(["unzip -d %s %s" % (settings.lfw10_root, data_zip_path)], shell=True)
| pt | 0.114578 | 2.488881 | 2 |
hops/dist_allreduce.py | Limmen/hops-util-py | 0 | 13629 | """
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import pydoop.hdfs
import subprocess
import os
import stat
import sys
import threading
import time
import socket
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
from hops import util
import coordination_server
# Id of the current run; used to build the per-run log directory in HDFS.
run_id = 0
def launch(spark_session, notebook):
    """ Run notebook pointed to in HopsFS as a python file in mpirun

    One Spark task is scheduled per executor; each task registers with a
    small coordination server and the task with index 0 drives mpirun
    (see prepare_func).

    Args:
        :spark_session: SparkSession object
        :notebook: The path in HopsFS to the notebook
    """
    global run_id
    print('\nStarting TensorFlow job, follow your progress on TensorBoard in Jupyter UI! \n')
    sys.stdout.flush()
    sc = spark_session.sparkContext
    app_id = str(sc.applicationId)
    conf_num = int(sc._conf.get("spark.executor.instances"))
    #Each TF task should be run on 1 executor
    nodeRDD = sc.parallelize(range(conf_num), conf_num)
    # The coordination server must be up before executors try to register.
    server = coordination_server.Server(conf_num)
    server_addr = server.start()
    #Force execution on executor, since GPU is located on executor
    nodeRDD.foreachPartition(prepare_func(app_id, run_id, notebook, server_addr))
    print('Finished TensorFlow job \n')
    print('Make sure to check /Logs/TensorFlow/' + app_id + '/runId.' + str(run_id) + ' for logfile and TensorBoard logdir')
def get_logdir(app_id):
    """Return the HDFS horovod log directory for this app's current run."""
    global run_id
    return '{0}/Logs/TensorFlow/{1}/horovod/run.{2}'.format(
        hopshdfs.project_path(), app_id, run_id)
def prepare_func(app_id, run_id, nb_path, server_addr):
    """Build the function executed by every Spark executor partition.

    Each executor registers its host, working directory and GPU ordinals with
    the coordination server.  The executor whose partition index is 0 becomes
    the chief: it creates the HDFS log directories, builds and runs the
    mpirun command over all registered hosts, and reports completion; the
    other executors simply wait for that report.

    Args:
        :app_id: Spark application id (used for HDFS log paths)
        :run_id: current run number
        :nb_path: HDFS path of the notebook to execute
        :server_addr: address of the coordination server
    """
    def _wrapper_fun(iter):
        # The partition holds this executor's index; after the loop
        # executor_num is the (single) index value.
        for i in iter:
            executor_num = i
        client = coordination_server.Client(server_addr)
        # Metadata other nodes need to build the mpirun hostfile and the
        # per-host environment script.
        node_meta = {'host': get_ip_address(),
                     'executor_cwd': os.getcwd(),
                     'cuda_visible_devices_ordinals': devices.get_minor_gpu_device_numbers()}
        client.register(node_meta)
        # Background GPU-utilization reporter; stopped via its do_run flag.
        t_gpus = threading.Thread(target=devices.print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t_gpus.start()
        # Only spark executor with index 0 should create necessary HDFS directories and start mpirun
        # Other executors simply block until index 0 reports mpirun is finished
        # Blocks until every executor has registered, then returns all metadata.
        clusterspec = client.await_reservations()
        #pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
        #hopshdfs.init_logger()
        #hopshdfs.log('Starting Spark executor with arguments')
        gpu_str = '\n\nChecking for GPUs in the environment\n' + devices.get_gpu_info()
        #hopshdfs.log(gpu_str)
        print(gpu_str)
        # Fresh local logfile for mpirun's combined stdout/stderr.
        mpi_logfile_path = os.getcwd() + '/mpirun.log'
        if os.path.exists(mpi_logfile_path):
            os.remove(mpi_logfile_path)
        mpi_logfile = open(mpi_logfile_path, 'w')
        # Materialize the notebook as a runnable .py plus generate_env.py.
        py_runnable = localize_scripts(nb_path, clusterspec)
        # non-chief executor should not do mpirun
        if not executor_num == 0:
            client.await_mpirun_finished()
        else:
            hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs.create_directories(app_id, run_id, param_string='Horovod')
            tb_hdfs_path, tb_pid = tensorboard.register(hdfs_exec_logdir, hdfs_appid_logdir, 0)
            # One mpirun process per GPU slot across all hosts; environment
            # variables are forwarded with -x for Horovod/TensorBoard.
            mpi_cmd = 'HOROVOD_TIMELINE=' + tensorboard.logdir() + '/timeline.json' + \
                      ' TENSORBOARD_LOGDIR=' + tensorboard.logdir() + \
                      ' mpirun -np ' + str(get_num_ps(clusterspec)) + ' --hostfile ' + get_hosts_file(clusterspec) + \
                      ' -bind-to none -map-by slot ' + \
                      ' -x LD_LIBRARY_PATH ' + \
                      ' -x HOROVOD_TIMELINE ' + \
                      ' -x TENSORBOARD_LOGDIR ' + \
                      ' -x NCCL_DEBUG=INFO ' + \
                      ' -mca pml ob1 -mca btl ^openib ' + \
                      os.environ['PYSPARK_PYTHON'] + ' ' + py_runnable
            mpi = subprocess.Popen(mpi_cmd,
                                   shell=True,
                                   stdout=mpi_logfile,
                                   stderr=mpi_logfile,
                                   preexec_fn=util.on_executor_exit('SIGTERM'))
            # Tail the mpirun logfile to this executor's stdout while waiting.
            t_log = threading.Thread(target=print_log)
            t_log.start()
            mpi.wait()
            client.register_mpirun_finished()
            if devices.get_num_gpus() > 0:
                t_gpus.do_run = False
                t_gpus.join()
            return_code = mpi.returncode
            if return_code != 0:
                cleanup(tb_hdfs_path)
                t_log.do_run = False
                t_log.join()
                raise Exception('mpirun FAILED, look in the logs for the error')
            cleanup(tb_hdfs_path)
            t_log.do_run = False
            t_log.join()
    return _wrapper_fun
def print_log():
    """Tail the local mpirun logfile and echo new lines to stdout.

    Runs in a helper thread; the owner stops it by setting the thread's
    ``do_run`` attribute to False, after which remaining lines are drained.
    Fixes two defects of the original: Python-2 ``print line`` statements
    (the rest of the file uses ``print()`` calls) and a drain loop whose
    ``seek`` re-read the same line forever.
    """
    mpi_logfile_path = os.getcwd() + '/mpirun.log'
    mpi_logfile = open(mpi_logfile_path, 'r')
    t = threading.currentThread()
    while getattr(t, "do_run", True):
        where = mpi_logfile.tell()
        line = mpi_logfile.readline()
        if not line:
            # Nothing new yet: wait and rewind so a partial line is re-read whole.
            time.sleep(1)
            mpi_logfile.seek(where)
        else:
            print(line)
    # Get the last outputs after the owner asked us to stop.
    line = mpi_logfile.readline()
    while line:
        print(line)
        line = mpi_logfile.readline()
    mpi_logfile.close()
def cleanup(tb_hdfs_path):
    """Delete the TensorBoard marker file from HDFS (if any) and stop logging."""
    hopshdfs.log('Performing cleanup')
    handle = hopshdfs.get()
    if tb_hdfs_path not in (None, '') and handle.exists(tb_hdfs_path):
        handle.delete(tb_hdfs_path)
    hopshdfs.kill_logger()
def get_ip_address():
    """Simple utility to get host IP address.

    Connects a UDP socket "towards" a public address (no traffic is actually
    sent) so the OS picks the outbound interface, and — unlike the original —
    closes the socket afterwards instead of leaking it.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()
def get_hosts_string(clusterspec):
    """Return mpirun-style " host:slots" pairs, e.g. " nodeA:2 nodeB:1".

    Fixes the original, which built the string but never returned it
    (every call yielded None).
    """
    hosts_string = ''
    for host in clusterspec:
        hosts_string = hosts_string + ' ' + host['host'] + ':' + str(len(host['cuda_visible_devices_ordinals']))
    return hosts_string
def get_num_ps(clusterspec):
    """Total number of GPU slots (mpi processes) across all hosts in the spec."""
    return sum(len(node['cuda_visible_devices_ordinals']) for node in clusterspec)
def get_hosts_file(clusterspec):
    """Write an mpirun hostfile ("host slots=N" lines) into the CWD.

    Returns the path of the written file.  Note the content starts with a
    leading newline, exactly as the original produced.
    """
    entries = ['\n' + node['host'] + ' slots=' + str(len(node['cuda_visible_devices_ordinals']))
               for node in clusterspec]
    host_file = os.getcwd() + '/host_file'
    with open(host_file, 'w') as fh:
        fh.write(''.join(entries))
    return host_file
def find_host_in_clusterspec(clusterspec, host):
    """Return the cluster-spec entry whose 'host' field matches, else None.

    The original looked up h['name'], a key the registered node metadata
    never contains (nodes register 'host' — see node_meta in prepare_func),
    so every call raised KeyError.
    """
    for h in clusterspec:
        if h['host'] == host:
            return h
# The code generated by this function will be called in an eval, which changes the working_dir and cuda_visible_devices for process running mpirun
def generate_environment_script(clusterspec):
    """Generate Python source that, when exec'd on a node, defines
    export_workdir() to switch into that node's executor working directory
    and pin its visible CUDA devices.

    One export_workdir definition is emitted per host; only the branch whose
    IP matches the local machine does anything.  Fixes the generated code:
    the original emitted ``os.chdir="..."`` (rebinding the function to a
    string) instead of actually calling ``os.chdir("...")``.
    """
    import_script = 'import os \n' \
                    'from hops import util'

    export_script = ''
    for host in clusterspec:
        export_script += 'def export_workdir():\n' \
                         '    if util.get_ip_address() == \"' + find_host_in_clusterspec(clusterspec, host['host'])['host'] + '\":\n' \
                         '        os.chdir(\"' + host['executor_cwd'] + '\")\n' \
                         '        os.environ["CUDA_DEVICE_ORDER"]=\"PCI_BUS_ID\" \n' \
                         '        os.environ["CUDA_VISIBLE_DEVICES"]=\"' + ",".join(str(x) for x in host['cuda_visible_devices_ordinals']) + '\"\n'
    return import_script + '\n' + export_script
def localize_scripts(nb_path, clusterspec):
    """Materialize the notebook as a runnable python script in the CWD.

    Steps: fetch the notebook from HDFS, convert it to .py with nbconvert,
    prepend a stub that exec()s generate_env.py (which sets the working
    directory and CUDA devices per host), and write generate_env.py itself.

    Args:
        :nb_path: HDFS path of the notebook
        :clusterspec: list of registered node metadata dicts
    Returns:
        Path of the runnable .py script.
    """
    # 1. Download the notebook as a string
    fs_handle = hopshdfs.get_fs()
    fd = fs_handle.open_file(nb_path, flags='r')
    note = fd.read()
    fd.close()
    path, filename = os.path.split(nb_path)
    f_nb = open(filename,"w+")
    f_nb.write(note)
    f_nb.flush()
    f_nb.close()
    # 2. Convert notebook to py file
    jupyter_runnable = os.path.abspath(os.path.join(os.environ['PYSPARK_PYTHON'], os.pardir)) + '/jupyter'
    conversion_cmd = jupyter_runnable + ' nbconvert --to python ' + filename
    conversion = subprocess.Popen(conversion_cmd,
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    conversion.wait()
    stdout, stderr = conversion.communicate()
    print(stdout)
    print(stderr)
    # 3. Prepend script to export environment variables and Make py file runnable
    py_runnable = os.getcwd() + '/' + filename.split('.')[0] + '.py'
    notebook = 'with open("generate_env.py", "r") as myfile:\n' \
               '    data=myfile.read()\n' \
               '    exec(data)\n'
    with open(py_runnable, 'r') as original: data = original.read()
    with open(py_runnable, 'w') as modified: modified.write(notebook + data)
    st = os.stat(py_runnable)
    os.chmod(py_runnable, st.st_mode | stat.S_IEXEC)
    # 4. Localize generate_env.py script
    environment_script = generate_environment_script(clusterspec)
    generate_env_path = os.getcwd() + '/generate_env.py'
    f_env = open(generate_env_path, "w+")
    f_env.write(environment_script)
    f_env.flush()
    f_env.close()
    # 5. Make generate_env.py runnable
    # Fix: the original chmod'ed py_runnable a second time here, leaving
    # generate_env.py itself without the executable bit.
    st = os.stat(generate_env_path)
    os.chmod(generate_env_path, st.st_mode | stat.S_IEXEC)
    return py_runnable
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import pydoop.hdfs
import subprocess
import os
import stat
import sys
import threading
import time
import socket
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
from hops import util
import coordination_server
run_id = 0
def launch(spark_session, notebook):
""" Run notebook pointed to in HopsFS as a python file in mpirun
Args:
:spark_session: SparkSession object
:notebook: The path in HopsFS to the notebook
"""
global run_id
print('\nStarting TensorFlow job, follow your progress on TensorBoard in Jupyter UI! \n')
sys.stdout.flush()
sc = spark_session.sparkContext
app_id = str(sc.applicationId)
conf_num = int(sc._conf.get("spark.executor.instances"))
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(conf_num), conf_num)
server = coordination_server.Server(conf_num)
server_addr = server.start()
#Force execution on executor, since GPU is located on executor
nodeRDD.foreachPartition(prepare_func(app_id, run_id, notebook, server_addr))
print('Finished TensorFlow job \n')
print('Make sure to check /Logs/TensorFlow/' + app_id + '/runId.' + str(run_id) + ' for logfile and TensorBoard logdir')
def get_logdir(app_id):
global run_id
return hopshdfs.project_path() + '/Logs/TensorFlow/' + app_id + '/horovod/run.' + str(run_id)
def prepare_func(app_id, run_id, nb_path, server_addr):
def _wrapper_fun(iter):
for i in iter:
executor_num = i
client = coordination_server.Client(server_addr)
node_meta = {'host': get_ip_address(),
'executor_cwd': os.getcwd(),
'cuda_visible_devices_ordinals': devices.get_minor_gpu_device_numbers()}
client.register(node_meta)
t_gpus = threading.Thread(target=devices.print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t_gpus.start()
# Only spark executor with index 0 should create necessary HDFS directories and start mpirun
# Other executors simply block until index 0 reports mpirun is finished
clusterspec = client.await_reservations()
#pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
#hopshdfs.init_logger()
#hopshdfs.log('Starting Spark executor with arguments')
gpu_str = '\n\nChecking for GPUs in the environment\n' + devices.get_gpu_info()
#hopshdfs.log(gpu_str)
print(gpu_str)
mpi_logfile_path = os.getcwd() + '/mpirun.log'
if os.path.exists(mpi_logfile_path):
os.remove(mpi_logfile_path)
mpi_logfile = open(mpi_logfile_path, 'w')
py_runnable = localize_scripts(nb_path, clusterspec)
# non-chief executor should not do mpirun
if not executor_num == 0:
client.await_mpirun_finished()
else:
hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs.create_directories(app_id, run_id, param_string='Horovod')
tb_hdfs_path, tb_pid = tensorboard.register(hdfs_exec_logdir, hdfs_appid_logdir, 0)
mpi_cmd = 'HOROVOD_TIMELINE=' + tensorboard.logdir() + '/timeline.json' + \
' TENSORBOARD_LOGDIR=' + tensorboard.logdir() + \
' mpirun -np ' + str(get_num_ps(clusterspec)) + ' --hostfile ' + get_hosts_file(clusterspec) + \
' -bind-to none -map-by slot ' + \
' -x LD_LIBRARY_PATH ' + \
' -x HOROVOD_TIMELINE ' + \
' -x TENSORBOARD_LOGDIR ' + \
' -x NCCL_DEBUG=INFO ' + \
' -mca pml ob1 -mca btl ^openib ' + \
os.environ['PYSPARK_PYTHON'] + ' ' + py_runnable
mpi = subprocess.Popen(mpi_cmd,
shell=True,
stdout=mpi_logfile,
stderr=mpi_logfile,
preexec_fn=util.on_executor_exit('SIGTERM'))
t_log = threading.Thread(target=print_log)
t_log.start()
mpi.wait()
client.register_mpirun_finished()
if devices.get_num_gpus() > 0:
t_gpus.do_run = False
t_gpus.join()
return_code = mpi.returncode
if return_code != 0:
cleanup(tb_hdfs_path)
t_log.do_run = False
t_log.join()
raise Exception('mpirun FAILED, look in the logs for the error')
cleanup(tb_hdfs_path)
t_log.do_run = False
t_log.join()
return _wrapper_fun
def print_log():
mpi_logfile_path = os.getcwd() + '/mpirun.log'
mpi_logfile = open(mpi_logfile_path, 'r')
t = threading.currentThread()
while getattr(t, "do_run", True):
where = mpi_logfile.tell()
line = mpi_logfile.readline()
if not line:
time.sleep(1)
mpi_logfile.seek(where)
else:
print line
# Get the last outputs
line = mpi_logfile.readline()
while line:
where = mpi_logfile.tell()
print line
line = mpi_logfile.readline()
mpi_logfile.seek(where)
def cleanup(tb_hdfs_path):
hopshdfs.log('Performing cleanup')
handle = hopshdfs.get()
if not tb_hdfs_path == None and not tb_hdfs_path == '' and handle.exists(tb_hdfs_path):
handle.delete(tb_hdfs_path)
hopshdfs.kill_logger()
def get_ip_address():
"""Simple utility to get host IP address"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def get_hosts_string(clusterspec):
hosts_string = ''
for host in clusterspec:
hosts_string = hosts_string + ' ' + host['host'] + ':' + str(len(host['cuda_visible_devices_ordinals']))
def get_num_ps(clusterspec):
num = 0
for host in clusterspec:
num += len(host['cuda_visible_devices_ordinals'])
return num
def get_hosts_file(clusterspec):
hf = ''
host_file = os.getcwd() + '/host_file'
for host in clusterspec:
hf = hf + '\n' + host['host'] + ' ' + 'slots=' + str(len(host['cuda_visible_devices_ordinals']))
with open(host_file, 'w') as hostfile: hostfile.write(hf)
return host_file
def find_host_in_clusterspec(clusterspec, host):
for h in clusterspec:
if h['name'] == host:
return h
# The code generated by this function will be called in an eval, which changes the working_dir and cuda_visible_devices for process running mpirun
def generate_environment_script(clusterspec):
import_script = 'import os \n' \
'from hops import util'
export_script = ''
for host in clusterspec:
export_script += 'def export_workdir():\n' \
' if util.get_ip_address() == \"' + find_host_in_clusterspec(clusterspec, host['host'])['host'] + '\":\n' \
' os.chdir=\"' + host['executor_cwd'] + '\"\n' \
' os.environ["CUDA_DEVICE_ORDER"]=\"PCI_BUS_ID\" \n' \
' os.environ["CUDA_VISIBLE_DEVICES"]=\"' + ",".join(str(x) for x in host['cuda_visible_devices_ordinals']) + '\"\n'
return import_script + '\n' + export_script
def localize_scripts(nb_path, clusterspec):
# 1. Download the notebook as a string
fs_handle = hopshdfs.get_fs()
fd = fs_handle.open_file(nb_path, flags='r')
note = fd.read()
fd.close()
path, filename = os.path.split(nb_path)
f_nb = open(filename,"w+")
f_nb.write(note)
f_nb.flush()
f_nb.close()
# 2. Convert notebook to py file
jupyter_runnable = os.path.abspath(os.path.join(os.environ['PYSPARK_PYTHON'], os.pardir)) + '/jupyter'
conversion_cmd = jupyter_runnable + ' nbconvert --to python ' + filename
conversion = subprocess.Popen(conversion_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
conversion.wait()
stdout, stderr = conversion.communicate()
print(stdout)
print(stderr)
# 3. Prepend script to export environment variables and Make py file runnable
py_runnable = os.getcwd() + '/' + filename.split('.')[0] + '.py'
notebook = 'with open("generate_env.py", "r") as myfile:\n' \
' data=myfile.read()\n' \
' exec(data)\n'
with open(py_runnable, 'r') as original: data = original.read()
with open(py_runnable, 'w') as modified: modified.write(notebook + data)
st = os.stat(py_runnable)
os.chmod(py_runnable, st.st_mode | stat.S_IEXEC)
# 4. Localize generate_env.py script
environment_script = generate_environment_script(clusterspec)
generate_env_path = os.getcwd() + '/generate_env.py'
f_env = open(generate_env_path, "w+")
f_env.write(environment_script)
f_env.flush()
f_env.close()
# 5. Make generate_env.py runnable
st = os.stat(generate_env_path)
os.chmod(py_runnable, st.st_mode | stat.S_IEXEC)
return py_runnable | pt | 0.15141 | 2.403674 | 2 |
api/migrations/versions/e956985ff509_.py | SnSation/Pokemart | 0 | 13630 | <gh_stars>0
"""empty message
Revision ID: e956985ff509
Revises: 4b471bbc<PASSWORD>
Create Date: 2020-12-02 22:47:08.536332
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e956985ff509'
down_revision = '4<PASSWORD>1<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('national_pokemon', 'description')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('national_pokemon', sa.Column('description', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
# ### end Alembic commands ###
| """empty message
Revision ID: e956985ff509
Revises: 4b471bbc<PASSWORD>
Create Date: 2020-12-02 22:47:08.536332
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e956985ff509'
down_revision = '4<PASSWORD>1<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('national_pokemon', 'description')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('national_pokemon', sa.Column('description', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
# ### end Alembic commands ### | it | 0.197964 | 1.327889 | 1 |
tests/features/steps/ahk_steps.py | epth/ahk | 1 | 13631 | from behave.matchers import RegexMatcher
from ahk import AHK
from behave_classy import step_impl_base
Base = step_impl_base()
class AHKSteps(AHK, Base):
@Base.given(u'the mouse position is ({xpos:d}, {ypos:d})')
def given_mouse_move(self, xpos, ypos):
self.mouse_move(x=xpos, y=ypos)
@Base.when(u'I move the mouse (UP|DOWN|LEFT|RIGHT) (\d+)px', matcher=RegexMatcher)
def move_direction(self, direction, px):
px = int(px)
if direction in ('UP', 'DOWN'):
axis = 'y'
else:
axis = 'x'
if direction in ('LEFT', 'UP'):
px = px * -1
kwargs = {axis: px, 'relative': True}
self.mouse_move(**kwargs)
@Base.then(u'I expect the mouse position to be ({xpos:d}, {ypos:d})')
def check_position(self, xpos, ypos):
x, y = self.mouse_position
assert x == xpos
assert y == ypos
AHKSteps().register() | from behave.matchers import RegexMatcher
from ahk import AHK
from behave_classy import step_impl_base
Base = step_impl_base()
class AHKSteps(AHK, Base):
@Base.given(u'the mouse position is ({xpos:d}, {ypos:d})')
def given_mouse_move(self, xpos, ypos):
self.mouse_move(x=xpos, y=ypos)
@Base.when(u'I move the mouse (UP|DOWN|LEFT|RIGHT) (\d+)px', matcher=RegexMatcher)
def move_direction(self, direction, px):
px = int(px)
if direction in ('UP', 'DOWN'):
axis = 'y'
else:
axis = 'x'
if direction in ('LEFT', 'UP'):
px = px * -1
kwargs = {axis: px, 'relative': True}
self.mouse_move(**kwargs)
@Base.then(u'I expect the mouse position to be ({xpos:d}, {ypos:d})')
def check_position(self, xpos, ypos):
x, y = self.mouse_position
assert x == xpos
assert y == ypos
AHKSteps().register() | none | 1 | 2.370028 | 2 |
snakewm/apps/games/pong/bat.py | sigmaister/snakeware_os | 1,621 | 13632 | <filename>snakewm/apps/games/pong/bat.py
import pygame
from pygame.locals import *
class ControlScheme:
def __init__(self):
self.up = K_UP
self.down = K_DOWN
class Bat:
def __init__(self, start_pos, control_scheme, court_size):
self.control_scheme = control_scheme
self.move_up = False
self.move_down = False
self.move_speed = 450.0
self.court_size = court_size
self.length = 30.0
self.width = 5.0
self.position = [float(start_pos[0]), float(start_pos[1])]
self.rect = pygame.Rect((start_pos[0], start_pos[1]), (self.width, self.length))
self.colour = pygame.Color("#FFFFFF")
def process_event(self, event):
if event.type == KEYDOWN:
if event.key == self.control_scheme.up:
self.move_up = True
if event.key == self.control_scheme.down:
self.move_down = True
if event.type == KEYUP:
if event.key == self.control_scheme.up:
self.move_up = False
if event.key == self.control_scheme.down:
self.move_down = False
def update(self, dt):
if self.move_up:
self.position[1] -= dt * self.move_speed
if self.position[1] < 10.0:
self.position[1] = 10.0
self.rect.y = self.position[1]
if self.move_down:
self.position[1] += dt * self.move_speed
if self.position[1] > self.court_size[1] - self.length - 10:
self.position[1] = self.court_size[1] - self.length - 10
self.rect.y = self.position[1]
def render(self, screen):
pygame.draw.rect(screen, self.colour, self.rect)
| <filename>snakewm/apps/games/pong/bat.py
import pygame
from pygame.locals import *
class ControlScheme:
def __init__(self):
self.up = K_UP
self.down = K_DOWN
class Bat:
def __init__(self, start_pos, control_scheme, court_size):
self.control_scheme = control_scheme
self.move_up = False
self.move_down = False
self.move_speed = 450.0
self.court_size = court_size
self.length = 30.0
self.width = 5.0
self.position = [float(start_pos[0]), float(start_pos[1])]
self.rect = pygame.Rect((start_pos[0], start_pos[1]), (self.width, self.length))
self.colour = pygame.Color("#FFFFFF")
def process_event(self, event):
if event.type == KEYDOWN:
if event.key == self.control_scheme.up:
self.move_up = True
if event.key == self.control_scheme.down:
self.move_down = True
if event.type == KEYUP:
if event.key == self.control_scheme.up:
self.move_up = False
if event.key == self.control_scheme.down:
self.move_down = False
def update(self, dt):
if self.move_up:
self.position[1] -= dt * self.move_speed
if self.position[1] < 10.0:
self.position[1] = 10.0
self.rect.y = self.position[1]
if self.move_down:
self.position[1] += dt * self.move_speed
if self.position[1] > self.court_size[1] - self.length - 10:
self.position[1] = self.court_size[1] - self.length - 10
self.rect.y = self.position[1]
def render(self, screen):
pygame.draw.rect(screen, self.colour, self.rect)
| none | 1 | 2.883119 | 3 |
mimic/model/rackspace_image_store.py | ksheedlo/mimic | 141 | 13633 | <filename>mimic/model/rackspace_image_store.py
"""
An image store representing Rackspace specific images
"""
from __future__ import absolute_import, division, unicode_literals
import attr
from six import iteritems
from mimic.model.rackspace_images import (RackspaceWindowsImage,
RackspaceCentOSPVImage, RackspaceCentOSPVHMImage,
RackspaceCoreOSImage, RackspaceDebianImage,
RackspaceFedoraImage, RackspaceFreeBSDImage,
RackspaceGentooImage, RackspaceOpenSUSEImage,
RackspaceRedHatPVImage, RackspaceRedHatPVHMImage,
RackspaceUbuntuPVImage, RackspaceUbuntuPVHMImage,
RackspaceVyattaImage, RackspaceScientificImage,
RackspaceOnMetalCentOSImage, RackspaceOnMetalCoreOSImage,
RackspaceOnMetalDebianImage, RackspaceOnMetalFedoraImage,
RackspaceOnMetalUbuntuImage)
from mimic.model.rackspace_images import create_rackspace_images
@attr.s
class RackspaceImageStore(object):
"""
A store for images to share between nova_api and glance_api
:var image_list: list of Rackspace images
"""
image_list = attr.ib(default=attr.Factory(list))
def create_image_store(self, tenant_id):
"""
Generates the data for each image in each image class
"""
image_classes = [RackspaceWindowsImage, RackspaceCentOSPVImage,
RackspaceCentOSPVHMImage, RackspaceCoreOSImage, RackspaceDebianImage,
RackspaceFedoraImage, RackspaceFreeBSDImage, RackspaceGentooImage,
RackspaceOpenSUSEImage, RackspaceRedHatPVImage, RackspaceRedHatPVHMImage,
RackspaceUbuntuPVImage, RackspaceUbuntuPVHMImage, RackspaceVyattaImage,
RackspaceScientificImage, RackspaceOnMetalCentOSImage,
RackspaceOnMetalCoreOSImage, RackspaceOnMetalDebianImage,
RackspaceOnMetalFedoraImage, RackspaceOnMetalUbuntuImage]
if len(self.image_list) < 1:
for image_class in image_classes:
for image, image_spec in iteritems(image_class.images):
image_name = image
image_id = image_spec['id']
minRam = image_spec['minRam']
minDisk = image_spec['minDisk']
image_size = image_spec['OS-EXT-IMG-SIZE:size']
image = image_class(image_id=image_id, tenant_id=tenant_id,
image_size=image_size, name=image_name, minRam=minRam,
minDisk=minDisk)
if 'com.rackspace__1__ui_default_show' in image_spec:
image.set_is_default()
self.image_list.append(image)
self.image_list.extend(create_rackspace_images(tenant_id))
return self.image_list
def get_image_by_id(self, image_id):
"""
Get an image by its id
"""
for image in self.image_list:
if image_id == image.image_id:
return image
def add_image_to_store(self, image):
"""
Add a new image to the list of images
"""
self.image_list.append(image)
| <filename>mimic/model/rackspace_image_store.py
"""
An image store representing Rackspace specific images
"""
from __future__ import absolute_import, division, unicode_literals
import attr
from six import iteritems
from mimic.model.rackspace_images import (RackspaceWindowsImage,
RackspaceCentOSPVImage, RackspaceCentOSPVHMImage,
RackspaceCoreOSImage, RackspaceDebianImage,
RackspaceFedoraImage, RackspaceFreeBSDImage,
RackspaceGentooImage, RackspaceOpenSUSEImage,
RackspaceRedHatPVImage, RackspaceRedHatPVHMImage,
RackspaceUbuntuPVImage, RackspaceUbuntuPVHMImage,
RackspaceVyattaImage, RackspaceScientificImage,
RackspaceOnMetalCentOSImage, RackspaceOnMetalCoreOSImage,
RackspaceOnMetalDebianImage, RackspaceOnMetalFedoraImage,
RackspaceOnMetalUbuntuImage)
from mimic.model.rackspace_images import create_rackspace_images
@attr.s
class RackspaceImageStore(object):
"""
A store for images to share between nova_api and glance_api
:var image_list: list of Rackspace images
"""
image_list = attr.ib(default=attr.Factory(list))
def create_image_store(self, tenant_id):
"""
Generates the data for each image in each image class
"""
image_classes = [RackspaceWindowsImage, RackspaceCentOSPVImage,
RackspaceCentOSPVHMImage, RackspaceCoreOSImage, RackspaceDebianImage,
RackspaceFedoraImage, RackspaceFreeBSDImage, RackspaceGentooImage,
RackspaceOpenSUSEImage, RackspaceRedHatPVImage, RackspaceRedHatPVHMImage,
RackspaceUbuntuPVImage, RackspaceUbuntuPVHMImage, RackspaceVyattaImage,
RackspaceScientificImage, RackspaceOnMetalCentOSImage,
RackspaceOnMetalCoreOSImage, RackspaceOnMetalDebianImage,
RackspaceOnMetalFedoraImage, RackspaceOnMetalUbuntuImage]
if len(self.image_list) < 1:
for image_class in image_classes:
for image, image_spec in iteritems(image_class.images):
image_name = image
image_id = image_spec['id']
minRam = image_spec['minRam']
minDisk = image_spec['minDisk']
image_size = image_spec['OS-EXT-IMG-SIZE:size']
image = image_class(image_id=image_id, tenant_id=tenant_id,
image_size=image_size, name=image_name, minRam=minRam,
minDisk=minDisk)
if 'com.rackspace__1__ui_default_show' in image_spec:
image.set_is_default()
self.image_list.append(image)
self.image_list.extend(create_rackspace_images(tenant_id))
return self.image_list
def get_image_by_id(self, image_id):
"""
Get an image by its id
"""
for image in self.image_list:
if image_id == image.image_id:
return image
def add_image_to_store(self, image):
"""
Add a new image to the list of images
"""
self.image_list.append(image)
| pt | 0.223859 | 2.283602 | 2 |
samples/noxfile_config.py | ikuleshov/python-analytics-admin | 0 | 13634 | TEST_CONFIG_OVERRIDE = {
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {
"GA_TEST_PROPERTY_ID": "276206997",
"GA_TEST_ACCOUNT_ID": "199820965",
"GA_TEST_USER_LINK_ID": "103401743041912607932",
"GA_TEST_PROPERTY_USER_LINK_ID": "105231969274497648555",
"GA_TEST_ANDROID_APP_DATA_STREAM_ID": "2828100949",
"GA_TEST_IOS_APP_DATA_STREAM_ID": "2828089289",
"GA_TEST_WEB_DATA_STREAM_ID": "2828068992",
},
}
| TEST_CONFIG_OVERRIDE = {
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {
"GA_TEST_PROPERTY_ID": "276206997",
"GA_TEST_ACCOUNT_ID": "199820965",
"GA_TEST_USER_LINK_ID": "103401743041912607932",
"GA_TEST_PROPERTY_USER_LINK_ID": "105231969274497648555",
"GA_TEST_ANDROID_APP_DATA_STREAM_ID": "2828100949",
"GA_TEST_IOS_APP_DATA_STREAM_ID": "2828089289",
"GA_TEST_WEB_DATA_STREAM_ID": "2828068992",
},
}
| pt | 0.164428 | 1.458578 | 1 |
exercises/fr/test_01_09.py | tuanducdesign/spacy-course | 0 | 13635 | <gh_stars>0
def test():
assert "for ent in doc.ents" in __solution__, "Itères-tu sur les entités ?"
assert x_pro.text == "X Pro", "Es-tu certain que x_pro contient les bons tokens ?"
__msg__.good(
"Parfait ! Bien sur, tu n'as pas besoin de faire cela manuellement à chaque fois."
"Dans le prochain exercice, tu vas découvrir le matcher à base de règles de spaCy, "
"qui peut t'aider à trouver des mots et des phrases spécifiques dans un texte."
)
| def test():
assert "for ent in doc.ents" in __solution__, "Itères-tu sur les entités ?"
assert x_pro.text == "X Pro", "Es-tu certain que x_pro contient les bons tokens ?"
__msg__.good(
"Parfait ! Bien sur, tu n'as pas besoin de faire cela manuellement à chaque fois."
"Dans le prochain exercice, tu vas découvrir le matcher à base de règles de spaCy, "
"qui peut t'aider à trouver des mots et des phrases spécifiques dans un texte."
) | none | 1 | 2.438958 | 2 |
settings/channel_archiver/NIH.pressure_downstream_settings.py | bopopescu/Lauecollect | 0 | 13636 | filename = '//mx340hs/data/anfinrud_1903/Archive/NIH.pressure_downstream.txt' | filename = '//mx340hs/data/anfinrud_1903/Archive/NIH.pressure_downstream.txt' | none | 1 | 0.969736 | 1 |
sfepy/terms/terms_navier_stokes.py | vondrejc/sfepy | 0 | 13637 | import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
class DivGradTerm(Term):
r"""
Diffusion term.
:Definition:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
\int_{\Omega} \nu\ \nabla \ul{u} : \nabla \ul{w} \\
\int_{\Omega} \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
\int_{\Omega} \nabla \ul{u} : \nabla \ul{w}
:Arguments 1:
- material : :math:`\nu` (viscosity, optional)
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material : :math:`\nu` (viscosity, optional)
- parameter_1 : :math:`\ul{u}`
- parameter_2 : :math:`\ul{w}`
"""
name = 'dw_div_grad'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'parameter_1', 'parameter_2'))
arg_shapes = {'opt_material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'}
modes = ('weak', 'eval')
function = staticmethod(terms.term_ns_asm_div_grad)
def d_div_grad(self, out, grad1, grad2, mat, vg, fmode):
sh = grad1.shape
g1 = grad1.reshape((sh[0], sh[1], sh[2] * sh[3]))
g2 = grad2.reshape((sh[0], sh[1], sh[2] * sh[3]))
aux = mat * dot_sequences(g1[..., None], g2, 'ATB')[..., None]
if fmode == 2:
out[:] = aux
status = 0
else:
status = vg.integrate(out, aux, fmode)
return status
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if mat is None:
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
mat = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0, 1, 3, 2))
sh = grad.shape
grad = grad.reshape((sh[0], sh[1], sh[2] * sh[3], 1))
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, mat, vg, fmode
elif mode == 'eval':
grad1 = self.get(virtual, 'grad')
grad2 = self.get(state, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return grad1, grad2, mat, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.term_ns_asm_div_grad
else:
self.function = self.d_div_grad
class ConvectTerm(Term):
r"""
Nonlinear convective term.
:Definition:
.. math::
\int_{\Omega} ((\ul{u} \cdot \nabla) \ul{u}) \cdot \ul{v}
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_convect'
arg_types = ('virtual', 'state')
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D'}
function = staticmethod(terms.term_ns_asm_convect)
def get_fargs(self, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
val_qp = self.get(state, 'val')
fmode = diff_var is not None
return grad, val_qp, vg, fmode
class LinearConvectTerm(Term):
r"""
Linearized convective term.
:Definition:
.. math::
\int_{\Omega} ((\ul{b} \cdot \nabla) \ul{u}) \cdot \ul{v}
.. math::
((\ul{b} \cdot \nabla) \ul{u})|_{qp}
:Arguments:
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_lin_convect'
arg_types = ('virtual', 'parameter', 'state')
arg_shapes = {'virtual' : ('D', 'state'), 'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_lin_convect)
def get_fargs(self, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_qp = self.get(parameter, 'val')
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, val_qp, vg, fmode
elif mode == 'qp':
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
fmode = 2
return grad, val_qp, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
class StokesTerm(Term):
r"""
Stokes problem coupling term. Corresponds to weak forms of gradient and
divergence terms. Can be evaluated.
:Definition:
.. math::
\int_{\Omega} p\ \nabla \cdot \ul{v} \mbox{ , }
\int_{\Omega} q\ \nabla \cdot \ul{u}
\mbox{ or }
\int_{\Omega} c\ p\ \nabla \cdot \ul{v} \mbox{ , }
\int_{\Omega} c\ q\ \nabla \cdot \ul{u}
:Arguments 1:
- material : :math:`c` (optional)
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`c` (optional)
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`c` (optional)
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_stokes'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'state', 'virtual'),
('opt_material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'opt_material' : '1, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1},
{'opt_material' : None}]
modes = ('grad', 'div', 'eval')
@staticmethod
def d_eval(out, coef, vec_qp, div, vvg):
out_qp = coef * vec_qp * div
status = vvg.integrate(out, out_qp)
return status
def get_fargs(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
if self.mode == 'grad':
qp_var, qp_name = svar, 'val'
else:
qp_var, qp_name = vvar, 'div'
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
if coef is None:
coef = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return coef, val_qp, svg, vvg, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
div = self.get(vvar, 'div')
vec_qp = self.get(svar, 'val')
return coef, vec_qp, div, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'grad' : terms.dw_grad,
'div' : terms.dw_div,
'eval' : self.d_eval,
}[self.mode]
class GradTerm(Term):
r"""
Evaluate gradient of a scalar or vector field.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \nabla p \mbox{ or } \int_{\Omega} \nabla \ul{w}
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} \nabla p /
\int_{T_K} 1 \mbox{ or } \int_{T_K} \nabla \ul{w} /
\int_{T_K} 1
.. math::
(\nabla p)|_{qp} \mbox{ or } \nabla \ul{w}|_{qp}
:Arguments:
- parameter : :math:`p` or :math:`\ul{w}`
"""
name = 'ev_grad'
arg_types = ('parameter',)
arg_shapes = [{'parameter' : 1}, {'parameter' : 'D'}]
@staticmethod
def function(out, grad, vg, fmode):
if fmode == 2:
out[:] = grad
status = 0
else:
status = vg.integrate(out, grad, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
grad = self.get(parameter, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return grad, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim, n_c), parameter.dtype
class DivTerm(Term):
r"""
Evaluate divergence of a vector field.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \nabla \cdot \ul{u}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} \nabla \cdot \ul{u} / \int_{T_K} 1
.. math::
(\nabla \cdot \ul{u})|_{qp}
:Arguments:
- parameter : :math:`\ul{u}`
"""
name = 'ev_div'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
@staticmethod
def function(out, div, vg, fmode):
if fmode == 2:
out[:] = div
status = 0
else:
status = vg.integrate(out, div, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
div = self.get(parameter, 'div')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return div, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, 1, 1), parameter.dtype
class DivOperatorTerm(Term):
r"""
Weighted divergence term of a test function.
:Definition:
.. math::
\int_{\Omega} \nabla \cdot \ul{v} \mbox { or } \int_{\Omega} c \nabla
\cdot \ul{v}
:Arguments:
- material : :math:`c` (optional)
- virtual : :math:`\ul{v}`
"""
name = 'dw_div'
arg_types = ('opt_material', 'virtual')
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : ('D', None)},
{'opt_material' : None}]
@staticmethod
def function(out, mat, vg):
div_bf = vg.bfg
n_el, n_qp, dim, n_ep = div_bf.shape
div_bf = div_bf.reshape((n_el, n_qp, dim * n_ep, 1))
div_bf = nm.ascontiguousarray(div_bf)
if mat is not None:
status = vg.integrate(out, mat * div_bf)
else:
status = vg.integrate(out, div_bf)
return status
def get_fargs(self, mat, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(virtual)
return mat, vg
class GradDivStabilizationTerm(Term):
r"""
Grad-div stabilization term ( :math:`\gamma` is a global stabilization
parameter).
:Definition:
.. math::
\gamma \int_{\Omega} (\nabla\cdot\ul{u}) \cdot (\nabla\cdot\ul{v})
:Arguments:
- material : :math:`\gamma`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_grad_div'
arg_types = ('material', 'virtual', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D'}
function = staticmethod(terms.dw_st_grad_div)
def get_fargs(self, gamma, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if diff_var is None:
div = self.get(state, 'div')
fmode = 0
else:
div = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return div, gamma, vg, fmode
from sfepy.terms.terms_diffusion import LaplaceTerm
class PSPGPStabilizationTerm(LaplaceTerm):
r"""
PSPG stabilization term, pressure part ( :math:`\tau` is a local
stabilization parameter), alias to Laplace term dw_laplace.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ \nabla p \cdot \nabla q
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`q`
- state : :math:`p`
"""
name = 'dw_st_pspg_p'
class PSPGCStabilizationTerm(Term):
r"""
PSPG stabilization term, convective part ( :math:`\tau` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ ((\ul{b} \cdot \nabla) \ul{u})
\cdot \nabla q
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`q`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_pspg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : (1, None),
'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_st_pspg_c)
def get_fargs(self, tau, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
sap, svg = self.get_approximation(virtual)
vap, vvg = self.get_approximation(state)
val_qp = self.get(parameter, 'val')
conn = vap.get_connectivity(self.region, self.integration)
if diff_var is None:
fmode = 0
else:
fmode = 1
return val_qp, state(), tau, svg, vvg, conn, fmode
class SUPGPStabilizationTerm(Term):
r"""
SUPG stabilization term, pressure part ( :math:`\delta` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ \nabla p\cdot ((\ul{b} \cdot
\nabla) \ul{v})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`p`
"""
name = 'dw_st_supg_p'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', None),
'parameter' : 'D', 'state' : 1}
function = staticmethod(terms.dw_st_supg_p)
def get_fargs(self, delta, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vvg, _ = self.get_mapping(virtual)
svg, _ = self.get_mapping(state)
val_qp = self.get(parameter, 'val')
if diff_var is None:
grad = self.get(state, 'grad')
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return val_qp, grad, delta, vvg, svg, fmode
class SUPGCStabilizationTerm(Term):
r"""
SUPG stabilization term, convective part ( :math:`\delta` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ ((\ul{b} \cdot \nabla)
\ul{u})\cdot ((\ul{b} \cdot \nabla) \ul{v})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_supg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_st_supg_c)
def get_fargs(self, delta, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, vg = self.get_approximation(virtual)
val_qp = self.get(parameter, 'val')
conn = ap.get_connectivity(self.region, self.integration)
if diff_var is None:
fmode = 0
else:
fmode = 1
return val_qp, state(), delta, vg, conn, fmode
| import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
class DivGradTerm(Term):
r"""
Diffusion term.
:Definition:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
\int_{\Omega} \nu\ \nabla \ul{u} : \nabla \ul{w} \\
\int_{\Omega} \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
\int_{\Omega} \nabla \ul{u} : \nabla \ul{w}
:Arguments 1:
- material : :math:`\nu` (viscosity, optional)
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material : :math:`\nu` (viscosity, optional)
- parameter_1 : :math:`\ul{u}`
- parameter_2 : :math:`\ul{w}`
"""
name = 'dw_div_grad'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'parameter_1', 'parameter_2'))
arg_shapes = {'opt_material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'}
modes = ('weak', 'eval')
function = staticmethod(terms.term_ns_asm_div_grad)
def d_div_grad(self, out, grad1, grad2, mat, vg, fmode):
sh = grad1.shape
g1 = grad1.reshape((sh[0], sh[1], sh[2] * sh[3]))
g2 = grad2.reshape((sh[0], sh[1], sh[2] * sh[3]))
aux = mat * dot_sequences(g1[..., None], g2, 'ATB')[..., None]
if fmode == 2:
out[:] = aux
status = 0
else:
status = vg.integrate(out, aux, fmode)
return status
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if mat is None:
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
mat = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0, 1, 3, 2))
sh = grad.shape
grad = grad.reshape((sh[0], sh[1], sh[2] * sh[3], 1))
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, mat, vg, fmode
elif mode == 'eval':
grad1 = self.get(virtual, 'grad')
grad2 = self.get(state, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return grad1, grad2, mat, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.term_ns_asm_div_grad
else:
self.function = self.d_div_grad
class ConvectTerm(Term):
r"""
Nonlinear convective term.
:Definition:
.. math::
\int_{\Omega} ((\ul{u} \cdot \nabla) \ul{u}) \cdot \ul{v}
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_convect'
arg_types = ('virtual', 'state')
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D'}
function = staticmethod(terms.term_ns_asm_convect)
def get_fargs(self, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
val_qp = self.get(state, 'val')
fmode = diff_var is not None
return grad, val_qp, vg, fmode
class LinearConvectTerm(Term):
r"""
Linearized convective term.
:Definition:
.. math::
\int_{\Omega} ((\ul{b} \cdot \nabla) \ul{u}) \cdot \ul{v}
.. math::
((\ul{b} \cdot \nabla) \ul{u})|_{qp}
:Arguments:
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_lin_convect'
arg_types = ('virtual', 'parameter', 'state')
arg_shapes = {'virtual' : ('D', 'state'), 'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_lin_convect)
def get_fargs(self, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_qp = self.get(parameter, 'val')
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, val_qp, vg, fmode
elif mode == 'qp':
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
fmode = 2
return grad, val_qp, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
class StokesTerm(Term):
r"""
Stokes problem coupling term. Corresponds to weak forms of gradient and
divergence terms. Can be evaluated.
:Definition:
.. math::
\int_{\Omega} p\ \nabla \cdot \ul{v} \mbox{ , }
\int_{\Omega} q\ \nabla \cdot \ul{u}
\mbox{ or }
\int_{\Omega} c\ p\ \nabla \cdot \ul{v} \mbox{ , }
\int_{\Omega} c\ q\ \nabla \cdot \ul{u}
:Arguments 1:
- material : :math:`c` (optional)
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`c` (optional)
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`c` (optional)
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_stokes'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'state', 'virtual'),
('opt_material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'opt_material' : '1, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1},
{'opt_material' : None}]
modes = ('grad', 'div', 'eval')
@staticmethod
def d_eval(out, coef, vec_qp, div, vvg):
out_qp = coef * vec_qp * div
status = vvg.integrate(out, out_qp)
return status
def get_fargs(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
if self.mode == 'grad':
qp_var, qp_name = svar, 'val'
else:
qp_var, qp_name = vvar, 'div'
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
if coef is None:
coef = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return coef, val_qp, svg, vvg, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
div = self.get(vvar, 'div')
vec_qp = self.get(svar, 'val')
return coef, vec_qp, div, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'grad' : terms.dw_grad,
'div' : terms.dw_div,
'eval' : self.d_eval,
}[self.mode]
class GradTerm(Term):
r"""
Evaluate gradient of a scalar or vector field.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \nabla p \mbox{ or } \int_{\Omega} \nabla \ul{w}
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} \nabla p /
\int_{T_K} 1 \mbox{ or } \int_{T_K} \nabla \ul{w} /
\int_{T_K} 1
.. math::
(\nabla p)|_{qp} \mbox{ or } \nabla \ul{w}|_{qp}
:Arguments:
- parameter : :math:`p` or :math:`\ul{w}`
"""
name = 'ev_grad'
arg_types = ('parameter',)
arg_shapes = [{'parameter' : 1}, {'parameter' : 'D'}]
@staticmethod
def function(out, grad, vg, fmode):
if fmode == 2:
out[:] = grad
status = 0
else:
status = vg.integrate(out, grad, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
grad = self.get(parameter, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return grad, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim, n_c), parameter.dtype
class DivTerm(Term):
r"""
Evaluate divergence of a vector field.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \nabla \cdot \ul{u}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} \nabla \cdot \ul{u} / \int_{T_K} 1
.. math::
(\nabla \cdot \ul{u})|_{qp}
:Arguments:
- parameter : :math:`\ul{u}`
"""
name = 'ev_div'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
@staticmethod
def function(out, div, vg, fmode):
if fmode == 2:
out[:] = div
status = 0
else:
status = vg.integrate(out, div, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
div = self.get(parameter, 'div')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return div, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, 1, 1), parameter.dtype
class DivOperatorTerm(Term):
r"""
Weighted divergence term of a test function.
:Definition:
.. math::
\int_{\Omega} \nabla \cdot \ul{v} \mbox { or } \int_{\Omega} c \nabla
\cdot \ul{v}
:Arguments:
- material : :math:`c` (optional)
- virtual : :math:`\ul{v}`
"""
name = 'dw_div'
arg_types = ('opt_material', 'virtual')
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : ('D', None)},
{'opt_material' : None}]
@staticmethod
def function(out, mat, vg):
div_bf = vg.bfg
n_el, n_qp, dim, n_ep = div_bf.shape
div_bf = div_bf.reshape((n_el, n_qp, dim * n_ep, 1))
div_bf = nm.ascontiguousarray(div_bf)
if mat is not None:
status = vg.integrate(out, mat * div_bf)
else:
status = vg.integrate(out, div_bf)
return status
def get_fargs(self, mat, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(virtual)
return mat, vg
class GradDivStabilizationTerm(Term):
r"""
Grad-div stabilization term ( :math:`\gamma` is a global stabilization
parameter).
:Definition:
.. math::
\gamma \int_{\Omega} (\nabla\cdot\ul{u}) \cdot (\nabla\cdot\ul{v})
:Arguments:
- material : :math:`\gamma`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_grad_div'
arg_types = ('material', 'virtual', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D'}
function = staticmethod(terms.dw_st_grad_div)
def get_fargs(self, gamma, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if diff_var is None:
div = self.get(state, 'div')
fmode = 0
else:
div = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return div, gamma, vg, fmode
from sfepy.terms.terms_diffusion import LaplaceTerm
class PSPGPStabilizationTerm(LaplaceTerm):
r"""
PSPG stabilization term, pressure part ( :math:`\tau` is a local
stabilization parameter), alias to Laplace term dw_laplace.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ \nabla p \cdot \nabla q
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`q`
- state : :math:`p`
"""
name = 'dw_st_pspg_p'
class PSPGCStabilizationTerm(Term):
r"""
PSPG stabilization term, convective part ( :math:`\tau` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ ((\ul{b} \cdot \nabla) \ul{u})
\cdot \nabla q
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`q`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_pspg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : (1, None),
'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_st_pspg_c)
def get_fargs(self, tau, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
sap, svg = self.get_approximation(virtual)
vap, vvg = self.get_approximation(state)
val_qp = self.get(parameter, 'val')
conn = vap.get_connectivity(self.region, self.integration)
if diff_var is None:
fmode = 0
else:
fmode = 1
return val_qp, state(), tau, svg, vvg, conn, fmode
class SUPGPStabilizationTerm(Term):
r"""
SUPG stabilization term, pressure part ( :math:`\delta` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ \nabla p\cdot ((\ul{b} \cdot
\nabla) \ul{v})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`p`
"""
name = 'dw_st_supg_p'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', None),
'parameter' : 'D', 'state' : 1}
function = staticmethod(terms.dw_st_supg_p)
def get_fargs(self, delta, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vvg, _ = self.get_mapping(virtual)
svg, _ = self.get_mapping(state)
val_qp = self.get(parameter, 'val')
if diff_var is None:
grad = self.get(state, 'grad')
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return val_qp, grad, delta, vvg, svg, fmode
class SUPGCStabilizationTerm(Term):
r"""
SUPG stabilization term, convective part ( :math:`\delta` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ ((\ul{b} \cdot \nabla)
\ul{u})\cdot ((\ul{b} \cdot \nabla) \ul{v})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_supg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_st_supg_c)
def get_fargs(self, delta, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, vg = self.get_approximation(virtual)
val_qp = self.get(parameter, 'val')
conn = ap.get_connectivity(self.region, self.integration)
if diff_var is None:
fmode = 0
else:
fmode = 1
return val_qp, state(), delta, vg, conn, fmode
| fr | 0.218429 | 2.491091 | 2 |
saleor/unurshop/crawler/migrations/0013_auto_20210921_0452.py | nlkhagva/saleor | 0 | 13638 | # Generated by Django 3.1.1 on 2021-09-21 04:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crawler', '0012_auto_20210921_0451'),
]
operations = [
migrations.AlterField(
model_name='crawlerline',
name='ustatus',
field=models.PositiveIntegerField(blank=True, default=1, null=True),
),
]
| # Generated by Django 3.1.1 on 2021-09-21 04:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crawler', '0012_auto_20210921_0451'),
]
operations = [
migrations.AlterField(
model_name='crawlerline',
name='ustatus',
field=models.PositiveIntegerField(blank=True, default=1, null=True),
),
]
| es | 0.126498 | 1.39169 | 1 |
src/cli/examples/oss-fuzz-target.py | gdhuper/onefuzz | 1 | 13639 | #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import os
import sys
import tempfile
from subprocess import PIPE, CalledProcessError, check_call # nosec
from typing import List, Optional
from onefuzztypes.models import NotificationConfig
from onefuzztypes.primitives import PoolName
from onefuzz.api import Command, Onefuzz
from onefuzz.cli import execute_api
SANITIZERS = ["address", "dataflow", "memory", "undefined"]
class Ossfuzz(Command):
def build(self, project: str, sanitizer: str) -> None:
""" Build the latest oss-fuzz target """
self.logger.info("building %s:%s", project, sanitizer)
cmd = [
"docker",
"run",
"--rm",
"-ti",
"-e",
"SANITIZER=%s" % sanitizer,
"--mount",
"src=%s,target=/out,type=bind" % os.getcwd(),
"gcr.io/oss-fuzz/%s" % project,
"compile",
]
check_call(cmd, stderr=PIPE, stdout=PIPE)
def fuzz(
self,
project: str,
build: str,
pool: PoolName,
sanitizers: Optional[List[str]] = None,
notification_config: Optional[NotificationConfig] = None,
) -> None:
""" Build & Launch all of the libFuzzer targets for a given project """
if sanitizers is None:
sanitizers = SANITIZERS
for sanitizer in sanitizers:
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
try:
self.build(project, sanitizer)
except CalledProcessError:
self.logger.warning("building %s:%s failed", project, sanitizer)
continue
self.logger.info("launching %s:%s build:%s", project, sanitizer, build)
self.onefuzz.template.ossfuzz.libfuzzer(
project,
"%s:%s" % (sanitizer, build),
pool,
max_target_count=0,
sync_inputs=True,
notification_config=notification_config,
)
def stop(self, project: str) -> None:
for job in self.onefuzz.jobs.list():
if job.config.project != project:
continue
if job.config.build != "base":
continue
self.logger.info("stopping %s: %s", job.job_id, job.state)
self.onefuzz.jobs.delete(job.job_id)
def main() -> int:
return execute_api(
Ossfuzz(Onefuzz(), logging.getLogger("ossfuzz")), [Command], "0.0.1"
)
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import os
import sys
import tempfile
from subprocess import PIPE, CalledProcessError, check_call # nosec
from typing import List, Optional
from onefuzztypes.models import NotificationConfig
from onefuzztypes.primitives import PoolName
from onefuzz.api import Command, Onefuzz
from onefuzz.cli import execute_api
SANITIZERS = ["address", "dataflow", "memory", "undefined"]
class Ossfuzz(Command):
def build(self, project: str, sanitizer: str) -> None:
""" Build the latest oss-fuzz target """
self.logger.info("building %s:%s", project, sanitizer)
cmd = [
"docker",
"run",
"--rm",
"-ti",
"-e",
"SANITIZER=%s" % sanitizer,
"--mount",
"src=%s,target=/out,type=bind" % os.getcwd(),
"gcr.io/oss-fuzz/%s" % project,
"compile",
]
check_call(cmd, stderr=PIPE, stdout=PIPE)
def fuzz(
self,
project: str,
build: str,
pool: PoolName,
sanitizers: Optional[List[str]] = None,
notification_config: Optional[NotificationConfig] = None,
) -> None:
""" Build & Launch all of the libFuzzer targets for a given project """
if sanitizers is None:
sanitizers = SANITIZERS
for sanitizer in sanitizers:
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
try:
self.build(project, sanitizer)
except CalledProcessError:
self.logger.warning("building %s:%s failed", project, sanitizer)
continue
self.logger.info("launching %s:%s build:%s", project, sanitizer, build)
self.onefuzz.template.ossfuzz.libfuzzer(
project,
"%s:%s" % (sanitizer, build),
pool,
max_target_count=0,
sync_inputs=True,
notification_config=notification_config,
)
def stop(self, project: str) -> None:
for job in self.onefuzz.jobs.list():
if job.config.project != project:
continue
if job.config.build != "base":
continue
self.logger.info("stopping %s: %s", job.job_id, job.state)
self.onefuzz.jobs.delete(job.job_id)
def main() -> int:
return execute_api(
Ossfuzz(Onefuzz(), logging.getLogger("ossfuzz")), [Command], "0.0.1"
)
if __name__ == "__main__":
sys.exit(main())
| pt | 0.117354 | 2.24939 | 2 |
stubs/workspaces.py | claytonbrown/troposphere | 0 | 13640 | <reponame>claytonbrown/troposphere
from . import AWSObject, AWSProperty
from .validators import *
from .constants import *
# -------------------------------------------
class WorkSpacesWorkspace(AWSObject):
"""# AWS::WorkSpaces::Workspace - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html",
"Properties": {
"BundleId": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-bundleid",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Conditional"
},
"DirectoryId": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-directoryid",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Conditional"
},
"RootVolumeEncryptionEnabled": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-rootvolumeencryptionenabled",
"PrimitiveType": "Boolean",
"Required": false,
"UpdateType": "Conditional"
},
"UserName": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-username",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Immutable"
},
"UserVolumeEncryptionEnabled": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-uservolumeencryptionenabled",
"PrimitiveType": "Boolean",
"Required": false,
"UpdateType": "Conditional"
},
"VolumeEncryptionKey": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-volumeencryptionkey",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Conditional"
}
}
}
"""
resource_type = "AWS::WorkSpaces::Workspace"
props = {
'BundleId': (basestring, True, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-bundleid'),
'DirectoryId': (basestring, True, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-directoryid'),
'RootVolumeEncryptionEnabled': (boolean, False, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-rootvolumeencryptionenabled'),
'UserName': (basestring, True, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-username'),
'UserVolumeEncryptionEnabled': (boolean, False, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-uservolumeencryptionenabled'),
'VolumeEncryptionKey': (basestring, False, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-volumeencryptionkey')
}
| from . import AWSObject, AWSProperty
from .validators import *
from .constants import *
# -------------------------------------------
class WorkSpacesWorkspace(AWSObject):
"""# AWS::WorkSpaces::Workspace - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html",
"Properties": {
"BundleId": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-bundleid",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Conditional"
},
"DirectoryId": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-directoryid",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Conditional"
},
"RootVolumeEncryptionEnabled": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-rootvolumeencryptionenabled",
"PrimitiveType": "Boolean",
"Required": false,
"UpdateType": "Conditional"
},
"UserName": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-username",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Immutable"
},
"UserVolumeEncryptionEnabled": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-uservolumeencryptionenabled",
"PrimitiveType": "Boolean",
"Required": false,
"UpdateType": "Conditional"
},
"VolumeEncryptionKey": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-volumeencryptionkey",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Conditional"
}
}
}
"""
resource_type = "AWS::WorkSpaces::Workspace"
props = {
'BundleId': (basestring, True, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-bundleid'),
'DirectoryId': (basestring, True, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-directoryid'),
'RootVolumeEncryptionEnabled': (boolean, False, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-rootvolumeencryptionenabled'),
'UserName': (basestring, True, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-username'),
'UserVolumeEncryptionEnabled': (boolean, False, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-uservolumeencryptionenabled'),
'VolumeEncryptionKey': (basestring, False, 'Conditional', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-workspaces-workspace.html#cfn-workspaces-workspace-volumeencryptionkey')
} | pt | 0.179342 | 1.948927 | 2 |
hystrix/command.py | grofers/hystrix-py | 93 | 13641 | <gh_stars>10-100
"""
Used to wrap code that will execute potentially risky functionality
(typically meaning a service call over the network) with fault and latency
tolerance, statistics and performance metrics capture, circuit breaker and
bulkhead functionality.
"""
from __future__ import absolute_import
import logging
import six
from hystrix.group import Group
from hystrix.command_metrics import CommandMetrics
from hystrix.command_properties import CommandProperties
log = logging.getLogger(__name__)
# TODO: Change this to an AbstractCommandMetaclass
class CommandMetaclass(type):
__blacklist__ = ('Command', 'CommandMetaclass')
def __new__(cls, name, bases, attrs):
# Command key initialization
command_key = attrs.get('command_key') or name
new_class = type.__new__(cls, command_key, bases, attrs)
if name in cls.__blacklist__:
return new_class
# TODO: Check instance CommandProperties here?
command_properties_defaults = attrs.get('command_properties_defaults')
if command_properties_defaults is None:
command_properties_defaults = CommandProperties.setter()
# Properties initialization
properties_strategy = attrs.get('properties_strategy')
if properties_strategy is None:
properties_strategy = CommandProperties(
command_key, command_properties_defaults)
setattr(new_class, 'properties', properties_strategy)
# Pool key
# This defines which pool this command should run on.
# It uses the pool_key if provided, then defaults to use Group key.
# It can then be overridden by a property if defined so it can be
# changed at runtime.
pool_key = attrs.get('pool_key')
# Group key initialization
group_key = attrs.get('group_key') or '{}Group'.format(command_key)
NewGroup = type(group_key, (Group,),
dict(group_key=group_key, pool_key=pool_key))
setattr(new_class, 'group', NewGroup())
setattr(new_class, 'group_key', group_key)
setattr(new_class, 'command_key', command_key)
# Metrics initialization
command_metrics_key = '{}CommandMetrics'.format(command_key)
# TODO: Check instance CommandMetrics here?
metrics = attrs.get('metrics')
if metrics is None:
NewCommandMetrics = type(
command_metrics_key, (CommandMetrics,),
dict(command_metrics_key=command_metrics_key,
group_key=group_key, pool_key=pool_key))
metrics = NewCommandMetrics(properties=properties_strategy)
setattr(new_class, 'metrics', metrics)
return new_class
# TODO: Change this to inherit from an AbstractCommand
class Command(six.with_metaclass(CommandMetaclass, object)):
command_key = None
group_key = None
def __init__(self, group_key=None, command_key=None,
pool_key=None, circuit_breaker=None, pool=None,
command_properties_defaults=None,
pool_properties_defaults=None, metrics=None,
fallback_semaphore=None, execution_semaphore=None,
properties_strategy=None, execution_hook=None, timeout=None):
self.timeout = timeout
def run(self):
raise NotImplementedError('Subclasses must implement this method.')
def fallback(self):
raise NotImplementedError('Subclasses must implement this method.')
def cache(self):
raise NotImplementedError('Subclasses must implement this method.')
def execute(self, timeout=None):
timeout = timeout or self.timeout
future = self.group.pool.submit(self.run)
try:
return future.result(timeout)
except Exception:
log.exception('exception calling run for {}'.format(self))
log.info('run raises {}'.format(future.exception))
try:
log.info('trying fallback for {}'.format(self))
future = self.group.pool.submit(self.fallback)
return future.result(timeout)
except Exception:
log.exception('exception calling fallback for {}'.format(self))
log.info('run() raised {}'.format(future.exception))
log.info('trying cache for {}'.format(self))
future = self.group.pool.submit(self.cache)
return future.result(timeout)
def observe(self, timeout=None):
timeout = timeout or self.timeout
return self.__async(timeout=timeout)
def queue(self, timeout=None):
timeout = timeout or self.timeout
return self.__async(timeout=timeout)
def __async(self, timeout=None):
timeout = timeout or self.timeout
future = self.group.pool.submit(self.run)
try:
# Call result() to check for exception
future.result(timeout)
return future
except Exception:
log.exception('exception calling run for {}'.format(self))
log.info('run raised {}'.format(future.exception))
try:
log.info('trying fallback for {}'.format(self))
future = self.group.pool.submit(self.fallback)
# Call result() to check for exception
future.result(timeout)
return future
except Exception:
log.exception('exception calling fallback for {}'.format(self))
log.info('fallback raised {}'.format(future.exception))
log.info('trying cache for {}'.format(self))
return self.group.pool.submit(self.cache)
| """
Used to wrap code that will execute potentially risky functionality
(typically meaning a service call over the network) with fault and latency
tolerance, statistics and performance metrics capture, circuit breaker and
bulkhead functionality.
"""
from __future__ import absolute_import
import logging
import six
from hystrix.group import Group
from hystrix.command_metrics import CommandMetrics
from hystrix.command_properties import CommandProperties
log = logging.getLogger(__name__)
# TODO: Change this to an AbstractCommandMetaclass
class CommandMetaclass(type):
__blacklist__ = ('Command', 'CommandMetaclass')
def __new__(cls, name, bases, attrs):
# Command key initialization
command_key = attrs.get('command_key') or name
new_class = type.__new__(cls, command_key, bases, attrs)
if name in cls.__blacklist__:
return new_class
# TODO: Check instance CommandProperties here?
command_properties_defaults = attrs.get('command_properties_defaults')
if command_properties_defaults is None:
command_properties_defaults = CommandProperties.setter()
# Properties initialization
properties_strategy = attrs.get('properties_strategy')
if properties_strategy is None:
properties_strategy = CommandProperties(
command_key, command_properties_defaults)
setattr(new_class, 'properties', properties_strategy)
# Pool key
# This defines which pool this command should run on.
# It uses the pool_key if provided, then defaults to use Group key.
# It can then be overridden by a property if defined so it can be
# changed at runtime.
pool_key = attrs.get('pool_key')
# Group key initialization
group_key = attrs.get('group_key') or '{}Group'.format(command_key)
NewGroup = type(group_key, (Group,),
dict(group_key=group_key, pool_key=pool_key))
setattr(new_class, 'group', NewGroup())
setattr(new_class, 'group_key', group_key)
setattr(new_class, 'command_key', command_key)
# Metrics initialization
command_metrics_key = '{}CommandMetrics'.format(command_key)
# TODO: Check instance CommandMetrics here?
metrics = attrs.get('metrics')
if metrics is None:
NewCommandMetrics = type(
command_metrics_key, (CommandMetrics,),
dict(command_metrics_key=command_metrics_key,
group_key=group_key, pool_key=pool_key))
metrics = NewCommandMetrics(properties=properties_strategy)
setattr(new_class, 'metrics', metrics)
return new_class
# TODO: Change this to inherit from an AbstractCommand
class Command(six.with_metaclass(CommandMetaclass, object)):
command_key = None
group_key = None
def __init__(self, group_key=None, command_key=None,
pool_key=None, circuit_breaker=None, pool=None,
command_properties_defaults=None,
pool_properties_defaults=None, metrics=None,
fallback_semaphore=None, execution_semaphore=None,
properties_strategy=None, execution_hook=None, timeout=None):
self.timeout = timeout
def run(self):
raise NotImplementedError('Subclasses must implement this method.')
def fallback(self):
raise NotImplementedError('Subclasses must implement this method.')
def cache(self):
raise NotImplementedError('Subclasses must implement this method.')
def execute(self, timeout=None):
timeout = timeout or self.timeout
future = self.group.pool.submit(self.run)
try:
return future.result(timeout)
except Exception:
log.exception('exception calling run for {}'.format(self))
log.info('run raises {}'.format(future.exception))
try:
log.info('trying fallback for {}'.format(self))
future = self.group.pool.submit(self.fallback)
return future.result(timeout)
except Exception:
log.exception('exception calling fallback for {}'.format(self))
log.info('run() raised {}'.format(future.exception))
log.info('trying cache for {}'.format(self))
future = self.group.pool.submit(self.cache)
return future.result(timeout)
def observe(self, timeout=None):
timeout = timeout or self.timeout
return self.__async(timeout=timeout)
def queue(self, timeout=None):
timeout = timeout or self.timeout
return self.__async(timeout=timeout)
def __async(self, timeout=None):
timeout = timeout or self.timeout
future = self.group.pool.submit(self.run)
try:
# Call result() to check for exception
future.result(timeout)
return future
except Exception:
log.exception('exception calling run for {}'.format(self))
log.info('run raised {}'.format(future.exception))
try:
log.info('trying fallback for {}'.format(self))
future = self.group.pool.submit(self.fallback)
# Call result() to check for exception
future.result(timeout)
return future
except Exception:
log.exception('exception calling fallback for {}'.format(self))
log.info('fallback raised {}'.format(future.exception))
log.info('trying cache for {}'.format(self))
return self.group.pool.submit(self.cache) | pt | 0.157535 | 2.367502 | 2 |
glue/viewers/table/qt/data_viewer.py | HPLegion/glue | 550 | 13642 | <gh_stars>100-1000
import os
from functools import lru_cache
import numpy as np
from qtpy.QtCore import Qt
from qtpy import QtCore, QtGui, QtWidgets
from matplotlib.colors import ColorConverter
from glue.utils.qt import get_qapp
from glue.config import viewer_tool
from glue.core import BaseData, Data
from glue.utils.qt import load_ui
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.toolbar import BasicToolbar
from glue.viewers.common.tool import CheckableTool
from glue.viewers.common.layer_artist import LayerArtist
from glue.core.subset import ElementSubsetState
from glue.utils.colors import alpha_blend_colors
from glue.utils.qt import mpl_to_qt_color, messagebox_on_error
from glue.core.exceptions import IncompatibleAttribute
from glue.viewers.table.compat import update_table_viewer_state
try:
import dask.array as da
DASK_INSTALLED = True
except ImportError:
DASK_INSTALLED = False
__all__ = ['TableViewer', 'TableLayerArtist']
COLOR_CONVERTER = ColorConverter()
class DataTableModel(QtCore.QAbstractTableModel):
    """Qt item model exposing a 1D glue dataset to the table view.

    The model keeps a row permutation (``order``) produced by ``sort`` and
    a filtered version of it (``order_visible``) restricted to the rows
    covered by the currently visible layers.
    """

    def __init__(self, table_viewer):
        super(DataTableModel, self).__init__()
        if table_viewer.data.ndim != 1:
            raise ValueError("Can only use Table widget for 1D data")
        self._table_viewer = table_viewer
        self._data = table_viewer.data
        # When True, coordinate components are listed alongside the
        # regular data components.
        self.show_coords = False
        # Permutation of the row indices, updated by sort().
        self.order = np.arange(self._data.shape[0])
        self._update_visible()

    def data_changed(self):
        """Notify attached views that the underlying data changed."""
        top_left = self.index(0, 0)
        # QAbstractItemModel.index() takes (row, column) and valid indices
        # run to rowCount() - 1 / columnCount() - 1; the previous code
        # passed (columnCount(), rowCount()), i.e. transposed and one past
        # the end.
        bottom_right = self.index(self.rowCount() - 1, self.columnCount() - 1)
        self._update_visible()
        self.data_by_row_and_column.cache_clear()
        self.dataChanged.emit(top_left, bottom_right)
        self.layoutChanged.emit()

    @property
    def columns(self):
        """Component IDs shown as table columns."""
        if self.show_coords:
            return self._data.components
        else:
            return self._data.main_components + self._data.derived_components

    def columnCount(self, index=None):
        return len(self.columns)

    def rowCount(self, index=None):
        # Qt bug: Crashes on tables bigger than this
        return min(self.order_visible.size, 71582788)

    def headerData(self, section, orientation, role):
        if role != Qt.DisplayRole:
            return None

        if orientation == Qt.Horizontal:
            column_name = self.columns[section].label
            units = self._data.get_component(self.columns[section]).units
            if units != '':
                column_name += "\n{0}".format(units)
            return column_name
        elif orientation == Qt.Vertical:
            # Row headers show the original (unsorted) row index.
            return str(self.order_visible[section])

    def data(self, index, role):
        if not index.isValid():
            return None
        return self.data_by_row_and_column(index.row(), index.column(), role)

    # The data() method gets called many times, often with the same parameters,
    # for example if bringing the window to the foreground/background, shifting
    # up/down/left/right by one cell, etc. This can be very slow when e.g. dask
    # columns are present so we cache the most recent 65536 calls which should
    # have a reasonably sensible memory footprint.
    # NOTE(review): lru_cache on an instance method keeps the model alive for
    # the cache's lifetime; presumably acceptable because the cache is cleared
    # explicitly and the model dies with the viewer — confirm.
    @lru_cache(maxsize=65536)
    def data_by_row_and_column(self, row, column, role):

        if role == Qt.DisplayRole:

            c = self.columns[column]
            idx = self.order_visible[row]
            comp = self._data[c]
            value = comp[idx]
            if isinstance(value, bytes):
                return value.decode('ascii')
            else:
                if DASK_INSTALLED and isinstance(value, da.Array):
                    # Dask arrays are lazy — evaluate the scalar now.
                    return str(value.compute())
                else:
                    return str(comp[idx])

        elif role == Qt.BackgroundRole:

            idx = self.order_visible[row]

            # Find all subsets that this index is part of
            colors = []
            for layer_artist in self._table_viewer.layers[::-1]:
                if isinstance(layer_artist.layer, BaseData):
                    continue
                if layer_artist.visible:
                    subset = layer_artist.layer
                    try:
                        if subset.to_mask(view=slice(idx, idx + 1))[0]:
                            colors.append(subset.style.color)
                    except IncompatibleAttribute as exc:
                        # Only disable the layer if enabled, as otherwise we
                        # will recursively call clear and _refresh, causing
                        # an infinite loop and performance issues.
                        if layer_artist.enabled:
                            layer_artist.disable_invalid_attributes(*exc.args)
                    else:
                        layer_artist.enabled = True

            # Blend the colors using alpha blending
            if len(colors) > 0:
                color = alpha_blend_colors(colors, additional_alpha=0.5)
                color = mpl_to_qt_color(color)
                return QtGui.QBrush(color)

    def sort(self, column, ascending):
        """Sort rows by *column*; *ascending* is a Qt.SortOrder value."""
        c = self.columns[column]
        comp = self._data.get_component(c)
        self.order = np.argsort(comp.data)
        if ascending == Qt.DescendingOrder:
            self.order = self.order[::-1]
        self._update_visible()
        self.data_by_row_and_column.cache_clear()
        self.layoutChanged.emit()

    def _update_visible(self):
        """
        Given which layers are visible or not, convert order to order_visible.
        """

        self.data_by_row_and_column.cache_clear()

        # First, if the data layer is visible, show all rows
        for layer_artist in self._table_viewer.layers:
            if layer_artist.visible and isinstance(layer_artist.layer, BaseData):
                self.order_visible = self.order
                return

        # If not then we need to show only the rows with visible subsets
        visible = np.zeros(self.order.shape, dtype=bool)
        for layer_artist in self._table_viewer.layers:
            if layer_artist.visible:
                mask = layer_artist.layer.to_mask()
                if DASK_INSTALLED and isinstance(mask, da.Array):
                    mask = mask.compute()
                visible |= mask

        self.order_visible = self.order[visible]
class TableLayerArtist(LayerArtist):
    """Layer artist for the table viewer.

    The table has no per-layer drawing of its own: drawing, updating and
    clearing a layer are all the same operation — ask the shared table
    model to re-read its data.
    """

    def __init__(self, table_viewer, viewer_state, layer_state=None, layer=None):
        self._table_viewer = table_viewer
        super().__init__(viewer_state, layer_state=layer_state, layer=layer)
        self.redraw()

    def _refresh(self):
        self._table_viewer.model.data_changed()

    # All three public operations delegate to the single refresh above.
    redraw = _refresh
    update = _refresh
    clear = _refresh
@viewer_tool
class RowSelectTool(CheckableTool):
    """Checkable toolbar tool that toggles row-selection mode on the table."""

    tool_id = 'table:rowselect'
    icon = 'glue_row_select'
    action_text = 'Select row(s)'
    tool_tip = ('Select rows by clicking on rows and pressing enter '
                'once the selection is ready to be applied')
    status_tip = ('CLICK to select, press ENTER to finalize selection, '
                  'ALT+CLICK or ALT+UP/DOWN to apply selection immediately')

    def __init__(self, viewer):
        super(RowSelectTool, self).__init__(viewer)
        # Start with row selection disabled until the tool is activated.
        self.deactivate()

    def activate(self):
        """Allow multi-row (extended) selection while the tool is active."""
        self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)

    def deactivate(self):
        """Disable selection and drop any pending row selection."""
        # Don't do anything if the viewer has already been closed
        if self.viewer is None:
            return
        self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
        self.viewer.ui.table.clearSelection()
class TableViewWithSelectionSignal(QtWidgets.QTableView):
    """QTableView subclass that re-emits selection changes as a Qt signal."""

    # Emitted whenever the view's selection changes.
    selection_changed = QtCore.Signal()

    def selectionChanged(self, *args, **kwargs):
        # Emit our signal first, then let Qt's default handling run.
        self.selection_changed.emit()
        super(TableViewWithSelectionSignal, self).selectionChanged(*args, **kwargs)
class TableViewer(DataViewer):
    """Glue data viewer showing a single 1D dataset as a table.

    Subsets of the dataset are rendered as row highlighting and can be
    created from row selections via the row-select tool.
    """

    LABEL = "Table Viewer"

    _toolbar_cls = BasicToolbar
    _data_artist_cls = TableLayerArtist
    _subset_artist_cls = TableLayerArtist

    inherit_tools = False
    tools = ['table:rowselect']

    def __init__(self, session, state=None, parent=None, widget=None):

        super(TableViewer, self).__init__(session, state=state, parent=parent)

        self.ui = load_ui('data_viewer.ui',
                          directory=os.path.dirname(__file__))
        self.setCentralWidget(self.ui)

        # User-resizable columns, with the last column stretching to fill
        # the remaining width.
        hdr = self.ui.table.horizontalHeader()
        hdr.setStretchLastSection(True)
        hdr.setSectionResizeMode(hdr.Interactive)

        hdr = self.ui.table.verticalHeader()
        hdr.setSectionResizeMode(hdr.Interactive)

        self.data = None
        self.model = None

        self.ui.table.selection_changed.connect(self.selection_changed)

        self.state.add_callback('layers', self._on_layers_changed)
        self._on_layers_changed()

    def selection_changed(self):
        """Apply the selection immediately when ALT is held down."""
        app = get_qapp()
        if app.queryKeyboardModifiers() == Qt.AltModifier:
            self.finalize_selection(clear=False)

    def keyPressEvent(self, event):
        # ENTER/RETURN finalizes the pending row selection when the row
        # selection tool is the active tool.
        if self.toolbar.active_tool is self.toolbar.tools['table:rowselect']:
            if event.key() in [Qt.Key_Enter, Qt.Key_Return]:
                self.finalize_selection()
        super(TableViewer, self).keyPressEvent(event)

    def finalize_selection(self, clear=True):
        """Turn the currently selected rows into a glue subset state."""
        model = self.ui.table.selectionModel()
        # Map visible view rows back to original dataset row indices.
        selected_rows = [self.model.order_visible[x.row()] for x in model.selectedRows()]
        subset_state = ElementSubsetState(indices=selected_rows, data=self.data)
        mode = self.session.edit_subset_mode
        # NOTE(review): self._data is presumably provided by the DataViewer
        # base class (the viewer's data collection) — confirm.
        mode.update(self._data, subset_state, focus_data=self.data)
        if clear:
            # We block the signals here to make sure that we don't update
            # the subset again once the selection is cleared.
            self.ui.table.blockSignals(True)
            self.ui.table.clearSelection()
            self.ui.table.blockSignals(False)

    def _on_layers_changed(self, *args):
        """Rebuild the table model for the first data layer, if any."""
        for layer_state in self.state.layers:
            if isinstance(layer_state.layer, BaseData):
                break
        else:
            return

        self.data = layer_state.layer
        # Suspend repaints while the model is swapped out.
        self.setUpdatesEnabled(False)
        self.model = DataTableModel(self)
        self.ui.table.setModel(self.model)
        self.setUpdatesEnabled(True)

    @messagebox_on_error("Failed to add data")
    def add_data(self, data):
        """Show *data* in the table, replacing any existing layers."""
        with self._layer_artist_container.ignore_empty():
            self.state.layers[:] = []
            return super(TableViewer, self).add_data(data)

    @messagebox_on_error("Failed to add subset")
    def add_subset(self, subset):
        """Add *subset*; its parent data must match the table's data."""
        if self.data is None:
            self.add_data(subset.data)
            self.state.layers[0].visible = False
        elif subset.data != self.data:
            raise ValueError("subset parent data does not match existing table data")

        return super(TableViewer, self).add_subset(subset)

    @property
    def window_title(self):
        # Window title reflects the displayed dataset, if any.
        if len(self.state.layers) > 0:
            return 'Table: ' + self.state.layers[0].layer.label
        else:
            return 'Table'

    def closeEvent(self, event):
        """
        On close, Qt seems to scan through the entire model
        if the data set is big. To sidestep that,
        we swap out with a tiny data set before closing
        """
        super(TableViewer, self).closeEvent(event)
        if self.model is not None:
            self.model._data = Data(x=[0])
        event.accept()

    def get_layer_artist(self, cls, layer=None, layer_state=None):
        return cls(self, self.state, layer=layer, layer_state=layer_state)

    @staticmethod
    def update_viewer_state(rec, context):
        # Delegate backwards-compatible state loading to the compat module.
        return update_table_viewer_state(rec, context)
| import os
from functools import lru_cache
import numpy as np
from qtpy.QtCore import Qt
from qtpy import QtCore, QtGui, QtWidgets
from matplotlib.colors import ColorConverter
from glue.utils.qt import get_qapp
from glue.config import viewer_tool
from glue.core import BaseData, Data
from glue.utils.qt import load_ui
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.toolbar import BasicToolbar
from glue.viewers.common.tool import CheckableTool
from glue.viewers.common.layer_artist import LayerArtist
from glue.core.subset import ElementSubsetState
from glue.utils.colors import alpha_blend_colors
from glue.utils.qt import mpl_to_qt_color, messagebox_on_error
from glue.core.exceptions import IncompatibleAttribute
from glue.viewers.table.compat import update_table_viewer_state
try:
import dask.array as da
DASK_INSTALLED = True
except ImportError:
DASK_INSTALLED = False
__all__ = ['TableViewer', 'TableLayerArtist']
COLOR_CONVERTER = ColorConverter()
class DataTableModel(QtCore.QAbstractTableModel):
    """Qt model presenting a 1D glue dataset, with sorting and
    subset-based row filtering."""

    def __init__(self, table_viewer):
        super(DataTableModel, self).__init__()
        if table_viewer.data.ndim != 1:
            raise ValueError("Can only use Table widget for 1D data")
        self._table_viewer = table_viewer
        self._data = table_viewer.data
        self.show_coords = False  # include coordinate components as columns?
        self.order = np.arange(self._data.shape[0])  # sort permutation
        self._update_visible()

    def data_changed(self):
        """Signal views that the underlying data changed."""
        top_left = self.index(0, 0)
        # NOTE(review): QAbstractItemModel.index() takes (row, column);
        # passing (columnCount(), rowCount()) looks transposed and one
        # past the last valid index — confirm against Qt docs.
        bottom_right = self.index(self.columnCount(), self.rowCount())
        self._update_visible()
        self.data_by_row_and_column.cache_clear()
        self.dataChanged.emit(top_left, bottom_right)
        self.layoutChanged.emit()

    @property
    def columns(self):
        """Component IDs shown as table columns."""
        if self.show_coords:
            return self._data.components
        else:
            return self._data.main_components + self._data.derived_components

    def columnCount(self, index=None):
        return len(self.columns)

    def rowCount(self, index=None):
        # Qt bug: Crashes on tables bigger than this
        return min(self.order_visible.size, 71582788)

    def headerData(self, section, orientation, role):
        if role != Qt.DisplayRole:
            return None

        if orientation == Qt.Horizontal:
            column_name = self.columns[section].label
            units = self._data.get_component(self.columns[section]).units
            if units != '':
                column_name += "\n{0}".format(units)
            return column_name
        elif orientation == Qt.Vertical:
            # Row headers show the original (unsorted) row index.
            return str(self.order_visible[section])

    def data(self, index, role):
        if not index.isValid():
            return None
        return self.data_by_row_and_column(index.row(), index.column(), role)

    # The data() method gets called many times, often with the same parameters,
    # for example if bringing the window to the foreground/background, shifting
    # up/down/left/right by one cell, etc. This can be very slow when e.g. dask
    # columns are present so we cache the most recent 65536 calls which should
    # have a reasonably sensible memory footprint.
    @lru_cache(maxsize=65536)
    def data_by_row_and_column(self, row, column, role):

        if role == Qt.DisplayRole:

            c = self.columns[column]
            idx = self.order_visible[row]
            comp = self._data[c]
            value = comp[idx]
            if isinstance(value, bytes):
                return value.decode('ascii')
            else:
                if DASK_INSTALLED and isinstance(value, da.Array):
                    # Dask values are lazy — evaluate the scalar now.
                    return str(value.compute())
                else:
                    return str(comp[idx])

        elif role == Qt.BackgroundRole:

            idx = self.order_visible[row]

            # Find all subsets that this index is part of
            colors = []
            for layer_artist in self._table_viewer.layers[::-1]:
                if isinstance(layer_artist.layer, BaseData):
                    continue
                if layer_artist.visible:
                    subset = layer_artist.layer
                    try:
                        if subset.to_mask(view=slice(idx, idx + 1))[0]:
                            colors.append(subset.style.color)
                    except IncompatibleAttribute as exc:
                        # Only disable the layer if enabled, as otherwise we
                        # will recursively call clear and _refresh, causing
                        # an infinite loop and performance issues.
                        if layer_artist.enabled:
                            layer_artist.disable_invalid_attributes(*exc.args)
                    else:
                        layer_artist.enabled = True

            # Blend the colors using alpha blending
            if len(colors) > 0:
                color = alpha_blend_colors(colors, additional_alpha=0.5)
                color = mpl_to_qt_color(color)
                return QtGui.QBrush(color)

    def sort(self, column, ascending):
        """Sort rows by *column*; *ascending* is a Qt.SortOrder value."""
        c = self.columns[column]
        comp = self._data.get_component(c)
        self.order = np.argsort(comp.data)
        if ascending == Qt.DescendingOrder:
            self.order = self.order[::-1]
        self._update_visible()
        self.data_by_row_and_column.cache_clear()
        self.layoutChanged.emit()

    def _update_visible(self):
        """
        Given which layers are visible or not, convert order to order_visible.
        """

        self.data_by_row_and_column.cache_clear()

        # First, if the data layer is visible, show all rows
        for layer_artist in self._table_viewer.layers:
            if layer_artist.visible and isinstance(layer_artist.layer, BaseData):
                self.order_visible = self.order
                return

        # If not then we need to show only the rows with visible subsets
        visible = np.zeros(self.order.shape, dtype=bool)
        for layer_artist in self._table_viewer.layers:
            if layer_artist.visible:
                mask = layer_artist.layer.to_mask()
                if DASK_INSTALLED and isinstance(mask, da.Array):
                    mask = mask.compute()
                visible |= mask

        self.order_visible = self.order[visible]
class TableLayerArtist(LayerArtist):
    """Layer artist whose draw/update/clear all just refresh the table model."""

    def __init__(self, table_viewer, viewer_state, layer_state=None, layer=None):
        self._table_viewer = table_viewer
        super(TableLayerArtist, self).__init__(viewer_state,
                                               layer_state=layer_state,
                                               layer=layer)
        self.redraw()

    def _refresh(self):
        # Single funnel point: ask the shared model to re-read its data.
        self._table_viewer.model.data_changed()

    def redraw(self):
        self._refresh()

    def update(self):
        self._refresh()

    def clear(self):
        self._refresh()
@viewer_tool
class RowSelectTool(CheckableTool):
    """Toolbar tool toggling row-selection mode on the table view."""

    tool_id = 'table:rowselect'
    icon = 'glue_row_select'
    action_text = 'Select row(s)'
    tool_tip = ('Select rows by clicking on rows and pressing enter '
                'once the selection is ready to be applied')
    status_tip = ('CLICK to select, press ENTER to finalize selection, '
                  'ALT+CLICK or ALT+UP/DOWN to apply selection immediately')

    def __init__(self, viewer):
        super(RowSelectTool, self).__init__(viewer)
        # Selection starts disabled until the tool is activated.
        self.deactivate()

    def activate(self):
        # Extended selection: click/drag/ctrl-click multiple rows.
        self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)

    def deactivate(self):
        # Don't do anything if the viewer has already been closed
        if self.viewer is None:
            return
        self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
        self.viewer.ui.table.clearSelection()
class TableViewWithSelectionSignal(QtWidgets.QTableView):
    """QTableView that mirrors selection changes onto a Qt signal."""

    # Emitted on every selection change.
    selection_changed = QtCore.Signal()

    def selectionChanged(self, *args, **kwargs):
        self.selection_changed.emit()
        super(TableViewWithSelectionSignal, self).selectionChanged(*args, **kwargs)
class TableViewer(DataViewer):
    """Data viewer showing a single 1D glue dataset as a table."""

    LABEL = "Table Viewer"

    _toolbar_cls = BasicToolbar
    _data_artist_cls = TableLayerArtist
    _subset_artist_cls = TableLayerArtist

    inherit_tools = False
    tools = ['table:rowselect']

    def __init__(self, session, state=None, parent=None, widget=None):

        super(TableViewer, self).__init__(session, state=state, parent=parent)

        self.ui = load_ui('data_viewer.ui',
                          directory=os.path.dirname(__file__))
        self.setCentralWidget(self.ui)

        # User-resizable headers; last column stretches to fill the view.
        hdr = self.ui.table.horizontalHeader()
        hdr.setStretchLastSection(True)
        hdr.setSectionResizeMode(hdr.Interactive)

        hdr = self.ui.table.verticalHeader()
        hdr.setSectionResizeMode(hdr.Interactive)

        self.data = None
        self.model = None

        self.ui.table.selection_changed.connect(self.selection_changed)

        self.state.add_callback('layers', self._on_layers_changed)
        self._on_layers_changed()

    def selection_changed(self):
        """Apply selection immediately while ALT is held down."""
        app = get_qapp()
        if app.queryKeyboardModifiers() == Qt.AltModifier:
            self.finalize_selection(clear=False)

    def keyPressEvent(self, event):
        # ENTER/RETURN applies the pending selection when the row tool is on.
        if self.toolbar.active_tool is self.toolbar.tools['table:rowselect']:
            if event.key() in [Qt.Key_Enter, Qt.Key_Return]:
                self.finalize_selection()
        super(TableViewer, self).keyPressEvent(event)

    def finalize_selection(self, clear=True):
        """Convert selected view rows into a glue subset state."""
        model = self.ui.table.selectionModel()
        # Map view rows back to original dataset row indices.
        selected_rows = [self.model.order_visible[x.row()] for x in model.selectedRows()]
        subset_state = ElementSubsetState(indices=selected_rows, data=self.data)
        mode = self.session.edit_subset_mode
        # NOTE(review): self._data presumably comes from the DataViewer base —
        # confirm.
        mode.update(self._data, subset_state, focus_data=self.data)
        if clear:
            # We block the signals here to make sure that we don't update
            # the subset again once the selection is cleared.
            self.ui.table.blockSignals(True)
            self.ui.table.clearSelection()
            self.ui.table.blockSignals(False)

    def _on_layers_changed(self, *args):
        """Rebuild the table model for the first data layer, if any."""
        for layer_state in self.state.layers:
            if isinstance(layer_state.layer, BaseData):
                break
        else:
            return

        self.data = layer_state.layer
        # Suspend repaints while the model is replaced.
        self.setUpdatesEnabled(False)
        self.model = DataTableModel(self)
        self.ui.table.setModel(self.model)
        self.setUpdatesEnabled(True)

    @messagebox_on_error("Failed to add data")
    def add_data(self, data):
        """Show *data*, replacing any existing layers."""
        with self._layer_artist_container.ignore_empty():
            self.state.layers[:] = []
            return super(TableViewer, self).add_data(data)

    @messagebox_on_error("Failed to add subset")
    def add_subset(self, subset):
        """Add *subset*; its parent data must match the table's data."""
        if self.data is None:
            self.add_data(subset.data)
            self.state.layers[0].visible = False
        elif subset.data != self.data:
            raise ValueError("subset parent data does not match existing table data")

        return super(TableViewer, self).add_subset(subset)

    @property
    def window_title(self):
        if len(self.state.layers) > 0:
            return 'Table: ' + self.state.layers[0].layer.label
        else:
            return 'Table'

    def closeEvent(self, event):
        """
        On close, Qt seems to scan through the entire model
        if the data set is big. To sidestep that,
        we swap out with a tiny data set before closing
        """
        super(TableViewer, self).closeEvent(event)
        if self.model is not None:
            self.model._data = Data(x=[0])
        event.accept()

    def get_layer_artist(self, cls, layer=None, layer_state=None):
        return cls(self, self.state, layer=layer, layer_state=layer_state)

    @staticmethod
    def update_viewer_state(rec, context):
        # Backwards-compatible session state loading.
        return update_table_viewer_state(rec, context)
azure-servicefabric/azure/servicefabric/models/restore_partition_description.py | JonathanGailliez/azure-sdk-for-python | 1 | 13643 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RestorePartitionDescription(Model):
    """Specifies the parameters needed to trigger a restore of a specific
    partition.

    All required parameters must be populated in order to send to Azure.

    :param backup_id: Required. Unique backup ID.
    :type backup_id: str
    :param backup_location: Required. Location of the backup relative to the
     backup storage specified/ configured.
    :type backup_location: str
    :param backup_storage: Location of the backup from where the partition
     will be restored.
    :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription
    """

    # msrest validation rules: these fields must be set before serializing.
    _validation = {
        'backup_id': {'required': True},
        'backup_location': {'required': True},
    }

    # Maps Python attribute names to the REST API's field names and types.
    _attribute_map = {
        'backup_id': {'key': 'BackupId', 'type': 'str'},
        'backup_location': {'key': 'BackupLocation', 'type': 'str'},
        'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'},
    }

    def __init__(self, **kwargs):
        # AutoRest-generated kwargs-based constructor; do not edit by hand.
        super(RestorePartitionDescription, self).__init__(**kwargs)
        self.backup_id = kwargs.get('backup_id', None)
        self.backup_location = kwargs.get('backup_location', None)
        self.backup_storage = kwargs.get('backup_storage', None)
| # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RestorePartitionDescription(Model):
    """Specifies the parameters needed to trigger a restore of a specific
    partition.

    All required parameters must be populated in order to send to Azure.

    :param backup_id: Required. Unique backup ID.
    :type backup_id: str
    :param backup_location: Required. Location of the backup relative to the
     backup storage specified/ configured.
    :type backup_location: str
    :param backup_storage: Location of the backup from where the partition
     will be restored.
    :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription
    """

    # Required fields enforced by msrest at serialization time.
    _validation = {
        'backup_id': {'required': True},
        'backup_location': {'required': True},
    }

    # Attribute-name -> REST field-name/type mapping used by msrest.
    _attribute_map = {
        'backup_id': {'key': 'BackupId', 'type': 'str'},
        'backup_location': {'key': 'BackupLocation', 'type': 'str'},
        'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'},
    }

    def __init__(self, **kwargs):
        # Generated by AutoRest; regenerating will overwrite manual edits.
        super(RestorePartitionDescription, self).__init__(**kwargs)
        self.backup_id = kwargs.get('backup_id', None)
        self.backup_location = kwargs.get('backup_location', None)
        self.backup_storage = kwargs.get('backup_storage', None)
| pt | 0.219751 | 1.98152 | 2 |
psq/queue.py | Tomesco/bookshelf-demo-project | 210 | 13644 | <gh_stars>100-1000
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from contextlib import contextmanager
import functools
import logging
from uuid import uuid4
import google.cloud.exceptions
from .globals import queue_context
from .storage import Storage
from .task import Task, TaskResult
from .utils import dumps, measure_time, unpickle, UnpickleError
logger = logging.getLogger(__name__)
PUBSUB_OBJECT_PREFIX = 'psq'
class Queue(object):
    """Task queue backed by a Google Cloud Pub/Sub topic.

    Tasks are pickled and published to the topic; workers share a single
    subscription so tasks are distributed between them. When
    ``asynchronous`` is False, tasks are executed immediately in-process
    instead of being published.
    """

    def __init__(self, publisher_client, subscriber_client, project,
                 name='default', storage=None, extra_context=None,
                 asynchronous=True):
        self._async = asynchronous
        self.name = name
        self.project = project

        if self._async:
            # Pub/Sub clients are only needed when actually publishing.
            self.publisher_client = publisher_client
            self.subscriber_client = subscriber_client
            self.topic_path = self._get_or_create_topic()

        self.storage = storage or Storage()
        self.subscription = None
        self.extra_context = extra_context if extra_context else dummy_context

    def _get_topic_path(self):
        """Return the fully-qualified topic path for this queue."""
        topic_name = '{}-{}'.format(PUBSUB_OBJECT_PREFIX, self.name)
        return self.publisher_client.topic_path(self.project, topic_name)

    def _get_or_create_topic(self):
        """Fetch the queue's topic, creating it if it does not exist."""
        topic_path = self._get_topic_path()

        try:
            self.publisher_client.get_topic(topic_path)
        except google.cloud.exceptions.NotFound:
            # Lazy %-style logging args: formatting only happens if the
            # record is actually emitted.
            logger.info("Creating topic %s", topic_path)
            try:
                self.publisher_client.create_topic(topic_path)
            except google.cloud.exceptions.Conflict:
                # Another process created the topic before us, ignore.
                pass

        return topic_path

    def _get_or_create_subscription(self):
        """Workers all share the same subscription so that tasks are
        distributed across all workers."""
        topic_path = self._get_topic_path()
        subscription_name = '{}-{}-shared'.format(
            PUBSUB_OBJECT_PREFIX, self.name)
        subscription_path = self.subscriber_client.subscription_path(
            self.project, subscription_name)

        try:
            self.subscriber_client.get_subscription(subscription_path)
        except google.cloud.exceptions.NotFound:
            logger.info("Creating shared subscription %s", subscription_name)
            try:
                self.subscriber_client.create_subscription(
                    subscription_path, topic=topic_path)
            except google.cloud.exceptions.Conflict:
                # Another worker created the subscription before us, ignore.
                pass

        return subscription_path

    def enqueue(self, f, *args, **kwargs):
        """Enqueues a function for the task queue to execute."""
        task = Task(uuid4().hex, f, args, kwargs)
        self.storage.put_task(task)
        return self.enqueue_task(task)

    def enqueue_task(self, task):
        """Enqueues a task directly. This is used when a task is retried or if
        a task was manually created.

        Note that this does not store the task.
        """
        data = dumps(task)

        if self._async:
            self.publisher_client.publish(self.topic_path, data=data)
            logger.info('Task %s queued.', task.id)
        else:
            # Synchronous mode: round-trip through pickle to mimic a
            # worker, then execute immediately in this process.
            unpickled_task = unpickle(data)
            logger.info('Executing task %s synchronously.', unpickled_task.id)
            with measure_time() as summary, self.queue_context():
                unpickled_task.execute(queue=self)
                summary(unpickled_task.summary())

        return TaskResult(task.id, self)

    @staticmethod
    def _pubsub_message_callback(task_callback, message):
        # Ack first: a task that fails to unpickle must not be redelivered
        # forever.
        message.ack()
        try:
            task = unpickle(message.data)
            task_callback(task)
        except UnpickleError:
            logger.exception('Failed to unpickle task %s.', message)

    def listen(self, callback):
        """Start pulling tasks; *callback* receives each unpickled Task."""
        if not self.subscription:
            self.subscription = self._get_or_create_subscription()

        message_callback = functools.partial(
            self._pubsub_message_callback, callback)
        return self.subscriber_client.subscribe(
            self.subscription, callback=message_callback)

    def cleanup(self):
        """Does nothing for this queue, but other queues types may use this to
        perform clean-up after listening for tasks."""
        pass

    def queue_context(self):
        """
        Returns a context manager that sets this queue as the current_queue
        global. Similar to flask's app.app_context. This is used by the workers
        to make the global available inside of task functions.
        """
        return queue_context(self)
@contextmanager
def dummy_context():
    """No-op context manager used when no ``extra_context`` is supplied."""
    yield None
| # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from contextlib import contextmanager
import functools
import logging
from uuid import uuid4
import google.cloud.exceptions
from .globals import queue_context
from .storage import Storage
from .task import Task, TaskResult
from .utils import dumps, measure_time, unpickle, UnpickleError
logger = logging.getLogger(__name__)
PUBSUB_OBJECT_PREFIX = 'psq'
class Queue(object):
    """Task queue backed by a Google Cloud Pub/Sub topic; workers share a
    single subscription. With ``asynchronous=False`` tasks run in-process."""

    def __init__(self, publisher_client, subscriber_client, project,
                 name='default', storage=None, extra_context=None,
                 asynchronous=True):
        self._async = asynchronous
        self.name = name
        self.project = project

        if self._async:
            # Pub/Sub clients are only needed when actually publishing.
            self.publisher_client = publisher_client
            self.subscriber_client = subscriber_client
            self.topic_path = self._get_or_create_topic()

        self.storage = storage or Storage()
        self.subscription = None
        self.extra_context = extra_context if extra_context else dummy_context

    def _get_topic_path(self):
        # Topic name is namespaced with the psq prefix and the queue name.
        topic_name = '{}-{}'.format(PUBSUB_OBJECT_PREFIX, self.name)
        return self.publisher_client.topic_path(self.project, topic_name)

    def _get_or_create_topic(self):
        """Fetch this queue's topic, creating it if missing."""
        topic_path = self._get_topic_path()

        try:
            self.publisher_client.get_topic(topic_path)
        except google.cloud.exceptions.NotFound:
            logger.info("Creating topic {}".format(topic_path))
            try:
                self.publisher_client.create_topic(topic_path)
            except google.cloud.exceptions.Conflict:
                # Another process created the topic before us, ignore.
                pass

        return topic_path

    def _get_or_create_subscription(self):
        """Workers all share the same subscription so that tasks are
        distributed across all workers."""
        topic_path = self._get_topic_path()
        subscription_name = '{}-{}-shared'.format(
            PUBSUB_OBJECT_PREFIX, self.name)
        subscription_path = self.subscriber_client.subscription_path(
            self.project, subscription_name)

        try:
            self.subscriber_client.get_subscription(subscription_path)
        except google.cloud.exceptions.NotFound:
            logger.info("Creating shared subscription {}".format(
                subscription_name))
            try:
                self.subscriber_client.create_subscription(
                    subscription_path, topic=topic_path)
            except google.cloud.exceptions.Conflict:
                # Another worker created the subscription before us, ignore.
                pass

        return subscription_path

    def enqueue(self, f, *args, **kwargs):
        """Enqueues a function for the task queue to execute."""
        task = Task(uuid4().hex, f, args, kwargs)
        self.storage.put_task(task)
        return self.enqueue_task(task)

    def enqueue_task(self, task):
        """Enqueues a task directly. This is used when a task is retried or if
        a task was manually created.

        Note that this does not store the task.
        """
        data = dumps(task)

        if self._async:
            self.publisher_client.publish(self.topic_path, data=data)
            logger.info('Task {} queued.'.format(task.id))
        else:
            # Synchronous mode: round-trip through pickle to mimic a
            # worker, then execute immediately in this process.
            unpickled_task = unpickle(data)
            logger.info(
                'Executing task {} synchronously.'.format(unpickled_task.id)
            )
            with measure_time() as summary, self.queue_context():
                unpickled_task.execute(queue=self)
                summary(unpickled_task.summary())

        return TaskResult(task.id, self)

    @staticmethod
    def _pubsub_message_callback(task_callback, message):
        # Ack first so a message that fails to unpickle is not redelivered
        # forever.
        message.ack()
        try:
            task = unpickle(message.data)
            task_callback(task)
        except UnpickleError:
            logger.exception('Failed to unpickle task {}.'.format(message))

    def listen(self, callback):
        """Start pulling tasks; *callback* receives each unpickled Task."""
        if not self.subscription:
            self.subscription = self._get_or_create_subscription()

        message_callback = functools.partial(
            self._pubsub_message_callback, callback)
        return self.subscriber_client.subscribe(
            self.subscription, callback=message_callback)

    def cleanup(self):
        """Does nothing for this queue, but other queues types may use this to
        perform clean-up after listening for tasks."""
        pass

    def queue_context(self):
        """
        Returns a context manager that sets this queue as the current_queue
        global. Similar to flask's app.app_context. This is used by the workers
        to make the global available inside of task functions.
        """
        return queue_context(self)
@contextmanager
def dummy_context():
    # Placeholder context manager: yields nothing and adds no behaviour;
    # used as the default ``extra_context`` for Queue.
    yield
mac/google-cloud-sdk/lib/surface/access_context_manager/levels/update.py | bopopescu/cndw | 0 | 13645 | <filename>mac/google-cloud-sdk/lib/surface/access_context_manager/levels/update.py
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud access-context-manager levels update` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.accesscontextmanager import levels as levels_api
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.accesscontextmanager import levels
from googlecloudsdk.command_lib.accesscontextmanager import policies
@base.ReleaseTracks(base.ReleaseTrack.GA)
class UpdateLevelsGA(base.UpdateCommand):
  """Update an existing access level."""
  # API surface targeted by this release track.
  _API_VERSION = 'v1'

  @staticmethod
  def Args(parser):
    UpdateLevelsGA.ArgsVersioned(parser, version='v1')

  @staticmethod
  def ArgsVersioned(parser, version='v1'):
    # Shared argument registration, reused by the beta/alpha tracks with a
    # different API version.
    levels.AddResourceArg(parser, 'to update')
    levels.AddLevelArgs(parser, version=version)
    levels.AddLevelSpecArgs(parser, version=version)

  def Run(self, args):
    client = levels_api.Client(version=self._API_VERSION)
    level_ref = args.CONCEPTS.level.Parse()
    policies.ValidateAccessPolicyArg(level_ref, args)
    # Translate the combine-function choice string into the API enum value.
    mapper = levels.GetCombineFunctionEnumMapper(version=self._API_VERSION)
    combine_function = mapper.GetEnumForChoice(args.combine_function)
    return client.Patch(
        level_ref,
        description=args.description,
        title=args.title,
        combine_function=combine_function,
        basic_level_conditions=args.basic_level_spec)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class UpdateLevelsBeta(UpdateLevelsGA):
  # Beta track: same behavior as GA, pinned to the v1beta API surface.
  _API_VERSION = 'v1beta'

  @staticmethod
  def Args(parser):
    # Reuse the GA argument registration with the beta API version.
    UpdateLevelsGA.ArgsVersioned(parser, version='v1beta')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateLevelsAlpha(UpdateLevelsGA):
  # Alpha track: same behavior as GA, pinned to the v1alpha API surface.
  _API_VERSION = 'v1alpha'

  @staticmethod
  def Args(parser):
    # Reuse the GA argument registration with the alpha API version.
    UpdateLevelsGA.ArgsVersioned(parser, version='v1alpha')
| <filename>mac/google-cloud-sdk/lib/surface/access_context_manager/levels/update.py
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud access-context-manager levels update` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.accesscontextmanager import levels as levels_api
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.accesscontextmanager import levels
from googlecloudsdk.command_lib.accesscontextmanager import policies
@base.ReleaseTracks(base.ReleaseTrack.GA)
class UpdateLevelsGA(base.UpdateCommand):
"""Update an existing access level."""
_API_VERSION = 'v1'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1')
@staticmethod
def ArgsVersioned(parser, version='v1'):
levels.AddResourceArg(parser, 'to update')
levels.AddLevelArgs(parser, version=version)
levels.AddLevelSpecArgs(parser, version=version)
def Run(self, args):
client = levels_api.Client(version=self._API_VERSION)
level_ref = args.CONCEPTS.level.Parse()
policies.ValidateAccessPolicyArg(level_ref, args)
mapper = levels.GetCombineFunctionEnumMapper(version=self._API_VERSION)
combine_function = mapper.GetEnumForChoice(args.combine_function)
return client.Patch(
level_ref,
description=args.description,
title=args.title,
combine_function=combine_function,
basic_level_conditions=args.basic_level_spec)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class UpdateLevelsBeta(UpdateLevelsGA):
_API_VERSION = 'v1beta'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1beta')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateLevelsAlpha(UpdateLevelsGA):
_API_VERSION = 'v1alpha'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1alpha')
| pt | 0.2076 | 2.037157 | 2 |
convert.py | lordcodingsound/autodj | 0 | 13646 | <filename>convert.py<gh_stars>0
import wave
import struct
import subprocess
import os
import opusenc
import base64
import zlib
import sys

# Transcode the media file given as argv[1] into argv[2]: a zlib-compressed
# stream of newline-separated, base64-encoded Opus frames (48 kHz stereo,
# 480-sample i.e. 10 ms frames).

tmp = sys.argv[1] + ".wav"
subprocess.Popen(["ffmpeg", "-i", sys.argv[1], "-ar", "48000", "-ac", "2", "-y", tmp], stdout=subprocess.PIPE, stderr=subprocess.PIPE).wait()

f = open(sys.argv[2], "wb")
e = zlib.compressobj(9)
c = 0
b = ""

opusenc.initialize(256000)
wf = wave.open(tmp)
try:
    while True:
        rc = wf.readframes(480)
        # The encoder needs exactly 480 stereo 16-bit samples (1920 bytes);
        # a short read means EOF (any trailing partial frame is dropped).
        if len(rc) != 1920:
            break
        opus = opusenc.encode(rc)
        b += base64.b64encode(opus).decode("utf-8") + "\n"
        c += 1
        # Flush in batches of 100 frames with Z_SYNC_FLUSH so the output
        # stays decodable incrementally without per-frame compressor calls.
        if c >= 100:
            c = 0
            f.write(e.compress(b.encode()) + e.flush(zlib.Z_SYNC_FLUSH))
            b = ""

    # Bug fix: the stream was previously ended with Z_SYNC_FLUSH, which never
    # emits the zlib stream trailer; finish with Z_FINISH so decompressors
    # see a properly terminated stream.
    f.write(e.compress(b.encode()) + e.flush(zlib.Z_FINISH))
finally:
    # Always close handles and remove the intermediate WAV, even on failure.
    f.close()
    wf.close()
    if os.path.exists(tmp):
        os.remove(tmp)
| <filename>convert.py<gh_stars>0
import wave
import struct
import subprocess
import os
import opusenc
import base64
import zlib
import sys
tmp = sys.argv[1] + ".wav"
subprocess.Popen(["ffmpeg", "-i", sys.argv[1], "-ar", "48000", "-ac", "2", "-y", tmp], stdout=subprocess.PIPE, stderr=subprocess.PIPE).wait()
f = open(sys.argv[2], "wb")
e = zlib.compressobj(9)
c = 0
b = ""
opusenc.initialize(256000)
wf = wave.open(tmp)
while True:
rc = wf.readframes(480)
if len(rc) != 1920:
break
opus = opusenc.encode(rc)
b += base64.b64encode(opus).decode("utf-8") + "\n"
c += 1
if c >= 100:
c = 0
f.write(e.compress(b.encode()) + e.flush(zlib.Z_SYNC_FLUSH))
b = ""
f.write(e.compress(b.encode()) + e.flush(zlib.Z_SYNC_FLUSH))
f.close()
wf.close()
os.remove(tmp)
| none | 1 | 2.35445 | 2 |
polliwog/tri/__init__.py | lace/polliwog | 18 | 13647 | <filename>polliwog/tri/__init__.py
# Re-export the public triangle helpers: everything declared public by
# ``functions`` plus the quad-to-triangle conversion utility.
from . import functions as _functions
from .functions import *  # noqa: F401,F403
from .quad_faces import quads_to_tris

__all__ = _functions.__all__ + ["quads_to_tris"]
| <filename>polliwog/tri/__init__.py
from . import functions as _functions
from .functions import * # noqa: F401,F403
from .quad_faces import quads_to_tris
__all__ = _functions.__all__ + ["quads_to_tris"]
| fr | 0.2062 | 1.296335 | 1 |
packages/pegasus-api/src/Pegasus/api/replica_catalog.py | spxiwh/pegasus | 0 | 13648 | <reponame>spxiwh/pegasus
from collections import OrderedDict
from pathlib import Path
from typing import Dict, Optional, Set, Union
from ._utils import _chained
from .errors import DuplicateError
from .mixins import MetadataMixin
from .writable import Writable, _filter_out_nones
PEGASUS_VERSION = "5.0"
__all__ = ["File", "ReplicaCatalog"]
class _PFN:
"""A physical file name comprising site and path"""
def __init__(self, site, pfn):
self.site = site
self.pfn = pfn
def __eq__(self, other):
if isinstance(other, _PFN):
return self.site == other.site and self.pfn == other.pfn
return False
def __hash__(self):
return hash((self.site, self.pfn))
def __repr__(self):
return "<_PFN site: {}, pfn: {}>".format(self.site, self.pfn)
def __json__(self):
return {"site": self.site, "pfn": self.pfn}
class File(MetadataMixin):
    """
    A workflow File. This class is used to represent the inputs and outputs of a
    :py:class:`~Pegasus.api.workflow.Job`.

    .. code-block:: python

        # Example
        input_file = File("data.txt").add_metadata(creator="ryan")

    """

    def __init__(self, lfn: str, size: Optional[int] = None):
        """
        :param lfn: a unique logical filename
        :type lfn: str
        :param size: size in bytes, defaults to None
        :type size: int
        """
        if not isinstance(lfn, str):
            raise TypeError(
                "invalid lfn: {lfn}; lfn must be of type str".format(lfn=lfn)
            )

        self.metadata = dict()
        self.lfn = lfn
        self.size = size

        # A truthy size is mirrored into the metadata map as well.
        if size:
            self.metadata["size"] = size

    def __str__(self):
        return self.lfn

    def __hash__(self):
        # Identity is the logical filename alone.
        return hash(self.lfn)

    def __eq__(self, other):
        return isinstance(other, File) and self.lfn == other.lfn

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.lfn)

    def __json__(self):
        # Empty metadata is collapsed to None so it is filtered out of the
        # serialized form; key order matters for output stability.
        return _filter_out_nones(
            {
                "lfn": self.lfn,
                "metadata": self.metadata or None,
                "size": self.size,
            }
        )
class _ReplicaCatalogEntry:
    """Internal record tying one LFN (or regex pattern) to its PFNs,
    checksums, and metadata."""

    def __init__(
        self,
        lfn: str,
        pfns: Set[_PFN],
        checksum: Optional[Dict[str, str]] = None,
        metadata: Optional[Dict[str, Union[int, str, float]]] = None,
        regex: bool = False,
    ):
        self.lfn = lfn
        self.pfns = pfns
        # None collapses to a fresh empty dict for both optional mappings.
        self.checksum = checksum or dict()
        self.metadata = metadata or dict()
        self.regex = regex

    def __json__(self):
        # Empty mappings and a False regex flag serialize as None so the
        # filter below drops them from the output document.
        serialized = {
            "lfn": self.lfn,
            "pfns": list(self.pfns),
            "checksum": self.checksum or None,
            "metadata": self.metadata or None,
            "regex": self.regex or None,
        }
        return _filter_out_nones(serialized)
class ReplicaCatalog(Writable):
    """Maintains a mapping of logical filenames to physical filenames. Any input
    files to the workflow are specified here so that Pegasus knows where to
    obtain them.

    .. code-block:: python

        # Example
        if1 = File("if")
        if2 = File("if2")

        rc = ReplicaCatalog()\\
            .add_replica("local", if1, "/nfs/u2/ryan/data.csv")\\
            .add_replica("local", "if2", "/nfs/u2/ryan/data2.csv")\\
            .write()

    """

    # Default file name used when the catalog is written without an explicit path.
    _DEFAULT_FILENAME = "replicas.yml"
    # Checksum algorithms accepted by add_replica().
    _SUPPORTED_CHECKSUMS = {"sha256"}

    def __init__(self):
        # Using key = (<lfn or pattern>, <is_regex>) to preserve insertion
        # order of entries while distinguishing between regex and
        # non regex entries
        self.entries = OrderedDict()

    # NOTE(review): @_chained presumably makes the method return self so calls
    # can be fluently chained (as in the class docstring) — confirm in ._utils.
    @_chained
    def add_regex_replica(
        self,
        site: str,
        pattern: str,
        pfn: Union[str, Path],
        metadata: Optional[Dict[str, Union[int, str, float]]] = None,
    ):
        r"""
        add_regex_replica(self, site: str, pattern: str, pfn: Union[str, Path], metadata: Optional[Dict[str, Union[int, str, float]]] = None)
        Add an entry to this replica catalog using a regular expression pattern.
        Note that regular expressions should follow Java regular expression syntax
        as the underlying code that handles this catalog is Java based.

        .. code-block:: python

            # Example 1: Match f<any-character>a i.e. faa, f.a, f0a, etc.
            rc.add_regex_replica("local", "f.a", "/Volumes/data/input/f.a")

            # Example 2: Using groupings
            rc.add_regex_replica("local", "alpha\.(csv|txt|xml)", "/Volumes/data/input/[1]/[0]")

            # If the file being looked up is alpha.csv, the pfn for the file will be
            # generated as /Volumes/data/input/csv/alpha.csv

            # Example 3: Specifying a default location for all lfns that don't match any
            # regular expressions. Note that this should be the last entry into the replica
            # catalog if used.
            rc.add_regex_replica("local", ".*", Path("/Volumes/data") / "input/[0]")

        :param site: the site at which this replica (file) resides
        :type site: str
        :param pattern: regular expression used to match a file
        :type pattern: str
        :param pfn: path to the file (may also be a pattern as shown in the example above)
        :type pfn: Union[str, Path]
        :param metadata: any metadata to be associated with the matched files, for example: :code:`{"creator": "pegasus"}`, defaults to None
        :type metadata: Optional[Dict[str, Union[int, str, float]]]
        :raises DuplicateError: Duplicate patterns with different PFNs are currently not supported
        """

        metadata = metadata or dict()

        # restricting pattern to single pfn (may be relaxed in future release)
        if (pattern, True) in self.entries:
            raise DuplicateError(
                "Pattern: {} already exists in this replica catalog".format(pattern)
            )

        # handle Path obj if given for pfn
        if isinstance(pfn, Path):
            if not pfn.is_absolute():
                raise ValueError(
                    "Invalid pfn: {}, the given pfn must be an absolute path".format(
                        pfn
                    )
                )

            pfn = str(pfn)

        # Regex entries carry exactly one PFN and no checksum.
        self.entries[(pattern, True)] = _ReplicaCatalogEntry(
            lfn=pattern, pfns={_PFN(site, pfn)}, metadata=metadata, regex=True
        )

    @_chained
    def add_replica(
        self,
        site: str,
        lfn: Union[str, File],
        pfn: Union[str, Path],
        checksum: Optional[Dict[str, str]] = None,
        metadata: Optional[Dict[str, Union[int, str, float]]] = None,
    ):
        """
        add_replica(self, site: str, lfn: Union[str, File], pfn: Union[str, Path], checksum: Optional[Dict[str, str]] = None, metadata: Optiona[Dict[str, Union[int, str, float]]] = None)
        Add an entry to this replica catalog.

        .. code-block:: python

            # Example 1
            f = File("in.txt").add_metadata(creator="pegasus")
            rc.add_replica("local", f, Path(".").resolve() / "in.txt")

            # Example 2: Adding metadata and a checksum
            rc.add_replica(
                "local",
                "in.txt",
                "/home/ryan/wf/in.txt",
                checksum={"sha256": "abc123"},
                metadata={"creator": "pegasus"}
            )

            # Example 3: Adding multiple pfns for the same lfn (metadata and checksum will be
            # updated for that lfn if given.
            rc.add_replica("local", "in.txt", Path(".").resolve() / "in.txt")
            rc.add_replica("condorpool", "in.txt", "/path/to/file/in.txt")

        :param site: the site at which this replica (file) resides
        :type site: str
        :param lfn: logical file name
        :type lfn: Union[str, File]
        :param pfn: physical file name such as :code:`Path("f.txt").resolve()`, :code:`/home/ryan/file.txt`, or :code:`http://pegasus.isi.edu/file.txt`
        :type pfn: Union[str, Path]
        :param checksum: Dict containing checksums for this file. Currently only sha256 is given. This should be entered as :code:`{"sha256": <value>}`, defaults to None
        :type checksum: Optional[Dict[str, str]], optional
        :param metadata: metadata key value pairs associated with this lfn such as :code:`{"created": "Thu Jun 18 22:18:36 PDT 2020", "owner": "pegasus"}`, defaults to None
        :type metadata: Optional[Dict[str, Union[int, str, float]]], optional
        :raises ValueError: if pfn is given as a :code:`pathlib.Path`, it must be an absolute path
        :raises ValueError: an unsupported checksum type was given
        """

        # handle Path obj if given for pfn
        if isinstance(pfn, Path):
            if not pfn.is_absolute():
                raise ValueError(
                    "Invalid pfn: {}, the given path must be an absolute path".format(
                        str(pfn)
                    )
                )

            pfn = str(pfn)

        metadata = metadata or dict()
        checksum = checksum or dict()

        # File might contain metadata that should be included
        if isinstance(lfn, File):
            if lfn.metadata:
                metadata.update(lfn.metadata)

            lfn = lfn.lfn

        # ensure supported checksum type given
        if len(checksum) > 0:
            for checksum_type in checksum:
                if checksum_type.lower() not in ReplicaCatalog._SUPPORTED_CHECKSUMS:
                    raise ValueError(
                        "Invalid checksum: {}, supported checksum types are: {}".format(
                            checksum_type, ReplicaCatalog._SUPPORTED_CHECKSUMS
                        )
                    )

        # if an entry with the given lfn already exists, update it
        # else create and add a new one
        if (lfn, False) in self.entries:
            # Merge: accumulate PFNs, overlay checksum/metadata keys.
            self.entries[(lfn, False)].pfns.add(_PFN(site, pfn))
            self.entries[(lfn, False)].checksum.update(checksum)
            self.entries[(lfn, False)].metadata.update(metadata)
        else:
            self.entries[(lfn, False)] = _ReplicaCatalogEntry(
                lfn,
                {_PFN(site, pfn)},
                checksum=checksum,
                metadata=metadata,
                regex=False,
            )

    def __json__(self):
        # Serialize entries in insertion order (regex fallbacks stay last if
        # they were added last, as the add_regex_replica docs recommend).
        return OrderedDict(
            [
                ("pegasus", PEGASUS_VERSION),
                ("replicas", [v for _, v in self.entries.items()]),
            ]
        )
| from collections import OrderedDict
from pathlib import Path
from typing import Dict, Optional, Set, Union
from ._utils import _chained
from .errors import DuplicateError
from .mixins import MetadataMixin
from .writable import Writable, _filter_out_nones
PEGASUS_VERSION = "5.0"
__all__ = ["File", "ReplicaCatalog"]
class _PFN:
"""A physical file name comprising site and path"""
def __init__(self, site, pfn):
self.site = site
self.pfn = pfn
def __eq__(self, other):
if isinstance(other, _PFN):
return self.site == other.site and self.pfn == other.pfn
return False
def __hash__(self):
return hash((self.site, self.pfn))
def __repr__(self):
return "<_PFN site: {}, pfn: {}>".format(self.site, self.pfn)
def __json__(self):
return {"site": self.site, "pfn": self.pfn}
class File(MetadataMixin):
"""
A workflow File. This class is used to represent the inputs and outputs of a
:py:class:`~Pegasus.api.workflow.Job`.
.. code-block:: python
# Example
input_file = File("data.txt").add_metadata(creator="ryan")
"""
def __init__(self, lfn: str, size: Optional[int] = None):
"""
:param lfn: a unique logical filename
:type lfn: str
:param size: size in bytes, defaults to None
:type size: int
"""
if not isinstance(lfn, str):
raise TypeError(
"invalid lfn: {lfn}; lfn must be of type str".format(lfn=lfn)
)
self.metadata = dict()
self.lfn = lfn
self.size = size
if size:
self.metadata["size"] = size
def __str__(self):
return self.lfn
def __hash__(self):
return hash(self.lfn)
def __eq__(self, other):
if isinstance(other, File):
return self.lfn == other.lfn
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.lfn)
def __json__(self):
return _filter_out_nones(
{
"lfn": self.lfn,
"metadata": self.metadata if len(self.metadata) > 0 else None,
"size": self.size,
}
)
class _ReplicaCatalogEntry:
def __init__(
self,
lfn: str,
pfns: Set[_PFN],
checksum: Optional[Dict[str, str]] = None,
metadata: Optional[Dict[str, Union[int, str, float]]] = None,
regex: bool = False,
):
self.lfn = lfn
self.pfns = pfns
self.checksum = checksum or dict()
self.metadata = metadata or dict()
self.regex = regex
def __json__(self):
return _filter_out_nones(
{
"lfn": self.lfn,
"pfns": [pfn for pfn in self.pfns],
"checksum": self.checksum if len(self.checksum) > 0 else None,
"metadata": self.metadata if len(self.metadata) > 0 else None,
"regex": self.regex if self.regex else None,
}
)
class ReplicaCatalog(Writable):
"""Maintains a mapping of logical filenames to physical filenames. Any input
files to the workflow are specified here so that Pegasus knows where to
obtain them.
.. code-block:: python
# Example
if1 = File("if")
if2 = File("if2")
rc = ReplicaCatalog()\\
.add_replica("local", if1, "/nfs/u2/ryan/data.csv")\\
.add_replica("local", "if2", "/nfs/u2/ryan/data2.csv")\\
.write()
"""
_DEFAULT_FILENAME = "replicas.yml"
_SUPPORTED_CHECKSUMS = {"sha256"}
def __init__(self):
# Using key = (<lfn or pattern>, <is_regex>) to preserve insertion
# order of entries while distinguishing between regex and
# non regex entries
self.entries = OrderedDict()
@_chained
def add_regex_replica(
self,
site: str,
pattern: str,
pfn: Union[str, Path],
metadata: Optional[Dict[str, Union[int, str, float]]] = None,
):
r"""
add_regex_replica(self, site: str, pattern: str, pfn: Union[str, Path], metadata: Optional[Dict[str, Union[int, str, float]]] = None)
Add an entry to this replica catalog using a regular expression pattern.
Note that regular expressions should follow Java regular expression syntax
as the underlying code that handles this catalog is Java based.
.. code-block:: python
# Example 1: Match f<any-character>a i.e. faa, f.a, f0a, etc.
rc.add_regex_replica("local", "f.a", "/Volumes/data/input/f.a")
# Example 2: Using groupings
rc.add_regex_replica("local", "alpha\.(csv|txt|xml)", "/Volumes/data/input/[1]/[0]")
# If the file being looked up is alpha.csv, the pfn for the file will be
# generated as /Volumes/data/input/csv/alpha.csv
# Example 3: Specifying a default location for all lfns that don't match any
# regular expressions. Note that this should be the last entry into the replica
# catalog if used.
rc.add_regex_replica("local", ".*", Path("/Volumes/data") / "input/[0]")
:param site: the site at which this replica (file) resides
:type site: str
:param pattern: regular expression used to match a file
:type pattern: str
:param pfn: path to the file (may also be a pattern as shown in the example above)
:type pfn: Union[str, Path]
:param metadata: any metadata to be associated with the matched files, for example: :code:`{"creator": "pegasus"}`, defaults to None
:type metadata: Optional[Dict[str, Union[int, str, float]]]
:raises DuplicateError: Duplicate patterns with different PFNs are currently not supported
"""
metadata = metadata or dict()
# restricting pattern to single pfn (may be relaxed in future release)
if (pattern, True) in self.entries:
raise DuplicateError(
"Pattern: {} already exists in this replica catalog".format(pattern)
)
# handle Path obj if given for pfn
if isinstance(pfn, Path):
if not pfn.is_absolute():
raise ValueError(
"Invalid pfn: {}, the given pfn must be an absolute path".format(
pfn
)
)
pfn = str(pfn)
self.entries[(pattern, True)] = _ReplicaCatalogEntry(
lfn=pattern, pfns={_PFN(site, pfn)}, metadata=metadata, regex=True
)
@_chained
def add_replica(
self,
site: str,
lfn: Union[str, File],
pfn: Union[str, Path],
checksum: Optional[Dict[str, str]] = None,
metadata: Optional[Dict[str, Union[int, str, float]]] = None,
):
"""
add_replica(self, site: str, lfn: Union[str, File], pfn: Union[str, Path], checksum: Optional[Dict[str, str]] = None, metadata: Optiona[Dict[str, Union[int, str, float]]] = None)
Add an entry to this replica catalog.
.. code-block:: python
# Example 1
f = File("in.txt").add_metadata(creator="pegasus")
rc.add_replica("local", f, Path(".").resolve() / "in.txt")
# Example 2: Adding metadata and a checksum
rc.add_replica(
"local",
"in.txt",
"/home/ryan/wf/in.txt",
checksum={"sha256": "abc123"},
metadata={"creator": "pegasus"}
)
# Example 3: Adding multiple pfns for the same lfn (metadata and checksum will be
# updated for that lfn if given.
rc.add_replica("local", "in.txt", Path(".").resolve() / "in.txt")
rc.add_replica("condorpool", "in.txt", "/path/to/file/in.txt")
:param site: the site at which this replica (file) resides
:type site: str
:param lfn: logical file name
:type lfn: Union[str, File]
:param pfn: physical file name such as :code:`Path("f.txt").resolve()`, :code:`/home/ryan/file.txt`, or :code:`http://pegasus.isi.edu/file.txt`
:type pfn: Union[str, Path]
:param checksum: Dict containing checksums for this file. Currently only sha256 is given. This should be entered as :code:`{"sha256": <value>}`, defaults to None
:type checksum: Optional[Dict[str, str]], optional
:param metadata: metadata key value pairs associated with this lfn such as :code:`{"created": "Thu Jun 18 22:18:36 PDT 2020", "owner": "pegasus"}`, defaults to None
:type metadata: Optional[Dict[str, Union[int, str, float]]], optional
:raises ValueError: if pfn is given as a :code:`pathlib.Path`, it must be an absolute path
:raises ValueError: an unsupported checksum type was given
"""
# handle Path obj if given for pfn
if isinstance(pfn, Path):
if not pfn.is_absolute():
raise ValueError(
"Invalid pfn: {}, the given path must be an absolute path".format(
str(pfn)
)
)
pfn = str(pfn)
metadata = metadata or dict()
checksum = checksum or dict()
# File might contain metadata that should be included
if isinstance(lfn, File):
if lfn.metadata:
metadata.update(lfn.metadata)
lfn = lfn.lfn
# ensure supported checksum type given
if len(checksum) > 0:
for checksum_type in checksum:
if checksum_type.lower() not in ReplicaCatalog._SUPPORTED_CHECKSUMS:
raise ValueError(
"Invalid checksum: {}, supported checksum types are: {}".format(
checksum_type, ReplicaCatalog._SUPPORTED_CHECKSUMS
)
)
# if an entry with the given lfn already exists, update it
# else create and add a new one
if (lfn, False) in self.entries:
self.entries[(lfn, False)].pfns.add(_PFN(site, pfn))
self.entries[(lfn, False)].checksum.update(checksum)
self.entries[(lfn, False)].metadata.update(metadata)
else:
self.entries[(lfn, False)] = _ReplicaCatalogEntry(
lfn,
{_PFN(site, pfn)},
checksum=checksum,
metadata=metadata,
regex=False,
)
def __json__(self):
return OrderedDict(
[
("pegasus", PEGASUS_VERSION),
("replicas", [v for _, v in self.entries.items()]),
]
) | pt | 0.116544 | 2.308634 | 2 |
importanize/groups.py | xiachufang/importanize | 0 | 13649 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import itertools
import operator
from collections import OrderedDict, defaultdict
from functools import reduce
import six
from .formatters import DEFAULT_FORMATTER, DEFAULT_LENGTH
from .utils import is_site_package, is_std_lib
@six.python_2_unicode_compatible
class BaseImportGroup(object):
    # Base class for an import group: collects import statements that match
    # its should_add_statement() predicate and renders them as text.

    def __init__(self, config=None, **kwargs):
        self.config = config or {}

        self.statements = kwargs.get("statements", [])
        self.file_artifacts = kwargs.get("file_artifacts", {})

    @property
    def unique_statements(self):
        # Deduplicated, sorted view of the merged statements.
        return sorted(list(set(self.merged_statements)))

    @property
    def merged_statements(self):
        """
        Merge statements with the same import stems
        """
        leafless_counter = defaultdict(list)
        counter = defaultdict(list)
        # Bucket statements by stem; those without leafs (plain
        # "import x" style) are kept separately and never merged.
        for statement in self.statements:
            if statement.leafs:
                counter[statement.stem].append(statement)
            else:
                leafless_counter[statement.stem].append(statement)

        merged_statements = list(itertools.chain(*leafless_counter.values()))

        def merge(statements):
            # Star imports ("from x import *") are preserved as-is; the
            # remaining statements with the same stem are combined via
            # Statement.__add__.
            # NOTE(review): assumes __add__ on statements unions their
            # leafs — confirm in the statement class.
            _special = []
            _statements = []

            for i in statements:
                if i.leafs and i.leafs[0].name == "*":
                    _special.append(i)
                else:
                    _statements.append(i)

            _reduced = []
            if _statements:
                _reduced = [reduce(lambda a, b: a + b, _statements)]

            return _special + _reduced

        for statements in counter.values():
            merged_statements.extend(merge(statements))

        return merged_statements

    def all_line_numbers(self):
        # Sorted union of the source line numbers of every statement.
        return sorted(
            list(
                set(
                    list(
                        itertools.chain(
                            *map(
                                operator.attrgetter("line_numbers"),
                                self.statements,
                            )
                        )
                    )
                )
            )
        )

    def should_add_statement(self, statement):
        # Subclasses decide whether a statement belongs to this group.
        raise NotImplementedError

    def add_statement(self, statement):
        # Returns True when the statement was accepted by this group.
        if self.should_add_statement(statement):
            self.statements.append(statement)
            return True

        return False

    def as_string(self):
        sep = self.file_artifacts.get("sep", "\n")
        return sep.join(
            map(operator.methodcaller("as_string"), self.unique_statements)
        )

    def formatted(self, formatter=DEFAULT_FORMATTER, length=DEFAULT_LENGTH):
        sep = self.file_artifacts.get("sep", "\n")
        return sep.join(
            map(
                operator.methodcaller(
                    "formatted", formatter=formatter, length=length
                ),
                self.unique_statements,
            )
        )

    def __str__(self):
        return self.as_string()
class StdLibGroup(BaseImportGroup):
    # Accepts imports whose root module belongs to the Python standard library.
    def should_add_statement(self, statement):
        return is_std_lib(statement.root_module)
class SitePackagesGroup(BaseImportGroup):
    # Accepts imports whose root module is an installed site package.
    def should_add_statement(self, statement):
        return is_site_package(statement.root_module)
class PackagesGroup(BaseImportGroup):
    # Accepts imports whose root module is in the user-configured
    # config["packages"] list.

    def __init__(self, *args, **kwargs):
        super(PackagesGroup, self).__init__(*args, **kwargs)

        if "packages" not in self.config:
            # Bug fix: the message previously said '"package"' although the
            # config key actually checked and read here is "packages".
            msg = (
                '"packages" config must be supplied '
                "for packages import group"
            )
            raise ValueError(msg)

    def should_add_statement(self, statement):
        return statement.root_module in self.config.get("packages", [])
class LocalGroup(BaseImportGroup):
    # Accepts relative imports (stem starting with "."), i.e. local modules.
    def should_add_statement(self, statement):
        return statement.stem.startswith(".")
class RemainderGroup(BaseImportGroup):
    # Catch-all group: accepts any statement not claimed by a prior group.
    def should_add_statement(self, statement):
        return True
# -- RemainderGroup goes last and catches everything left over
# The insertion order of this mapping defines group priority: sort_groups()
# orders group instances by their type's position here.
GROUP_MAPPING = OrderedDict(
    (
        ("stdlib", StdLibGroup),
        ("sitepackages", SitePackagesGroup),
        ("packages", PackagesGroup),
        ("local", LocalGroup),
        ("remainder", RemainderGroup),
    )
)
def sort_groups(groups):
    """Return the given group instances ordered by GROUP_MAPPING priority.

    Perf: the ordered list of group classes is built once instead of being
    rebuilt inside the key function for every element being sorted.
    """
    group_order = list(GROUP_MAPPING.values())
    return sorted(groups, key=lambda group: group_order.index(type(group)))
@six.python_2_unicode_compatible
class ImportGroups(list):
    """A list of import groups that together make up a file's import section."""

    def __init__(self, *args, **kwargs):
        super(ImportGroups, self).__init__(*args)
        self.file_artifacts = kwargs.get("file_artifacts", {})

    def all_line_numbers(self):
        """Sorted union of source line numbers across every contained group."""
        numbers = set()
        for group in self:
            numbers.update(group.all_line_numbers())
        return sorted(numbers)

    def add_group(self, config):
        """Instantiate and append the group described by ``config``."""
        if "type" not in config:
            raise ValueError('"type" must be specified in import group config')

        if config["type"] not in GROUP_MAPPING:
            raise ValueError(
                '"{}" is not supported import group'.format(config["type"])
            )

        group_cls = GROUP_MAPPING[config["type"]]
        self.append(group_cls(config))

    def add_statement_to_group(self, statement):
        """Hand the statement to the highest-priority group that accepts it."""
        for group in sort_groups(self):
            if group.add_statement(statement):
                return

        raise ValueError(
            "Import statement was not added into "
            "any of the import groups. "
            "Perhaps you can consider adding "
            '"remaining" import group which will '
            "catch all remaining import statements."
        )

    def as_string(self):
        sep = self.file_artifacts.get("sep", "\n") * 2
        rendered = (group.as_string() for group in self)
        return sep.join(text for text in rendered if text)

    def formatted(self, formatter=DEFAULT_FORMATTER, length=DEFAULT_LENGTH):
        sep = self.file_artifacts.get("sep", "\n") * 2
        rendered = (
            group.formatted(formatter=formatter, length=length) for group in self
        )
        return sep.join(text for text in rendered if text)

    def __str__(self):
        return self.as_string()
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import itertools
import operator
from collections import OrderedDict, defaultdict
from functools import reduce
import six
from .formatters import DEFAULT_FORMATTER, DEFAULT_LENGTH
from .utils import is_site_package, is_std_lib
@six.python_2_unicode_compatible
class BaseImportGroup(object):
def __init__(self, config=None, **kwargs):
self.config = config or {}
self.statements = kwargs.get("statements", [])
self.file_artifacts = kwargs.get("file_artifacts", {})
@property
def unique_statements(self):
return sorted(list(set(self.merged_statements)))
@property
def merged_statements(self):
"""
Merge statements with the same import stems
"""
leafless_counter = defaultdict(list)
counter = defaultdict(list)
for statement in self.statements:
if statement.leafs:
counter[statement.stem].append(statement)
else:
leafless_counter[statement.stem].append(statement)
merged_statements = list(itertools.chain(*leafless_counter.values()))
def merge(statements):
_special = []
_statements = []
for i in statements:
if i.leafs and i.leafs[0].name == "*":
_special.append(i)
else:
_statements.append(i)
_reduced = []
if _statements:
_reduced = [reduce(lambda a, b: a + b, _statements)]
return _special + _reduced
for statements in counter.values():
merged_statements.extend(merge(statements))
return merged_statements
def all_line_numbers(self):
return sorted(
list(
set(
list(
itertools.chain(
*map(
operator.attrgetter("line_numbers"),
self.statements,
)
)
)
)
)
)
def should_add_statement(self, statement):
raise NotImplementedError
def add_statement(self, statement):
if self.should_add_statement(statement):
self.statements.append(statement)
return True
return False
def as_string(self):
sep = self.file_artifacts.get("sep", "\n")
return sep.join(
map(operator.methodcaller("as_string"), self.unique_statements)
)
def formatted(self, formatter=DEFAULT_FORMATTER, length=DEFAULT_LENGTH):
sep = self.file_artifacts.get("sep", "\n")
return sep.join(
map(
operator.methodcaller(
"formatted", formatter=formatter, length=length
),
self.unique_statements,
)
)
def __str__(self):
return self.as_string()
class StdLibGroup(BaseImportGroup):
def should_add_statement(self, statement):
return is_std_lib(statement.root_module)
class SitePackagesGroup(BaseImportGroup):
def should_add_statement(self, statement):
return is_site_package(statement.root_module)
class PackagesGroup(BaseImportGroup):
def __init__(self, *args, **kwargs):
super(PackagesGroup, self).__init__(*args, **kwargs)
if "packages" not in self.config:
msg = (
'"package" config must be supplied ' "for packages import group"
)
raise ValueError(msg)
def should_add_statement(self, statement):
return statement.root_module in self.config.get("packages", [])
class LocalGroup(BaseImportGroup):
def should_add_statement(self, statement):
return statement.stem.startswith(".")
class RemainderGroup(BaseImportGroup):
def should_add_statement(self, statement):
return True
# -- RemainderGroup goes last and catches everything left over
GROUP_MAPPING = OrderedDict(
(
("stdlib", StdLibGroup),
("sitepackages", SitePackagesGroup),
("packages", PackagesGroup),
("local", LocalGroup),
("remainder", RemainderGroup),
)
)
def sort_groups(groups):
return sorted(
groups, key=lambda i: list(GROUP_MAPPING.values()).index(type(i))
)
@six.python_2_unicode_compatible
class ImportGroups(list):
    """An ordered collection of import groups for a single file.

    Statements are dispatched to the highest-priority group that accepts
    them; rendering joins each non-empty group's output with a blank line
    (two file separators).
    """

    def __init__(self, *args, **kwargs):
        super(ImportGroups, self).__init__(*args)
        # Artifacts preserved from parsing, e.g. the file's line separator.
        self.file_artifacts = kwargs.get("file_artifacts", {})

    def all_line_numbers(self):
        """Return sorted unique line numbers of all grouped statements."""
        # sorted() already returns a list; the original wrapped the chain
        # in redundant list(...(list(...))) conversions.
        return sorted(
            set(
                itertools.chain(
                    *map(operator.methodcaller("all_line_numbers"), self)
                )
            )
        )

    def add_group(self, config):
        """Instantiate and append the group described by *config*.

        Raises ValueError when the config lacks a "type" key or names an
        unsupported group type.
        """
        if "type" not in config:
            msg = '"type" must be specified in ' "import group config"
            raise ValueError(msg)

        if config["type"] not in GROUP_MAPPING:
            msg = '"{}" is not supported import group'.format(config["type"])
            raise ValueError(msg)

        self.append(GROUP_MAPPING[config["type"]](config))

    def add_statement_to_group(self, statement):
        """Offer *statement* to each group in priority order.

        Raises ValueError when no configured group accepts it.
        """
        groups_by_priority = sort_groups(self)

        for group in groups_by_priority:
            if group.add_statement(statement):
                return

        # Bug fix: the catch-all group type registered in GROUP_MAPPING is
        # "remainder"; the message previously suggested adding "remaining".
        msg = (
            "Import statement was not added into "
            "any of the import groups. "
            "Perhaps you can consider adding "
            '"remainder" import group which will '
            "catch all remaining import statements."
        )
        raise ValueError(msg)

    def as_string(self):
        """Render all groups with the file separator, skipping empty ones."""
        sep = self.file_artifacts.get("sep", "\n") * 2
        return sep.join(
            filter(None, map(operator.methodcaller("as_string"), self))
        )

    def formatted(self, formatter=DEFAULT_FORMATTER, length=DEFAULT_LENGTH):
        """Render all groups through *formatter*, skipping empty groups."""
        sep = self.file_artifacts.get("sep", "\n") * 2
        return sep.join(
            filter(
                None,
                map(
                    operator.methodcaller(
                        "formatted", formatter=formatter, length=length
                    ),
                    self,
                ),
            )
        )

    def __str__(self):
        return self.as_string()
| it | 0.230769 | 2.218511 | 2 |
NLP4CCB/migrations/0005_auto_20170415_2236.py | rossmechanic/know_your_nyms | 1 | 13650 | <filename>NLP4CCB/migrations/0005_auto_20170415_2236.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-04-15 22:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10.3): rename UserStat.index to
    meronyms_index and add per-relation index counters."""

    dependencies = [("NLP4CCB", "0004_auto_20170330_0255")]

    operations = [
        # "index" previously tracked only meronyms; make that explicit.
        migrations.RenameField(
            model_name="userstat", old_name="index", new_name="meronyms_index"
        ),
        migrations.AddField(
            model_name="userstat",
            name="antonyms_index",
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name="userstat",
            name="hyponyms_index",
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name="userstat",
            name="synonyms_index",
            field=models.IntegerField(default=0),
        ),
    ]
| <filename>NLP4CCB/migrations/0005_auto_20170415_2236.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-04-15 22:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("NLP4CCB", "0004_auto_20170330_0255")]
operations = [
migrations.RenameField(
model_name="userstat", old_name="index", new_name="meronyms_index"
),
migrations.AddField(
model_name="userstat",
name="antonyms_index",
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name="userstat",
name="hyponyms_index",
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name="userstat",
name="synonyms_index",
field=models.IntegerField(default=0),
),
]
| fr | 0.160113 | 1.643188 | 2 |
synchCams/start_server.py | ateshkoul/synchCams | 0 | 13651 | <reponame>ateshkoul/synchCams<gh_stars>0
import socket
import json
import pdb
import copy
def dict_to_bytes(the_dict):
    """Serialize *the_dict* to UTF-8 encoded JSON bytes."""
    encoded = json.dumps(the_dict)
    return encoded.encode('utf-8')
def bytes_to_dict(string):
    """Decode UTF-8 JSON bytes back into a Python object (typically a dict)."""
    decoded = string.decode('utf-8')
    return json.loads(decoded)
class server_con():
    """Minimal one-shot TCP server exchanging JSON-encoded dicts.

    NOTE(review): neither the listening socket nor the accepted
    connection is ever closed, and SO_REUSEADDR is not set, so quick
    restarts may fail with "address already in use".
    """

    def __init__(self,host='',port=30):
        # Bind immediately; listening/accepting happens in server_read().
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind((host, port))

    def server_read(self):
        """Block until a client connects, read one message (<= 1024 bytes)
        and return it decoded from JSON.

        NOTE(review): ``return_data`` starts as ``{}``; if the client sends
        nothing, the later ``.decode``/``bytes_to_dict`` calls operate on a
        dict and raise AttributeError/TypeError (not caught by the
        socket.error handler) -- confirm clients always send a payload.
        """
        # pdb.set_trace()
        print("Waiting to read ...")
        self.s.listen(1)
        self.conn, self.addr = self.s.accept()
        print('Connected by', self.addr)
        return_data = {}
        try:
            in_data = self.conn.recv(1024)
            # pdb.set_trace()
            if in_data: return_data = copy.deepcopy(in_data)
            # if not in_data: break
            print("Client Says: "+return_data.decode("utf-8"))
            # self.conn.sendall(b"Server Says:hi")
        except socket.error:
            print("Error Occured.")
        # self.conn.close()
        return(bytes_to_dict(return_data))

    def server_write(self,data,host="172.16.58.3",port=30):
        """Send *data* as JSON over the connection accepted by server_read().

        NOTE(review): the ``host``/``port`` parameters are unused.
        """
        # pdb.set_trace()
        print('Writing values ...')
        self.conn.sendall(dict_to_bytes(data))
# def server_read(host='',port=30):
# # host = '' # Symbolic name meaning all available interfaces
# # port = 30 # Arbitrary non-privileged port
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.bind((host, port))
# print(host , port)
# s.listen(1)
# conn, addr = s.accept()
# print('Connected by', addr)
# return_data = {}
# while True:
# try:
# in_data = conn.recv(1024)
# # pdb.set_trace()
# if in_data: return_data = copy.deepcopy(in_data)
# if not in_data: break
# print("Client Says: "+return_data.decode("utf-8"))
# conn.sendall(b"Server Says:hi")
# except socket.error:
# print("Error Occured.")
# break
# conn.close()
# return(bytes_to_dict(return_data))
# # return(return_data) | import socket
import json
import pdb
import copy
def dict_to_bytes(the_dict):
string = json.dumps(the_dict).encode('utf-8')
return(string)
def bytes_to_dict(string):
the_dict = json.loads(string.decode('utf-8'))
return(the_dict)
class server_con():
def __init__(self,host='',port=30):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind((host, port))
def server_read(self):
# pdb.set_trace()
print("Waiting to read ...")
self.s.listen(1)
self.conn, self.addr = self.s.accept()
print('Connected by', self.addr)
return_data = {}
try:
in_data = self.conn.recv(1024)
# pdb.set_trace()
if in_data: return_data = copy.deepcopy(in_data)
# if not in_data: break
print("Client Says: "+return_data.decode("utf-8"))
# self.conn.sendall(b"Server Says:hi")
except socket.error:
print("Error Occured.")
# self.conn.close()
return(bytes_to_dict(return_data))
def server_write(self,data,host="172.16.58.3",port=30):
# pdb.set_trace()
print('Writing values ...')
self.conn.sendall(dict_to_bytes(data))
# def server_read(host='',port=30):
# # host = '' # Symbolic name meaning all available interfaces
# # port = 30 # Arbitrary non-privileged port
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.bind((host, port))
# print(host , port)
# s.listen(1)
# conn, addr = s.accept()
# print('Connected by', addr)
# return_data = {}
# while True:
# try:
# in_data = conn.recv(1024)
# # pdb.set_trace()
# if in_data: return_data = copy.deepcopy(in_data)
# if not in_data: break
# print("Client Says: "+return_data.decode("utf-8"))
# conn.sendall(b"Server Says:hi")
# except socket.error:
# print("Error Occured.")
# break
# conn.close()
# return(bytes_to_dict(return_data))
# # return(return_data) | en | 0.127242 | 2.779059 | 3 |
9.part2.py | elp2/advent_of_code_2020 | 1 | 13652 | <gh_stars>1-10
from collections import defaultdict
def return_default():
    """Default-value factory used by dd(): missing keys start at 0."""
    default = 0
    return default
def dd():
    """Build a defaultdict whose missing entries default to 0."""
    mapping = defaultdict(return_default)
    return mapping
CHALLENGE_DAY = "9"

# Input files are expected beside the script: "<day>.txt" (real input)
# and "<day>.sample.txt" (sample input).
REAL = open(CHALLENGE_DAY + ".txt").read()
assert len(REAL) > 1
SAMPLE = open(CHALLENGE_DAY + ".sample.txt").read()

SAMPLE_EXPECTED = 127  # expected answer for the sample input
# SAMPLE_EXPECTED =
def parse_lines(raw):
    """Parse the puzzle input: one integer per line."""
    return [int(line) for line in raw.split("\n")]
def lastnums(nums, last, sumsto):
    """Return True when two entries at distinct indices within the window
    ``nums[last-25:last]`` sum to *sumsto* (the XMAS preamble check).

    Single O(window) pass with a seen-set instead of the original
    O(window^2) double loop; the per-call debug printing was removed.
    """
    seen = set()
    for value in nums[last - 25:last]:
        # A complement seen earlier guarantees two distinct indices.
        if sumsto - value in seen:
            return True
        seen.add(value)
    return False
def pream(nums, last):
    """Return the first value at index >= *last* that is NOT the sum of two
    distinct numbers in the 25-entry window preceding it, or None.

    (Removed the unused ``at`` local and per-iteration debug printing;
    the implicit None return is now explicit.)
    """
    for i in range(last, len(nums)):
        if not lastnums(nums, i, nums[i]):
            return nums[i]
    return None
def solve(raw):
    """AoC 2020 day 9 part 2: find a contiguous run of input numbers that
    sums to TARGET (the part-1 answer) and return min(run) + max(run).

    Fixes two defects in the original: the scan used ``parsed[i:j]`` with
    ``j < len(parsed)`` and therefore never tested a run ending at the
    last element, and it fell through to ``return ret`` with ``ret``
    undefined (NameError) when no run was found. A running sum replaces
    re-summing every slice (O(n^2) instead of O(n^3)).
    """
    parsed = parse_lines(raw)
    TARGET = 1639024365
    for start in range(len(parsed)):
        total = 0
        for end in range(start, len(parsed)):
            total += parsed[end]
            if total == TARGET:
                run = parsed[start:end + 1]
                return min(run) + max(run)
    return None
def test_parsing(lines):
if isinstance(lines, list):
for i in range(min(5, len(lines))):
print(lines[i])
elif isinstance(lines, dict) or isinstance(lines, defaultdict):
nd = {}
for k in list(lines.keys())[0: 5]:
print("\"" + k + "\": " + str(lines[k]))
# Smoke-check the parser on the sample input, then solve the real puzzle.
test_parsing(parse_lines(SAMPLE))
print("^^^^^^^^^PARSED SAMPLE SAMPLE^^^^^^^^^")

# sample = solve(SAMPLE)
# if SAMPLE_EXPECTED is None:
#     print("*** SKIPPING SAMPLE! ***")
# else:
#     assert sample == SAMPLE_EXPECTED
#     print("*** SAMPLE PASSED ***")

solved = solve(REAL)
print("SOLUTION: ", solved)
# assert solved
| from collections import defaultdict
def return_default():
return 0
def dd():
return defaultdict(return_default)
CHALLENGE_DAY = "9"
REAL = open(CHALLENGE_DAY + ".txt").read()
assert len(REAL) > 1
SAMPLE = open(CHALLENGE_DAY + ".sample.txt").read()
SAMPLE_EXPECTED = 127
# SAMPLE_EXPECTED =
def parse_lines(raw):
# Groups.
# split = raw.split("\n\n")
# return list(map(lambda group: group.split("\n"), split))
split = raw.split("\n")
# return split # raw
# return list(map(lambda l: l.split(" "), split)) # words.
return list(map(int, split))
# return list(map(lambda l: l.strip(), split)) # beware leading / trailing WS
def lastnums(nums, last, sumsto):
f = last - 25
to = last
print("considering ", f, to)
for j in range(f, to):
for k in range(f, to):
if j == k:
continue
if nums[j] + nums[k] == sumsto:
return True
return False
def pream(nums, last):
at = last
for i in range(last, len(nums)):
print(i)
if not lastnums(nums, i, nums[i]):
return nums[i]
else:
print("Not", nums[i])
def solve(raw):
parsed = parse_lines(raw)
# Debug here to make sure parsing is good.
TARGET=1639024365
for i in range(len(parsed)):
for j in range(i, (len(parsed))):
arr = parsed[i:j]
here = sum(arr)
if here == TARGET:
return min(arr) + max(arr)
return ret
def test_parsing(lines):
if isinstance(lines, list):
for i in range(min(5, len(lines))):
print(lines[i])
elif isinstance(lines, dict) or isinstance(lines, defaultdict):
nd = {}
for k in list(lines.keys())[0: 5]:
print("\"" + k + "\": " + str(lines[k]))
test_parsing(parse_lines(SAMPLE))
print("^^^^^^^^^PARSED SAMPLE SAMPLE^^^^^^^^^")
# sample = solve(SAMPLE)
# if SAMPLE_EXPECTED is None:
# print("*** SKIPPING SAMPLE! ***")
# else:
# assert sample == SAMPLE_EXPECTED
# print("*** SAMPLE PASSED ***")
solved = solve(REAL)
print("SOLUTION: ", solved)
# assert solved | en | 0.350445 | 3.464303 | 3 |
exif_address_finder/ExifAddressFinderManager.py | jonathanlurie/ExifAddressFinder | 0 | 13653 | <reponame>jonathanlurie/ExifAddressFinder
#!/usr/bin/env python
'''
Author : <NAME>
Email : <EMAIL>
Version : 0.1
Licence : MIT
description : The entry point to the library.
'''
import GeoToolbox
import exifread
import piexif
from IFD_KEYS_REFERENCE import *
import exifWriter
import os
class ExifAddressFinderManager:
_geotoolbox = None
def __init__(self):
self._geotoolbox = GeoToolbox.GeoToolbox()
# return a dictionnary {"lat": yy.yyy, "lon": xx.xxx}
# or None if not found
def _getGpsCoordinates(self, fileAddress):
f = open(fileAddress, 'rb')
# Return Exif tags
tags = exifread.process_file(f)
# add positionning
if('EXIF GPS GPSLatitude' in tags.keys() and 'EXIF GPS GPSLongitude' in tags.keys()):
# dealing with latitutes
latValues = tags["EXIF GPS GPSLatitude"].values
latRef = tags["EXIF GPS GPSLatitudeRef"]
latInt = float(latValues[0].num)
latDec = float(latValues[1].num) / float(latValues[1].den) / 60. + float(latValues[2].num) / float(latValues[2].den) / 3600.
lat = latInt + latDec
if(latRef.values != 'N'):
lat = lat * (-1)
# dealing with longitudes
lonValues = tags["EXIF GPS GPSLongitude"].values
lonRef = tags["EXIF GPS GPSLongitudeRef"]
lonInt = float(lonValues[0].num)
lonDec = float(lonValues[1].num) / float(lonValues[1].den) / 60. + float(lonValues[2].num) / float(lonValues[2].den) / 3600.
lon = lonInt + lonDec
if(lonRef.values != 'E'):
lon = lon * (-1)
return {"lat": lat, "lon": lon}
else:
return None
# return the address if found
# returns None if not retrieve
def _retrieveAddress(self, latitude, longitude):
address = self._geotoolbox.getAddress(latitude=latitude, longitude=longitude)
# if the address was well retrieve
if(address["status"]):
return address["address"]
else:
return None
# update the EXIF Decription field with the real postal address
def _updateDescription(self, fileAddress, locationAddress, addToFormer=False):
# reading exif
exifDict = piexif.load(fileAddress)
newDict = exifWriter.writeField(exifDict, DESCRIPTION_FIELD, locationAddress, addToFormer)
exifWriter.writeExifToFile(newDict, fileAddress)
def addAddressToImage(self, fileAddress, prefix="", suffix="", addToFormer=False):
coordinates = self._getGpsCoordinates(fileAddress)
if(not coordinates):
print("\tERROR: "+ os.path.basename(fileAddress) +" is not geo tagged")
return None
postalAddress = self._retrieveAddress(coordinates["lat"], coordinates["lon"])
if(not postalAddress):
print("\tERROR: The address was impossible to retrieve")
return None
self._updateDescription(fileAddress, prefix + postalAddress + suffix, addToFormer)
return 1
| #!/usr/bin/env python
'''
Author : <NAME>
Email : <EMAIL>
Version : 0.1
Licence : MIT
description : The entry point to the library.
'''
import GeoToolbox
import exifread
import piexif
from IFD_KEYS_REFERENCE import *
import exifWriter
import os
class ExifAddressFinderManager:
_geotoolbox = None
def __init__(self):
self._geotoolbox = GeoToolbox.GeoToolbox()
# return a dictionnary {"lat": yy.yyy, "lon": xx.xxx}
# or None if not found
def _getGpsCoordinates(self, fileAddress):
f = open(fileAddress, 'rb')
# Return Exif tags
tags = exifread.process_file(f)
# add positionning
if('EXIF GPS GPSLatitude' in tags.keys() and 'EXIF GPS GPSLongitude' in tags.keys()):
# dealing with latitutes
latValues = tags["EXIF GPS GPSLatitude"].values
latRef = tags["EXIF GPS GPSLatitudeRef"]
latInt = float(latValues[0].num)
latDec = float(latValues[1].num) / float(latValues[1].den) / 60. + float(latValues[2].num) / float(latValues[2].den) / 3600.
lat = latInt + latDec
if(latRef.values != 'N'):
lat = lat * (-1)
# dealing with longitudes
lonValues = tags["EXIF GPS GPSLongitude"].values
lonRef = tags["EXIF GPS GPSLongitudeRef"]
lonInt = float(lonValues[0].num)
lonDec = float(lonValues[1].num) / float(lonValues[1].den) / 60. + float(lonValues[2].num) / float(lonValues[2].den) / 3600.
lon = lonInt + lonDec
if(lonRef.values != 'E'):
lon = lon * (-1)
return {"lat": lat, "lon": lon}
else:
return None
# return the address if found
# returns None if not retrieve
def _retrieveAddress(self, latitude, longitude):
address = self._geotoolbox.getAddress(latitude=latitude, longitude=longitude)
# if the address was well retrieve
if(address["status"]):
return address["address"]
else:
return None
# update the EXIF Decription field with the real postal address
def _updateDescription(self, fileAddress, locationAddress, addToFormer=False):
# reading exif
exifDict = piexif.load(fileAddress)
newDict = exifWriter.writeField(exifDict, DESCRIPTION_FIELD, locationAddress, addToFormer)
exifWriter.writeExifToFile(newDict, fileAddress)
def addAddressToImage(self, fileAddress, prefix="", suffix="", addToFormer=False):
coordinates = self._getGpsCoordinates(fileAddress)
if(not coordinates):
print("\tERROR: "+ os.path.basename(fileAddress) +" is not geo tagged")
return None
postalAddress = self._retrieveAddress(coordinates["lat"], coordinates["lon"])
if(not postalAddress):
print("\tERROR: The address was impossible to retrieve")
return None
self._updateDescription(fileAddress, prefix + postalAddress + suffix, addToFormer)
return 1 | pt | 0.111562 | 2.37779 | 2 |
src/anaplan_api/Model.py | jeswils-ap/anaplan-api | 2 | 13654 | <filename>src/anaplan_api/Model.py
import json
import logging
import requests
from typing import List
from requests.exceptions import HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout
from .User import User
from .ModelDetails import ModelDetails
logger = logging.getLogger(__name__)
class Model(User):
    """Anaplan user specialised for listing the models the authenticated
    user can access."""

    def get_models(self) -> List[ModelDetails]:
        """Fetch this user's model list from the Anaplan API.

        Returns a list of ModelDetails. Raises Exception on transport or
        JSON errors, AttributeError when the response lacks 'models'.
        """
        # Bug fix: the accumulator previously started as [ModelDetails],
        # i.e. the class object itself was the list's first element.
        model_details_list = []
        url = ''.join([super().get_url(), super().get_id(), "/models"])
        authorization = super().get_conn().get_auth().get_auth_token()

        get_header = {
            "Authorization": authorization,
            "Content-Type": "application/json"
        }

        logger.info(f"Fetching models for {super().get_id()}")

        try:
            response = requests.get(url, headers=get_header, timeout=(5, 30))
            model_list = json.loads(response.text)
        except (HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout) as e:
            logger.error(f"Error getting models list: {e}", exc_info=True)
            raise Exception(f"Error getting model list {e}")
        except ValueError as e:
            logger.error(f"Error loading model list {e}", exc_info=True)
            raise Exception(f"Error loading model list {e}")

        if 'models' in model_list:
            for item in model_list['models']:
                model_details_list.append(ModelDetails(item))
            logger.info("Finished fetching models.")
            return model_details_list
        else:
            raise AttributeError("Models not found in response.")
| <filename>src/anaplan_api/Model.py
import json
import logging
import requests
from typing import List
from requests.exceptions import HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout
from .User import User
from .ModelDetails import ModelDetails
logger = logging.getLogger(__name__)
class Model(User):
def get_models(self) -> List[ModelDetails]:
model_details_list = [ModelDetails]
model_list = {}
url = ''.join([super().get_url(), super().get_id(), "/models"])
authorization = super().get_conn().get_auth().get_auth_token()
get_header = {
"Authorization": authorization,
"Content-Type": "application/json"
}
logger.info(f"Fetching models for {super().get_id()}")
try:
model_list = json.loads(requests.get(url, headers=get_header, timeout=(5, 30)).text)
except (HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout) as e:
logger.error(f"Error getting models list: {e}", exc_info=True)
raise Exception(f"Error getting model list {e}")
except ValueError as e:
logger.error(f"Error loading model list {e}", exc_info=True)
raise Exception(f"Error loading model list {e}")
if 'models' in model_list:
models = model_list['models']
logger.info("Finished fetching models.")
for item in models:
model_details_list.append(ModelDetails(item))
return model_details_list
else:
raise AttributeError("Models not found in response.")
| none | 1 | 2.491296 | 2 |
reproduction/Summarization/BertSum/model.py | KuNyaa/fastNLP | 1 | 13655 | <filename>reproduction/Summarization/BertSum/model.py
import torch
from torch import nn
from torch.nn import init
from fastNLP.modules.encoder.bert import BertModel
class Classifier(nn.Module):
    """Scores each sentence embedding with a sigmoid-activated linear
    layer, zeroing out positions masked off by *mask_cls*."""

    def __init__(self, hidden_size):
        super(Classifier, self).__init__()
        self.linear = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs, mask_cls):
        # [batch_size, seq_len, hidden] -> [batch_size, seq_len]
        logits = self.linear(inputs).squeeze(-1)
        return self.sigmoid(logits) * mask_cls.float()
class BertSum(nn.Module):
    """Extractive summarizer: BERT encoder + per-sentence sigmoid scorer.

    NOTE(review): the BERT checkpoint path is hard-coded; confirm it is
    valid for the deployment environment.
    """

    def __init__(self, hidden_size=768):
        super(BertSum, self).__init__()

        self.hidden_size = hidden_size
        self.encoder = BertModel.from_pretrained('/path/to/uncased_L-12_H-768_A-12')
        self.decoder = Classifier(self.hidden_size)

    def forward(self, article, segment_id, cls_id):
        """Score each sentence of *article*.

        article: token ids, presumably 0-padded -- confirm tokenizer pad id.
        cls_id: per-sentence [CLS] positions, padded with -1.
        Returns {'pred': [batch, n_sents] scores, 'mask': sentence mask}.
        """
        # print(article.device)
        # print(segment_id.device)
        # print(cls_id.device)

        # Masks derived from the padding conventions above (1 = real).
        input_mask = 1 - (article == 0)
        mask_cls = 1 - (cls_id == -1)
        assert input_mask.size() == article.size()
        assert mask_cls.size() == cls_id.size()

        bert_out = self.encoder(article, token_type_ids=segment_id, attention_mask=input_mask)
        bert_out = bert_out[0][-1] # last layer

        # Gather the [CLS] embedding of every sentence, zero the padded ones.
        sent_emb = bert_out[torch.arange(bert_out.size(0)).unsqueeze(1), cls_id]
        sent_emb = sent_emb * mask_cls.unsqueeze(-1).float()
        assert sent_emb.size() == (article.size(0), cls_id.size(1), self.hidden_size) # [batch_size, seq_len, hidden_size]

        sent_scores = self.decoder(sent_emb, mask_cls) # [batch_size, seq_len]
        assert sent_scores.size() == (article.size(0), cls_id.size(1))

        return {'pred': sent_scores, 'mask': mask_cls}
| <filename>reproduction/Summarization/BertSum/model.py
import torch
from torch import nn
from torch.nn import init
from fastNLP.modules.encoder.bert import BertModel
class Classifier(nn.Module):
def __init__(self, hidden_size):
super(Classifier, self).__init__()
self.linear = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, inputs, mask_cls):
h = self.linear(inputs).squeeze(-1) # [batch_size, seq_len]
sent_scores = self.sigmoid(h) * mask_cls.float()
return sent_scores
class BertSum(nn.Module):
def __init__(self, hidden_size=768):
super(BertSum, self).__init__()
self.hidden_size = hidden_size
self.encoder = BertModel.from_pretrained('/path/to/uncased_L-12_H-768_A-12')
self.decoder = Classifier(self.hidden_size)
def forward(self, article, segment_id, cls_id):
# print(article.device)
# print(segment_id.device)
# print(cls_id.device)
input_mask = 1 - (article == 0)
mask_cls = 1 - (cls_id == -1)
assert input_mask.size() == article.size()
assert mask_cls.size() == cls_id.size()
bert_out = self.encoder(article, token_type_ids=segment_id, attention_mask=input_mask)
bert_out = bert_out[0][-1] # last layer
sent_emb = bert_out[torch.arange(bert_out.size(0)).unsqueeze(1), cls_id]
sent_emb = sent_emb * mask_cls.unsqueeze(-1).float()
assert sent_emb.size() == (article.size(0), cls_id.size(1), self.hidden_size) # [batch_size, seq_len, hidden_size]
sent_scores = self.decoder(sent_emb, mask_cls) # [batch_size, seq_len]
assert sent_scores.size() == (article.size(0), cls_id.size(1))
return {'pred': sent_scores, 'mask': mask_cls}
| it | 0.134949 | 2.422503 | 2 |
p4p2p/dht/constants.py | ntoll/p4p2p | 8 | 13656 | <filename>p4p2p/dht/constants.py
# -*- coding: utf-8 -*-
"""
Defines constants used by P4P2P. Usually these are based upon concepts from
the Kademlia DHT and where possible naming is derived from the original
Kademlia paper as are the suggested default values.
"""
#: Represents the degree of parallelism in network calls.
ALPHA = 3
#: The maximum number of contacts stored in a bucket. Must be an even number.
K = 20
#: The default maximum time a NodeLookup is allowed to take (in seconds).
LOOKUP_TIMEOUT = 600
#: The timeout for network connections (in seconds).
RPC_TIMEOUT = 5
#: The timeout for receiving complete message once a connection is made (in
#: seconds). Ensures there are no stale deferreds in the node's _pending
#: dictionary.
RESPONSE_TIMEOUT = 1800 # half an hour
#: How long to wait before an unused bucket is refreshed (in seconds).
REFRESH_TIMEOUT = 3600 # 1 hour
#: How long to wait before a node replicates any data it stores (in seconds).
REPLICATE_INTERVAL = REFRESH_TIMEOUT
#: How long to wait before a node checks whether any buckets need refreshing or
#: data needs republishing (in seconds).
REFRESH_INTERVAL = int(REFRESH_TIMEOUT / 6) # Every 10 minutes.
#: The number of failed remote procedure calls allowed for a peer node. If this
#: is equalled or exceeded then the contact is removed from the routing table.
ALLOWED_RPC_FAILS = 5
#: The number of nodes to attempt to use to store a value in the network.
DUPLICATION_COUNT = K
#: The duration (in seconds) that is added to a value's creation time in order
#: to work out its expiry timestamp. -1 denotes no expiry point.
EXPIRY_DURATION = -1
#: Defines the errors that can be reported between nodes in the network.
ERRORS = {
# The message simply didn't make any sense.
1: 'Bad message',
# The message was parsed but not recognised.
2: 'Unknown message type',
# The message was parsed and recognised but the node encountered a problem
# when dealing with it.
3: 'Internal error',
# The message was too big for the node to handle.
4: 'Message too big',
# Unsupported version of the protocol.
5: 'Unsupported protocol',
# The message could not be cryptographically verified.
6: 'Unverifiable provenance'
}
| <filename>p4p2p/dht/constants.py
# -*- coding: utf-8 -*-
"""
Defines constants used by P4P2P. Usually these are based upon concepts from
the Kademlia DHT and where possible naming is derived from the original
Kademlia paper as are the suggested default values.
"""
#: Represents the degree of parallelism in network calls.
ALPHA = 3
#: The maximum number of contacts stored in a bucket. Must be an even number.
K = 20
#: The default maximum time a NodeLookup is allowed to take (in seconds).
LOOKUP_TIMEOUT = 600
#: The timeout for network connections (in seconds).
RPC_TIMEOUT = 5
#: The timeout for receiving complete message once a connection is made (in
#: seconds). Ensures there are no stale deferreds in the node's _pending
#: dictionary.
RESPONSE_TIMEOUT = 1800 # half an hour
#: How long to wait before an unused bucket is refreshed (in seconds).
REFRESH_TIMEOUT = 3600 # 1 hour
#: How long to wait before a node replicates any data it stores (in seconds).
REPLICATE_INTERVAL = REFRESH_TIMEOUT
#: How long to wait before a node checks whether any buckets need refreshing or
#: data needs republishing (in seconds).
REFRESH_INTERVAL = int(REFRESH_TIMEOUT / 6) # Every 10 minutes.
#: The number of failed remote procedure calls allowed for a peer node. If this
#: is equalled or exceeded then the contact is removed from the routing table.
ALLOWED_RPC_FAILS = 5
#: The number of nodes to attempt to use to store a value in the network.
DUPLICATION_COUNT = K
#: The duration (in seconds) that is added to a value's creation time in order
#: to work out its expiry timestamp. -1 denotes no expiry point.
EXPIRY_DURATION = -1
#: Defines the errors that can be reported between nodes in the network.
ERRORS = {
# The message simply didn't make any sense.
1: 'Bad message',
# The message was parsed but not recognised.
2: 'Unknown message type',
# The message was parsed and recognised but the node encountered a problem
# when dealing with it.
3: 'Internal error',
# The message was too big for the node to handle.
4: 'Message too big',
# Unsupported version of the protocol.
5: 'Unsupported protocol',
# The message could not be cryptographically verified.
6: 'Unverifiable provenance'
}
| pt | 0.193493 | 2.015959 | 2 |
turtle-crossing/car_manager.py | twbm/Git-Learning-Thingy | 1 | 13657 | <reponame>twbm/Git-Learning-Thingy
from turtle import Turtle
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 10
class CarManager(Turtle):
    """A single car: a colored rectangle that spawns at the right edge at
    a random height, facing left."""

    def __del__(self):
        # NOTE(review): printing from __del__ is noisy and fires at
        # unpredictable times; kept for backward compatibility.
        print(f"Deleted: {self}")

    def __init__(self):
        super().__init__('square')
        self.speed('fast')
        self.hideturtle()              # hide while positioning
        self.setheading(180)           # face left
        self.shapesize(1, 2.5)         # stretch the square into a car shape
        self.color(random.choice(COLORS))
        self.penup()
        self.goto(320, random.randint(-250, 250))
        self.showturtle()

    def move(self, distance=10):
        """Advance the car *distance* pixels along its heading (left).

        Generalized from a hard-coded 10; the default preserves the
        original behaviour for existing callers.
        """
        self.forward(distance)
| from turtle import Turtle
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 10
class CarManager(Turtle):
def __del__(self):
print(f"Deleted: {self}")
def __init__(self):
super().__init__('square')
self.speed('fast')
self.hideturtle()
self.setheading(180)
self.shapesize(1, 2.5)
self.color(random.choice(COLORS))
self.penup()
self.goto(320, random.randint(-250, 250))
self.showturtle()
def move(self):
self.forward(10) | none | 1 | 3.62974 | 4 |
manabi/apps/flashcards/permissions.py | aehlke/manabi | 14 | 13658 | <reponame>aehlke/manabi
from django.shortcuts import get_object_or_404
from rest_framework import permissions
from manabi.apps.flashcards.models import Deck
WRITE_ACTIONS = ['create', 'update', 'partial_update', 'delete']
class DeckSynchronizationPermission(permissions.BasePermission):
    """Allows write actions only when the upstream deck being synchronized
    from is shared; read actions are always permitted."""

    message = "You don't have permission to add this deck to your library."

    def has_permission(self, request, view):
        if view.action not in WRITE_ACTIONS:
            return True
        upstream_deck = get_object_or_404(
            Deck, pk=request.data['synchronized_with'])
        return upstream_deck.shared
class IsOwnerPermission(permissions.BasePermission):
    """Write actions require the authenticated requester to own the
    object; read actions are always permitted."""

    message = "You don't own this."

    def has_object_permission(self, request, view, obj):
        if view.action not in WRITE_ACTIONS:
            return True
        is_owner = (
            request.user.is_authenticated and
            obj.owner.pk == request.user.pk
        )
        return is_owner
| from django.shortcuts import get_object_or_404
from rest_framework import permissions
from manabi.apps.flashcards.models import Deck
WRITE_ACTIONS = ['create', 'update', 'partial_update', 'delete']
class DeckSynchronizationPermission(permissions.BasePermission):
message = "You don't have permission to add this deck to your library."
def has_permission(self, request, view):
if view.action in WRITE_ACTIONS:
upstream_deck = get_object_or_404(
Deck, pk=request.data['synchronized_with'])
return upstream_deck.shared
return True
class IsOwnerPermission(permissions.BasePermission):
message = "You don't own this."
def has_object_permission(self, request, view, obj):
if view.action in WRITE_ACTIONS:
return (
request.user.is_authenticated and
obj.owner.pk == request.user.pk
)
return True | none | 1 | 2.116661 | 2 |
Scripts/create_phone_number.py | yogeshwaran01/Mini-Projects | 4 | 13659 | """
Function convert lists of 10 elements
into in the format of phone number
Example,
(123) 456-789
"""
def create_phone_number(n: list) -> str:
    """Format ten single digits as a US phone number string.

    >>> create_phone_number([1,2,3,4,5,6,7,8,9,0])
    '(123) 456-7890'
    """
    digits = "".join(str(d) for d in n)
    return "({}) {}-{}".format(digits[:3], digits[3:6], digits[6:])
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
Function convert lists of 10 elements
into in the format of phone number
Example,
(123) 456-789
"""
def create_phone_number(n: list) -> str:
"""
>>> create_phone_number([1,2,3,4,5,6,7,8,9,0])
'(123) 456-7890'
"""
return "({}{}{}) {}{}{}-{}{}{}{}".format(*n)
if __name__ == "__main__":
import doctest
doctest.testmod()
| it | 0.078453 | 3.674033 | 4 |
pointscan/scan.py | gtfierro/point_label_sharing | 5 | 13660 | <gh_stars>1-10
import click
import logging
import pandas as pd
from pathlib import Path
@click.group()
def main():
    # Root CLI group; subcommands are registered via @main.command().
    # (No docstring: click would surface it as help text.)
    pass
@main.command(help="Scan for BACnet devices on your network")
@click.option("--ip", help="source IP to use (interface)")
@click.option("--dest", default=".", help="destination of scraped points")
def scan(ip, dest):
    # Discover BACnet devices via BAC0 and dump each device's points
    # (name/units/description) to its own CSV file in *dest*.
    # Bug fix: the original computed a per-device filename but wrote a
    # single CSV (all devices' points combined) named after the LAST
    # device only; points are now reset and written per device.
    import BAC0
    BAC0.log_level('error')
    c = BAC0.connect(ip=ip)
    c.discover()
    for dev in c.devices:
        logging.info(f"Scanning BACnet device {dev}")
        devname = f"{dev[0]}-{dev[1]}-{dev[2]}-{dev[3]}.csv"
        device = BAC0.device(dev[2], dev[3], c, history_size=1)
        points = []
        for point in device.points:
            try:
                points.append({
                    'name': getattr(point.properties, 'name', None),
                    'units': getattr(point.properties, 'units', None),
                    'description': getattr(point.properties,
                                           'description', None),
                })
            except Exception as e:
                # Best-effort: log the failing point and keep scanning.
                logging.error(point)
                logging.error(e)
        df = pd.DataFrame.from_records(points)
        df.to_csv(Path(dest) / Path(devname.replace(' ', '_')), index=False)
    c.disconnect()
@main.command(help="Run webserver to clean/publish datasets")
@click.option("--port", default=5000, help="webserver port")
def web(port):
    # Import deferred so the Flask app is only loaded for this subcommand.
    from pointscan.app import app
    # NOTE(review): binds on all interfaces with debug=True -- the Werkzeug
    # debugger allows code execution; do not expose publicly.
    app.run(host='0.0.0.0', port=port, debug=True)
if __name__ == '__main__':
main()
| import click
import logging
import pandas as pd
from pathlib import Path
@click.group()
def main():
pass
@main.command(help="Scan for BACnet devices on your network")
@click.option("--ip", help="source IP to use (interface)")
@click.option("--dest", default=".", help="destination of scraped points")
def scan(ip, dest):
import BAC0
BAC0.log_level('error')
c = BAC0.connect(ip=ip)
c.discover()
points = []
for dev in c.devices:
logging.info(f"Scanning BACnet device {dev}")
devname = f"{dev[0]}-{dev[1]}-{dev[2]}-{dev[3]}.csv"
device = BAC0.device(dev[2], dev[3], c, history_size=1)
for point in device.points:
try:
d = {
'name': getattr(point.properties, 'name', None),
'units': getattr(point.properties, 'units', None),
'description': getattr(point.properties,
'description', None),
}
points.append(d)
except Exception as e:
logging.error(point)
logging.error(e)
c.disconnect()
df = pd.DataFrame.from_records(points)
df.to_csv(Path(dest) / Path(devname.replace(' ', '_')), index=False)
@main.command(help="Run webserver to clean/publish datasets")
@click.option("--port", default=5000, help="webserver port")
def web(port):
from pointscan.app import app
app.run(host='0.0.0.0', port=port, debug=True)
if __name__ == '__main__':
main() | none | 1 | 2.614705 | 3 |
var/spack/repos/builtin/packages/py-jdatetime/package.py | adrianjhpc/spack | 1 | 13661 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJdatetime(PythonPackage):
    """jdatetime is Jalali implementation of Python's datetime module"""
    # The docstring doubles as the Spack package description.

    homepage = "https://github.com/slashmili/python-jalali"
    url      = "https://pypi.io/packages/source/j/jdatetime/jdatetime-3.6.2.tar.gz"

    version('3.6.2', sha256='a589e35f0dab89283c1a3de9d70ed6cf657932aaed8e8ce1b0e5801aaab1da67')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJdatetime(PythonPackage):
"""jdatetime is Jalali implementation of Python's datetime module"""
homepage = "https://github.com/slashmili/python-jalali"
url = "https://pypi.io/packages/source/j/jdatetime/jdatetime-3.6.2.tar.gz"
version('3.6.2', sha256='a589e35f0dab89283c1a3de9d70ed6cf657932aaed8e8ce1b0e5801aaab1da67')
| pt | 0.242172 | 1.557269 | 2 |
interview/leet/147_Insertion_Sort_List_Challenge.py | eroicaleo/LearningPython | 1 | 13662 | #!/usr/bin/env python
from linklist import *
class Solution:
    def insertionSortList(self, head):
        """Sort a singly linked list ascending via insertion sort on values.

        Rather than re-linking nodes, values are shifted: for each visited
        node, find the first sorted slot whose value exceeds it, then ripple
        values forward one slot up to the visited node.  Returns the original
        head node (which now carries the smallest value); None for an empty
        list.
        """
        dumm = head
        while head:
            # val: value being inserted; head advances to the next unsorted
            # node; prev rescans the sorted prefix from the front.
            val, head, prev = head.val, head.next, dumm
            # BUGFIX: must be strict '>'.  With '>=' the scan never stops at
            # the node being inserted (val >= its own value), walks into the
            # unsorted region and dereferences None -- e.g. on [4,2,1,3].
            while val > prev.val:
                prev = prev.next
            # Shift values from the insertion point through the current node.
            while prev != head:
                prev.val, prev, val = val, prev.next, prev.val
        return dumm
# Exercise the sorter on a mix of inputs: unsorted, with negatives,
# two elements, a singleton and the empty list.
sol = Solution()
nodeStringList = [
    '[4,2,1,3]',
    '[-1,5,3,4,0]',
    '[3,2]',
    '[23]',
    '[]'
]
for nodeString in nodeStringList:
    # linkListBuilder/traverse come from the local `linklist` helper module.
    head = linkListBuilder(nodeString)
    traverse(head)
    traverse(sol.insertionSortList(head))
| #!/usr/bin/env python
from linklist import *
class Solution:
    def insertionSortList(self, head):
        """Sort a singly linked list ascending via insertion sort on values.

        Rather than re-linking nodes, values are shifted: for each visited
        node, find the first sorted slot whose value exceeds it, then ripple
        values forward one slot up to the visited node.  Returns the original
        head node (which now carries the smallest value); None for an empty
        list.
        """
        dumm = head
        while head:
            # val: value being inserted; head advances to the next unsorted
            # node; prev rescans the sorted prefix from the front.
            val, head, prev = head.val, head.next, dumm
            # BUGFIX: must be strict '>'.  With '>=' the scan never stops at
            # the node being inserted (val >= its own value), walks into the
            # unsorted region and dereferences None -- e.g. on [4,2,1,3].
            while val > prev.val:
                prev = prev.next
            # Shift values from the insertion point through the current node.
            while prev != head:
                prev.val, prev, val = val, prev.next, prev.val
        return dumm
# Exercise the sorter on a mix of inputs: unsorted, with negatives,
# two elements, a singleton and the empty list.
sol = Solution()
nodeStringList = [
    '[4,2,1,3]',
    '[-1,5,3,4,0]',
    '[3,2]',
    '[23]',
    '[]'
]
for nodeString in nodeStringList:
    # linkListBuilder/traverse come from the local `linklist` helper module.
    head = linkListBuilder(nodeString)
    traverse(head)
    traverse(sol.insertionSortList(head))
| es | 0.159471 | 3.781444 | 4 |
robocrm/migrations/0020_auto_20141027_0145.py | CMU-Robotics-Club/roboticsclub.org | 0 | 13663 | <reponame>CMU-Robotics-Club/roboticsclub.org<filename>robocrm/migrations/0020_auto_20141027_0145.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop both secondary-major columns and relax cell/magnetic options."""

    dependencies = [
        ('robocrm', '0019_auto_20141021_1157'),
    ]

    operations = [
        # Remove the two secondary-major columns from RoboUser.
        migrations.RemoveField(
            model_name='robouser',
            name='sec_major_one',
        ),
        migrations.RemoveField(
            model_name='robouser',
            name='sec_major_two',
        ),
        # Make cell optional (blank/null) and attach help text.
        migrations.AlterField(
            model_name='robouser',
            name='cell',
            field=models.DecimalField(help_text='Cell Phone # if you wish to provide it to Officers', blank=True, decimal_places=0, null=True, max_digits=10),
        ),
        # Make the 9-character magnetic card ID optional as well.
        migrations.AlterField(
            model_name='robouser',
            name='magnetic',
            field=models.CharField(help_text='9 Character Magnetic Card ID(found on Student ID)', max_length=9, null=True, blank=True),
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop both secondary-major columns and relax cell/magnetic options."""

    dependencies = [
        ('robocrm', '0019_auto_20141021_1157'),
    ]

    operations = [
        # Remove the two secondary-major columns from RoboUser.
        migrations.RemoveField(
            model_name='robouser',
            name='sec_major_one',
        ),
        migrations.RemoveField(
            model_name='robouser',
            name='sec_major_two',
        ),
        # Make cell optional (blank/null) and attach help text.
        migrations.AlterField(
            model_name='robouser',
            name='cell',
            field=models.DecimalField(help_text='Cell Phone # if you wish to provide it to Officers', blank=True, decimal_places=0, null=True, max_digits=10),
        ),
        # Make the 9-character magnetic card ID optional as well.
        migrations.AlterField(
            model_name='robouser',
            name='magnetic',
            field=models.CharField(help_text='9 Character Magnetic Card ID(found on Student ID)', max_length=9, null=True, blank=True),
        ),
    ]
src/tests/flow.py | SeleSchaefer/super_resolution | 5 | 13664 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import imageio
import numpy as np

from tar.miscellaneous import convert_flow_to_color

# Load two consecutive frames and convert to grayscale for flow estimation.
prev = imageio.imread("ressources/1_1.png")
prev = cv2.cvtColor(prev, cv2.COLOR_RGB2GRAY)
curr = imageio.imread("ressources/1_2.png")
curr = cv2.cvtColor(curr, cv2.COLOR_RGB2GRAY)

# Dense Farneback optical flow between the two frames.
# NOTE(review): positional args presumably map to pyr_scale=0.9, levels=15,
# winsize=20, iterations=100, poly_n=10, poly_sigma=1.5 -- verify against
# the OpenCV calcOpticalFlowFarneback signature.
flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.9, 15, 20, 100, 10, 1.5, cv2.OPTFLOW_FARNEBACK_GAUSSIAN)

# Render the flow field as an RGB image and save it.
rgb = convert_flow_to_color(flow)
imageio.imsave("/Users/sele/Desktop/test.png", rgb)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import imageio
import numpy as np

from tar.miscellaneous import convert_flow_to_color

# Load two consecutive frames and convert to grayscale for flow estimation.
prev = imageio.imread("ressources/1_1.png")
prev = cv2.cvtColor(prev, cv2.COLOR_RGB2GRAY)
curr = imageio.imread("ressources/1_2.png")
curr = cv2.cvtColor(curr, cv2.COLOR_RGB2GRAY)

# Dense Farneback optical flow between the two frames.
# NOTE(review): positional args presumably map to pyr_scale=0.9, levels=15,
# winsize=20, iterations=100, poly_n=10, poly_sigma=1.5 -- verify against
# the OpenCV calcOpticalFlowFarneback signature.
flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.9, 15, 20, 100, 10, 1.5, cv2.OPTFLOW_FARNEBACK_GAUSSIAN)

# Render the flow field as an RGB image and save it.
rgb = convert_flow_to_color(flow)
imageio.imsave("/Users/sele/Desktop/test.png", rgb)
| es | 0.148637 | 2.689129 | 3 |
assignment4/utils.py | nicedi/ML_course_projects | 0 | 13665 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def plot_loss(model, n_iter):
    """Plot the training and validation loss curves recorded on *model*."""
    plt.figure()
    # Blue = training, red = validation; line order matches the legend below.
    plt.plot(model.trainloss, 'b-')
    plt.plot(model.validloss, 'r-')
    plt.xlim(0, n_iter)
    plt.xlabel('iteration')
    plt.ylabel('loss')
    plt.title('learning curve')
    plt.legend(['training loss', 'validation loss'])
    plt.show()
def plot_F1(model, n_iter):
    """Plot the training and validation F1-score curves recorded on *model*."""
    plt.figure()
    # Blue = training, red = validation; line order matches the legend below.
    plt.plot(model.trainF1, 'b-')
    plt.plot(model.validF1, 'r-')
    plt.xlim(0, n_iter)
    plt.xlabel('iteration')
    plt.ylabel('F1 score')
    plt.title('F1 metric curve')
    plt.legend(['training F1', 'validation F1'], loc='lower right')
    plt.show()
def confusion_matrix(threshold, y_hat, y_target):
    """Compute binary confusion-matrix counts by thresholding predictions.

    (Original comments, translated: "Task 2: implement this function; it
    should return the four values TP, FP, FN, TN. Predictions above the
    threshold become 1, otherwise 0; compare y_hat against y_target.")

    Args:
        threshold: scalar decision threshold applied to ``y_hat``.
        y_hat: array of raw scores; entries strictly greater than
            ``threshold`` are predicted positive.
        y_target: array of ground-truth 0/1 labels, same shape as ``y_hat``.

    Returns:
        Tuple ``(TP, FP, FN, TN)`` of plain ints.
    """
    pred = (np.asarray(y_hat) > threshold).astype(np.int32)
    target = np.asarray(y_target).astype(np.int32)
    TP = int(np.sum((pred == 1) & (target == 1)))
    FP = int(np.sum((pred == 1) & (target == 0)))
    FN = int(np.sum((pred == 0) & (target == 1)))
    TN = int(np.sum((pred == 0) & (target == 0)))
    return TP, FP, FN, TN
| # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def plot_loss(model, n_iter):
    """Plot the training and validation loss curves recorded on *model*."""
    plt.figure()
    # Blue = training, red = validation; line order matches the legend below.
    plt.plot(model.trainloss, 'b-')
    plt.plot(model.validloss, 'r-')
    plt.xlim(0, n_iter)
    plt.xlabel('iteration')
    plt.ylabel('loss')
    plt.title('learning curve')
    plt.legend(['training loss', 'validation loss'])
    plt.show()
def plot_F1(model, n_iter):
    """Plot the training and validation F1-score curves recorded on *model*."""
    plt.figure()
    # Blue = training, red = validation; line order matches the legend below.
    plt.plot(model.trainF1, 'b-')
    plt.plot(model.validF1, 'r-')
    plt.xlim(0, n_iter)
    plt.xlabel('iteration')
    plt.ylabel('F1 score')
    plt.title('F1 metric curve')
    plt.legend(['training F1', 'validation F1'], loc='lower right')
    plt.show()
def confusion_matrix(threshold, y_hat, y_target):
    """Compute binary confusion-matrix counts by thresholding predictions.

    (Original comments, translated: "Task 2: implement this function; it
    should return the four values TP, FP, FN, TN. Predictions above the
    threshold become 1, otherwise 0; compare y_hat against y_target.")

    Args:
        threshold: scalar decision threshold applied to ``y_hat``.
        y_hat: array of raw scores; entries strictly greater than
            ``threshold`` are predicted positive.
        y_target: array of ground-truth 0/1 labels, same shape as ``y_hat``.

    Returns:
        Tuple ``(TP, FP, FN, TN)`` of plain ints.
    """
    pred = (np.asarray(y_hat) > threshold).astype(np.int32)
    target = np.asarray(y_target).astype(np.int32)
    TP = int(np.sum((pred == 1) & (target == 1)))
    FP = int(np.sum((pred == 1) & (target == 0)))
    FN = int(np.sum((pred == 0) & (target == 1)))
    TN = int(np.sum((pred == 0) & (target == 0)))
    return TP, FP, FN, TN
| zh | 0.941145 | 3.481705 | 3 |
post/migrations/0009_auto_20171207_2320.py | silvareal/personal-blog | 2 | 13666 | <filename>post/migrations/0009_auto_20171207_2320.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-07 22:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Swap Post.category for an inline choices CharField; drop Category model."""

    dependencies = [
        ('post', '0008_auto_20171207_2256'),
    ]

    operations = [
        # Drop the existing 'category' column...
        migrations.RemoveField(
            model_name='post',
            name='category',
        ),
        # ...and re-add it as a plain CharField with a fixed choice set.
        migrations.AddField(
            model_name='post',
            name='category',
            field=models.CharField(choices=[('frontend', 'Frontend'), ('backend', 'Backend'), ('interview', 'Interview'), ('devop', 'Devop')], default='backend', max_length=15),
        ),
        # The standalone Category model is no longer needed.
        migrations.DeleteModel(
            name='Category',
        ),
    ]
| <filename>post/migrations/0009_auto_20171207_2320.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-07 22:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Swap Post.category for an inline choices CharField; drop Category model."""

    dependencies = [
        ('post', '0008_auto_20171207_2256'),
    ]

    operations = [
        # Drop the existing 'category' column...
        migrations.RemoveField(
            model_name='post',
            name='category',
        ),
        # ...and re-add it as a plain CharField with a fixed choice set.
        migrations.AddField(
            model_name='post',
            name='category',
            field=models.CharField(choices=[('frontend', 'Frontend'), ('backend', 'Backend'), ('interview', 'Interview'), ('devop', 'Devop')], default='backend', max_length=15),
        ),
        # The standalone Category model is no longer needed.
        migrations.DeleteModel(
            name='Category',
        ),
    ]
| es | 0.140981 | 1.618108 | 2 |
tests/test_utils_project.py | FingerCrunch/scrapy | 41,267 | 13667 | <gh_stars>1000+
import unittest
import os
import tempfile
import shutil
import contextlib
from pytest import warns
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.project import data_path, get_project_settings
@contextlib.contextmanager
def inside_a_project():
    """Create a temp project dir (with an empty scrapy.cfg), chdir into it.

    Yields the project directory path; on exit the previous working
    directory is restored and the temp directory is removed.
    """
    prev_dir = os.getcwd()
    project_dir = tempfile.mkdtemp()
    try:
        os.chdir(project_dir)
        # An empty scrapy.cfg is enough to mark the directory as a project
        # root.  (Original called f.close() inside the `with`, which is
        # redundant -- the context manager already closes the file.)
        with open('scrapy.cfg', 'w'):
            pass
        yield project_dir
    finally:
        os.chdir(prev_dir)
        shutil.rmtree(project_dir)
class ProjectUtilsTest(unittest.TestCase):
    """Tests for scrapy.utils.project.data_path."""

    def test_data_path_outside_project(self):
        # Outside a project, relative paths resolve under a local .scrapy dir.
        self.assertEqual(
            os.path.join('.scrapy', 'somepath'),
            data_path('somepath')
        )
        # Absolute paths are returned unchanged.
        abspath = os.path.join(os.path.sep, 'absolute', 'path')
        self.assertEqual(abspath, data_path(abspath))

    def test_data_path_inside_project(self):
        # Inside a project (scrapy.cfg present), .scrapy lives at the
        # project root; realpath() normalizes symlinked temp dirs.
        with inside_a_project() as proj_path:
            expected = os.path.join(proj_path, '.scrapy', 'somepath')
            self.assertEqual(
                os.path.realpath(expected),
                os.path.realpath(data_path('somepath'))
            )
            # Absolute paths are still returned unchanged.
            abspath = os.path.join(os.path.sep, 'absolute', 'path')
            self.assertEqual(abspath, data_path(abspath))
@contextlib.contextmanager
def set_env(**update):
    """Temporarily apply *update* to os.environ; restore prior state on exit."""
    # Split the overrides into keys that already exist (save their old
    # values) and keys that are brand new (delete them afterwards).
    overlapping = set(update) & set(os.environ)
    saved = {name: os.environ[name] for name in overlapping}
    added = frozenset(name for name in update if name not in os.environ)
    try:
        os.environ.update(update)
        yield
    finally:
        os.environ.update(saved)
        for name in added:
            os.environ.pop(name)
class GetProjectSettingsTestCase(unittest.TestCase):
    """Tests for how get_project_settings consumes SCRAPY_* env vars."""

    def test_valid_envvar(self):
        # SCRAPY_SETTINGS_MODULE is the supported override; it must be
        # honoured without emitting any warning.
        # NOTE(review): warns(None) (capture-all) is deprecated in newer
        # pytest versions -- kept byte-identical here.
        value = 'tests.test_cmdline.settings'
        envvars = {
            'SCRAPY_SETTINGS_MODULE': value,
        }
        with set_env(**envvars), warns(None) as warnings:
            settings = get_project_settings()
        assert not warnings
        assert settings.get('SETTINGS_MODULE') == value

    def test_invalid_envvar(self):
        # An unknown SCRAPY_-prefixed variable triggers exactly one
        # deprecation warning that names the offending suffix.
        envvars = {
            'SCRAPY_FOO': 'bar',
        }
        with set_env(**envvars), warns(None) as warnings:
            get_project_settings()
        assert len(warnings) == 1
        assert warnings[0].category == ScrapyDeprecationWarning
        assert str(warnings[0].message).endswith(': FOO')

    def test_valid_and_invalid_envvars(self):
        # A valid SCRAPY_SETTINGS_MODULE still takes effect even when an
        # unknown SCRAPY_ variable is present alongside it.
        value = 'tests.test_cmdline.settings'
        envvars = {
            'SCRAPY_FOO': 'bar',
            'SCRAPY_SETTINGS_MODULE': value,
        }
        with set_env(**envvars), warns(None) as warnings:
            settings = get_project_settings()
        assert len(warnings) == 1
        assert warnings[0].category == ScrapyDeprecationWarning
        assert str(warnings[0].message).endswith(': FOO')
        assert settings.get('SETTINGS_MODULE') == value
| import unittest
import os
import tempfile
import shutil
import contextlib
from pytest import warns
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.project import data_path, get_project_settings
@contextlib.contextmanager
def inside_a_project():
    """Create a temp project dir (with an empty scrapy.cfg), chdir into it.

    Yields the project directory path; on exit the previous working
    directory is restored and the temp directory is removed.
    """
    prev_dir = os.getcwd()
    project_dir = tempfile.mkdtemp()
    try:
        os.chdir(project_dir)
        # An empty scrapy.cfg is enough to mark the directory as a project
        # root.  (Original called f.close() inside the `with`, which is
        # redundant -- the context manager already closes the file.)
        with open('scrapy.cfg', 'w'):
            pass
        yield project_dir
    finally:
        os.chdir(prev_dir)
        shutil.rmtree(project_dir)
class ProjectUtilsTest(unittest.TestCase):
    """Tests for scrapy.utils.project.data_path."""

    def test_data_path_outside_project(self):
        # Outside a project, relative paths resolve under a local .scrapy dir.
        self.assertEqual(
            os.path.join('.scrapy', 'somepath'),
            data_path('somepath')
        )
        # Absolute paths are returned unchanged.
        abspath = os.path.join(os.path.sep, 'absolute', 'path')
        self.assertEqual(abspath, data_path(abspath))

    def test_data_path_inside_project(self):
        # Inside a project (scrapy.cfg present), .scrapy lives at the
        # project root; realpath() normalizes symlinked temp dirs.
        with inside_a_project() as proj_path:
            expected = os.path.join(proj_path, '.scrapy', 'somepath')
            self.assertEqual(
                os.path.realpath(expected),
                os.path.realpath(data_path('somepath'))
            )
            # Absolute paths are still returned unchanged.
            abspath = os.path.join(os.path.sep, 'absolute', 'path')
            self.assertEqual(abspath, data_path(abspath))
@contextlib.contextmanager
def set_env(**update):
    """Temporarily apply *update* to os.environ; restore prior state on exit."""
    # Split the overrides into keys that already exist (save their old
    # values) and keys that are brand new (delete them afterwards).
    overlapping = set(update) & set(os.environ)
    saved = {name: os.environ[name] for name in overlapping}
    added = frozenset(name for name in update if name not in os.environ)
    try:
        os.environ.update(update)
        yield
    finally:
        os.environ.update(saved)
        for name in added:
            os.environ.pop(name)
class GetProjectSettingsTestCase(unittest.TestCase):
    """Tests for how get_project_settings consumes SCRAPY_* env vars."""

    def test_valid_envvar(self):
        # SCRAPY_SETTINGS_MODULE is the supported override; it must be
        # honoured without emitting any warning.
        # NOTE(review): warns(None) (capture-all) is deprecated in newer
        # pytest versions -- kept byte-identical here.
        value = 'tests.test_cmdline.settings'
        envvars = {
            'SCRAPY_SETTINGS_MODULE': value,
        }
        with set_env(**envvars), warns(None) as warnings:
            settings = get_project_settings()
        assert not warnings
        assert settings.get('SETTINGS_MODULE') == value

    def test_invalid_envvar(self):
        # An unknown SCRAPY_-prefixed variable triggers exactly one
        # deprecation warning that names the offending suffix.
        envvars = {
            'SCRAPY_FOO': 'bar',
        }
        with set_env(**envvars), warns(None) as warnings:
            get_project_settings()
        assert len(warnings) == 1
        assert warnings[0].category == ScrapyDeprecationWarning
        assert str(warnings[0].message).endswith(': FOO')

    def test_valid_and_invalid_envvars(self):
        # A valid SCRAPY_SETTINGS_MODULE still takes effect even when an
        # unknown SCRAPY_ variable is present alongside it.
        value = 'tests.test_cmdline.settings'
        envvars = {
            'SCRAPY_FOO': 'bar',
            'SCRAPY_SETTINGS_MODULE': value,
        }
        with set_env(**envvars), warns(None) as warnings:
            settings = get_project_settings()
        assert len(warnings) == 1
        assert warnings[0].category == ScrapyDeprecationWarning
        assert str(warnings[0].message).endswith(': FOO')
        assert settings.get('SETTINGS_MODULE') == value
trainer/__init__.py | Greeser/gate-decorator-pruning | 192 | 13668 | from trainer.normal import NormalTrainer
from config import cfg
def get_trainer():
    """Instantiate the trainer class selected by cfg.train.trainer."""
    trainers = {
        'normal': NormalTrainer,
    }
    name = cfg.train.trainer
    assert (name in trainers)
    return trainers[name]()
| from trainer.normal import NormalTrainer
from config import cfg
def get_trainer():
    """Instantiate the trainer class selected by cfg.train.trainer."""
    trainers = {
        'normal': NormalTrainer,
    }
    name = cfg.train.trainer
    assert (name in trainers)
    return trainers[name]()
| none | 1 | 2.44744 | 2 |
Tasks/Community/ts_scriptExamples/pythonLogging.py | nneul/Velocity-assets | 4 | 13669 | #!/usr/bin/python
import logging
# create logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create file handler which and set level to debug
fh = logging.FileHandler('pythonLogging.log')
fh.setLevel(logging.WARNING)
# create formatter
formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
# add formatter to ch and fh
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add ch and fh to logger
logger.addHandler(ch)
logger.addHandler(fh)
# "application" code
logger.debug("debug message")
logger.info("info message")
logger.warn("warn message")
logger.error("error message")
logger.critical("critical message")
print('\nDone')
#!/usr/bin/python
import logging

# Root logger: capture everything; the handlers below filter further.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# Console handler shows all records at DEBUG and above.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)

# File handler only records WARNING and above.
# (Original comment wrongly said "set level to debug".)
fh = logging.FileHandler('pythonLogging.log')
fh.setLevel(logging.WARNING)

# Shared timestamped format for both handlers.
formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
ch.setFormatter(formatter)
fh.setFormatter(formatter)

# Attach both handlers to the root logger.
logger.addHandler(ch)
logger.addHandler(fh)

# "application" code exercising each level.
logger.debug("debug message")
logger.info("info message")
logger.warning("warn message")  # Logger.warn() is deprecated since Python 3.3
logger.error("error message")
logger.critical("critical message")

print('\nDone')
| pt | 0.210334 | 3.253845 | 3 |
google/datalab/commands/_datalab.py | freyrsae/pydatalab | 198 | 13670 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Platform library - datalab cell magic."""
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import IPython
import IPython.core.display
import IPython.core.magic
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import google.datalab.utils.commands
@IPython.core.magic.register_line_cell_magic
def datalab(line, cell=None):
  """Implements the datalab cell magic for ipython notebooks.

  Args:
    line: the contents of the datalab line.
  Returns:
    The results of executing the cell.
  """
  # Top-level parser; each subcommand registers its handler via
  # set_defaults(func=...) and handle_magic_line dispatches at the end.
  parser = google.datalab.utils.commands.CommandParser(
      prog='%datalab',
      description="""
Execute operations that apply to multiple Datalab APIs.

Use "%datalab <command> -h" for help on a specific command.
""")

  # `config` group: inspect or mutate API-specific configuration.
  config_parser = parser.subcommand(
      'config', help='List or set API-specific configurations.')
  config_sub_commands = config_parser.add_subparsers(dest='command')

  # %%datalab config list
  config_list_parser = config_sub_commands.add_parser(
      'list', help='List configurations')
  config_list_parser.set_defaults(func=_config_list_fn)

  # %%datalab config set -n <NAME> -v <VALUE>
  config_set_parser = config_sub_commands.add_parser(
      'set', help='Set configurations')
  config_set_parser.add_argument(
      '-n', '--name',
      help='The name of the configuration value', required=True)
  config_set_parser.add_argument(
      '-v', '--value', help='The value to set', required=True)
  config_set_parser.set_defaults(func=_config_set_fn)

  # `project` group: inspect or change the default project ID.
  project_parser = parser.subcommand(
      'project', help='Get or set the default project ID')
  project_sub_commands = project_parser.add_subparsers(dest='command')

  # %%datalab project get
  project_get_parser = project_sub_commands.add_parser(
      'get', help='Get the default project ID')
  project_get_parser.set_defaults(func=_project_get_fn)

  # %%datalab project set -p <PROJECT_ID>
  project_set_parser = project_sub_commands.add_parser(
      'set', help='Set the default project ID')
  project_set_parser.add_argument(
      '-p', '--project', help='The default project ID', required=True)
  project_set_parser.set_defaults(func=_project_set_fn)

  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
def _config_list_fn(args, cell):
  """Render the current default-context configuration as a table."""
  context = google.datalab.Context.default()
  return google.datalab.utils.commands.render_dictionary([context.config])
def _config_set_fn(args, cell):
  """Set one configuration value on the default context; show the result."""
  context = google.datalab.Context.default()
  context.config[args['name']] = args['value']
  return google.datalab.utils.commands.render_dictionary([context.config])
def _project_get_fn(args, cell):
  """Render the default project ID as plain text."""
  context = google.datalab.Context.default()
  return google.datalab.utils.commands.render_text(context.project_id)
def _project_set_fn(args, cell):
  """Change the default project ID on the shared context (renders nothing)."""
  context = google.datalab.Context.default()
  context.set_project_id(args['project'])
| # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Platform library - datalab cell magic."""
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import IPython
import IPython.core.display
import IPython.core.magic
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import google.datalab.utils.commands
@IPython.core.magic.register_line_cell_magic
def datalab(line, cell=None):
  """Implements the datalab cell magic for ipython notebooks.

  Args:
    line: the contents of the datalab line.
  Returns:
    The results of executing the cell.
  """
  # Top-level parser; each subcommand registers its handler via
  # set_defaults(func=...) and handle_magic_line dispatches at the end.
  parser = google.datalab.utils.commands.CommandParser(
      prog='%datalab',
      description="""
Execute operations that apply to multiple Datalab APIs.

Use "%datalab <command> -h" for help on a specific command.
""")

  # `config` group: inspect or mutate API-specific configuration.
  config_parser = parser.subcommand(
      'config', help='List or set API-specific configurations.')
  config_sub_commands = config_parser.add_subparsers(dest='command')

  # %%datalab config list
  config_list_parser = config_sub_commands.add_parser(
      'list', help='List configurations')
  config_list_parser.set_defaults(func=_config_list_fn)

  # %%datalab config set -n <NAME> -v <VALUE>
  config_set_parser = config_sub_commands.add_parser(
      'set', help='Set configurations')
  config_set_parser.add_argument(
      '-n', '--name',
      help='The name of the configuration value', required=True)
  config_set_parser.add_argument(
      '-v', '--value', help='The value to set', required=True)
  config_set_parser.set_defaults(func=_config_set_fn)

  # `project` group: inspect or change the default project ID.
  project_parser = parser.subcommand(
      'project', help='Get or set the default project ID')
  project_sub_commands = project_parser.add_subparsers(dest='command')

  # %%datalab project get
  project_get_parser = project_sub_commands.add_parser(
      'get', help='Get the default project ID')
  project_get_parser.set_defaults(func=_project_get_fn)

  # %%datalab project set -p <PROJECT_ID>
  project_set_parser = project_sub_commands.add_parser(
      'set', help='Set the default project ID')
  project_set_parser.add_argument(
      '-p', '--project', help='The default project ID', required=True)
  project_set_parser.set_defaults(func=_project_set_fn)

  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
def _config_list_fn(args, cell):
  """Render the current default-context configuration as a table."""
  context = google.datalab.Context.default()
  return google.datalab.utils.commands.render_dictionary([context.config])
def _config_set_fn(args, cell):
  """Set one configuration value on the default context; show the result."""
  context = google.datalab.Context.default()
  context.config[args['name']] = args['value']
  return google.datalab.utils.commands.render_dictionary([context.config])
def _project_get_fn(args, cell):
  """Render the default project ID as plain text."""
  context = google.datalab.Context.default()
  return google.datalab.utils.commands.render_text(context.project_id)
def _project_set_fn(args, cell):
  """Change the default project ID on the shared context (renders nothing)."""
  context = google.datalab.Context.default()
  context.set_project_id(args['project'])
| pt | 0.202321 | 2.19565 | 2 |
src/server/__main__.py | ENDERZOMBI102/chatapp | 1 | 13671 | from sys import argv
from server.AServer import AServer
if '--old' in argv:
from server.server import Server
Server()
else:
AServer( websocket='--websocket' in argv ).Start()
| from sys import argv
from server.AServer import AServer
if '--old' in argv:
from server.server import Server
Server()
else:
AServer( websocket='--websocket' in argv ).Start()
| none | 1 | 1.995227 | 2 |
testing/tests/registers.py | Wynjones1/gbvhdl | 0 | 13672 | #!/usr/bin/env python2.7
from common import *
from random import randint, choice
# Register-select encodings used by the register-file testbench:
# 0b0xxx selects an 8-bit register, 0b1xxx a 16-bit pair or SP/PC.
# Binary literals replace the original int("....", 2) calls -- identical
# values, no runtime parsing.
registers = {
    "a":  0b0000,
    "f":  0b0001,
    "b":  0b0010,
    "c":  0b0011,
    "d":  0b0100,
    "e":  0b0101,
    "h":  0b0110,
    "l":  0b0111,
    "af": 0b1000,
    "bc": 0b1001,
    "de": 0b1010,
    "hl": 0b1011,
    "sp": 0b1100,
    "pc": 0b1101,
}
def output_line(fp, reg_write, reg_read, we,
                write_data, read_data, reg_w_name, reg_r_name):
    """Write one stimulus line: binary-encoded fields plus a name comment."""
    fields = (
        to_bin(reg_write, 4),
        to_bin(reg_read, 4),
        "1" if we else "0",
        to_bin(write_data, 16),
        to_bin(read_data, 16),
        reg_w_name,
        reg_r_name,
    )
    fp.write("%s %s %s %s %s #%s %s\n" % fields)
class Registers(object):
    """Software model of the register file: eight 8-bit registers plus SP/PC."""

    def __init__(self):
        self.regs = [0] * 8
        self.sp = 0
        self.pc = 0

    def write(self, reg, value):
        """Store *value* into *reg*; 16-bit pairs are split into hi/lo bytes."""
        pairs = {"af": ("a", "f"), "bc": ("b", "c"),
                 "de": ("d", "e"), "hl": ("h", "l")}
        if reg in pairs:
            hi, lo = pairs[reg]
            self.regs[registers[hi]] = (value >> 8) & 0xff
            self.regs[registers[lo]] = value & 0xff
        elif reg == "sp":
            self.sp = value
        elif reg == "pc":
            self.pc = value
        else:
            self.regs[registers[reg]] = value & 0xff

    def read(self, reg):
        """Return the current value of *reg* (16 bits wide for pairs/SP/PC)."""
        pairs = {"af": ("a", "f"), "bc": ("b", "c"),
                 "de": ("d", "e"), "hl": ("h", "l")}
        if reg in pairs:
            hi, lo = pairs[reg]
            return self.regs[registers[hi]] << 8 | self.regs[registers[lo]]
        if reg == "sp":
            return self.sp
        if reg == "pc":
            return self.pc
        return self.regs[registers[reg]]

    def random_op(self):
        """Generate one random operation, apply any write, return its fields."""
        do_write = randint(0, 1)
        w_name = choice(registers.keys())
        r_name = choice(registers.keys())
        data = randint(0, 0xffff)
        # Read is sampled before the write is applied, matching the DUT
        # timing assumed by the stimulus format.
        seen = self.read(r_name)
        if do_write:
            self.write(w_name, data)
        return (registers[w_name], registers[r_name],
                do_write, data, seen, w_name, r_name)
def main():
    # Generate one million random register-file operations as a stimulus
    # file for the VHDL testbench.
    fp = open("registers.txt", "w")
    reg = Registers()
    m = 1000000
    for i in xrange(m):  # NOTE: xrange -> this is a Python 2 script (see shebang)
        if i % 10000 == 0:
            # Progress indicator as a percentage.
            f = 100 * float(i) / float(m)
            print("%s" % f)
        output_line(fp, *reg.random_op())

if __name__ == "__main__":
    main()
| #!/usr/bin/env python2.7
from common import *
from random import randint, choice
# Register-select encodings used by the register-file testbench:
# 0b0xxx selects an 8-bit register, 0b1xxx a 16-bit pair or SP/PC.
# Binary literals replace the original int("....", 2) calls -- identical
# values, no runtime parsing.
registers = {
    "a":  0b0000,
    "f":  0b0001,
    "b":  0b0010,
    "c":  0b0011,
    "d":  0b0100,
    "e":  0b0101,
    "h":  0b0110,
    "l":  0b0111,
    "af": 0b1000,
    "bc": 0b1001,
    "de": 0b1010,
    "hl": 0b1011,
    "sp": 0b1100,
    "pc": 0b1101,
}
def output_line(fp, reg_write, reg_read, we,
                write_data, read_data, reg_w_name, reg_r_name):
    """Write one stimulus line: binary-encoded fields plus a name comment."""
    fields = (
        to_bin(reg_write, 4),
        to_bin(reg_read, 4),
        "1" if we else "0",
        to_bin(write_data, 16),
        to_bin(read_data, 16),
        reg_w_name,
        reg_r_name,
    )
    fp.write("%s %s %s %s %s #%s %s\n" % fields)
class Registers(object):
    """Software model of the register file: eight 8-bit registers plus SP/PC."""

    def __init__(self):
        self.regs = [0] * 8
        self.sp = 0
        self.pc = 0

    def write(self, reg, value):
        """Store *value* into *reg*; 16-bit pairs are split into hi/lo bytes."""
        pairs = {"af": ("a", "f"), "bc": ("b", "c"),
                 "de": ("d", "e"), "hl": ("h", "l")}
        if reg in pairs:
            hi, lo = pairs[reg]
            self.regs[registers[hi]] = (value >> 8) & 0xff
            self.regs[registers[lo]] = value & 0xff
        elif reg == "sp":
            self.sp = value
        elif reg == "pc":
            self.pc = value
        else:
            self.regs[registers[reg]] = value & 0xff

    def read(self, reg):
        """Return the current value of *reg* (16 bits wide for pairs/SP/PC)."""
        pairs = {"af": ("a", "f"), "bc": ("b", "c"),
                 "de": ("d", "e"), "hl": ("h", "l")}
        if reg in pairs:
            hi, lo = pairs[reg]
            return self.regs[registers[hi]] << 8 | self.regs[registers[lo]]
        if reg == "sp":
            return self.sp
        if reg == "pc":
            return self.pc
        return self.regs[registers[reg]]

    def random_op(self):
        """Generate one random operation, apply any write, return its fields."""
        do_write = randint(0, 1)
        w_name = choice(registers.keys())
        r_name = choice(registers.keys())
        data = randint(0, 0xffff)
        # Read is sampled before the write is applied, matching the DUT
        # timing assumed by the stimulus format.
        seen = self.read(r_name)
        if do_write:
            self.write(w_name, data)
        return (registers[w_name], registers[r_name],
                do_write, data, seen, w_name, r_name)
def main():
    # Generate one million random register-file operations as a stimulus
    # file for the VHDL testbench.
    fp = open("registers.txt", "w")
    reg = Registers()
    m = 1000000
    for i in xrange(m):  # NOTE: xrange -> this is a Python 2 script (see shebang)
        if i % 10000 == 0:
            # Progress indicator as a percentage.
            f = 100 * float(i) / float(m)
            print("%s" % f)
        output_line(fp, *reg.random_op())

if __name__ == "__main__":
    main()
| de | 0.18976 | 2.932178 | 3 |
DiscordRPC/__init__.py | EterNomm/discord-rpc | 4 | 13673 | <filename>DiscordRPC/__init__.py
from .presence import *
from .button import button
from .exceptions import *
#from .get_current_app import GCAR (Disabling due to a bug)
__title__ = "Discord-RPC"
__version__ = "3.5"
__authors__ = "LyQuid"
__license__ = "Apache License 2.0"
__copyright__ = "Copyright 2021-present LyQuid"
| <filename>DiscordRPC/__init__.py
from .presence import *
from .button import button
from .exceptions import *
#from .get_current_app import GCAR (Disabling due to a bug)
__title__ = "Discord-RPC"
__version__ = "3.5"
__authors__ = "LyQuid"
__license__ = "Apache License 2.0"
__copyright__ = "Copyright 2021-present LyQuid"
| en | 0.131632 | 1.40381 | 1 |
brax/training/ars.py | benelot/brax | 1 | 13674 | <reponame>benelot/brax
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Augmented Random Search training.
See: https://arxiv.org/pdf/1803.07055.pdf
"""
import time
from typing import Any, Callable, Dict, Optional
from absl import logging
from brax import envs
from brax.training import env
from brax.training import networks
from brax.training import normalization
import flax
import jax
import jax.numpy as jnp
import optax
# Opaque alias for parameter pytrees (network weights, normalizer stats).
Params = Any


@flax.struct.dataclass
class TrainingState:
  """Contains training state for the learner."""
  key: jnp.ndarray              # PRNG key threaded through each epoch.
  normalizer_params: Params     # Running observation-normalizer statistics.
  policy_params: Params         # Linear policy matrix (obs_size x act_size).
def make_ars_model(act_size: int, obs_size: int):
  """Build the ARS linear policy: observations times a single weight matrix."""
  def init_fn(unused_key):
    # Weights start at zero, as in the original ARS formulation.
    return jnp.zeros((obs_size, act_size))

  def apply_fn(matrix, obs):
    return jnp.matmul(obs, matrix)

  return networks.FeedForwardModel(init=init_fn, apply=apply_fn)
def get_policy_head(head_type):
  """Returns the squashing function applied to raw policy outputs.

  Args:
    head_type: '' (identity), 'clip' (clamp to [-1, 1]) or 'tanh'.

  Returns:
    A function mapping raw parameters/actions to squashed actions.

  Raises:
    ValueError: (from the returned function) if head_type is unknown.
  """
  def head(params):
    if not head_type:
      return params
    if head_type == 'clip':
      return jnp.clip(params, -1, 1)
    if head_type == 'tanh':
      return jnp.tanh(params)
    # BUGFIX: the original `assert f'policy head type ... is not known'`
    # asserted a non-empty string, which is always truthy, so an unknown
    # head_type silently returned None.  Raise explicitly instead.
    raise ValueError(f'policy head type {head_type} is not known')
  return head
def train(
environment_fn: Callable[..., envs.Env],
num_timesteps: int = 100,
log_frequency: int = 1,
episode_length: int = 1000,
action_repeat: int = 1,
num_eval_envs: int = 128,
seed: int = 0,
normalize_observations: bool = False,
step_size: float = 0.015,
max_devices_per_host: Optional[int] = None,
number_of_directions: int = 60,
exploration_noise_std: float = 0.025,
top_directions: int = 20,
head_type: str = '',
reward_shift: float = 0.0,
progress_fn: Optional[Callable[[int, Dict[str, Any]], None]] = None,
):
"""ARS."""
# TODO: pmap it
max_devices_per_host = 1
xt = time.time()
top_directions = min(top_directions, number_of_directions)
num_envs = number_of_directions * 2 # antitethic
epochs = 1 + num_timesteps // episode_length // num_envs
log_frequency = min(log_frequency, epochs)
process_count = jax.process_count()
process_id = jax.process_index()
local_device_count = jax.local_device_count()
local_devices_to_use = local_device_count
if max_devices_per_host:
local_devices_to_use = min(local_devices_to_use, max_devices_per_host)
logging.info(
'Device count: %d, process count: %d (id %d), local device count: %d, '
'devices to be used count: %d',
jax.device_count(), process_count, process_id, local_device_count,
local_devices_to_use)
key = jax.random.PRNGKey(seed)
key, key_model, key_env, key_eval = jax.random.split(key, 4)
core_env = environment_fn(
action_repeat=action_repeat,
batch_size=num_envs // local_devices_to_use // process_count,
episode_length=episode_length)
first_state, step_fn = env.wrap(core_env, key_env)
core_eval_env = environment_fn(
action_repeat=action_repeat,
batch_size=num_eval_envs,
episode_length=episode_length)
eval_first_state, eval_step_fn = env.wrap(core_eval_env, key_eval)
_, obs_size = eval_first_state.core.obs.shape
policy_model = make_ars_model(core_env.action_size, obs_size)
policy_head = get_policy_head(head_type)
normalizer_params, obs_normalizer_update_fn, obs_normalizer_apply_fn = (
normalization.create_observation_normalizer(
obs_size, normalize_observations, num_leading_batch_dims=1,
apply_clipping=False))
policy_params = policy_model.init(key_model)
def do_one_step_eval(carry, unused_target_t):
state, policy_params, normalizer_params = carry
obs = obs_normalizer_apply_fn(normalizer_params, state.core.obs)
actions = policy_head(policy_model.apply(policy_params, obs))
nstate = eval_step_fn(state, actions)
return (nstate, policy_params, normalizer_params), ()
@jax.jit
def run_eval(state, policy_params, normalizer_params) -> env.EnvState:
(state, _, _), _ = jax.lax.scan(
do_one_step_eval, (state, policy_params, normalizer_params), (),
length=episode_length // action_repeat)
return state
@jax.vmap
def training_inference(params, obs):
return policy_model.apply(params, obs)
def do_one_step(carry, unused_target_t):
state, policy_params, cumulative_reward, normalizer_params = carry
obs = obs_normalizer_apply_fn(normalizer_params, state.core.obs)
actions = policy_head(training_inference(policy_params, obs))
nstate = step_fn(state, actions)
cumulative_reward = cumulative_reward + nstate.core.reward - reward_shift
return (nstate, policy_params, cumulative_reward,
normalizer_params), state.core.obs
def run_ars_eval(state, params, normalizer_params):
cumulative_reward = jnp.zeros(state.core.obs.shape[0])
(state, _, cumulative_reward, _), obs = jax.lax.scan(
do_one_step, (state, params, cumulative_reward, normalizer_params),
(), length=episode_length // action_repeat)
return cumulative_reward, obs, state
def add_noise(params, key):
noise = jax.random.normal(key, shape=params.shape, dtype=params.dtype)
params_with_noise = params + noise * exploration_noise_std
anit_params_with_noise = params - noise * exploration_noise_std
return params_with_noise, anit_params_with_noise, noise
def ars_one_epoch(carry, unused_t):
state, training_state = carry
params = jnp.repeat(jnp.expand_dims(training_state.policy_params, axis=0),
num_envs // 2, axis=0)
key, key_petr = jax.random.split(training_state.key)
# generate perturbations
params_with_noise, params_with_anti_noise, noise = add_noise(
params, key_petr)
pparams = jnp.concatenate([params_with_noise, params_with_anti_noise],
axis=0)
eval_scores, obs, state = run_ars_eval(
state, pparams, training_state.normalizer_params)
obs = jnp.reshape(obs, [-1] + list(obs.shape[2:]))
normalizer_params = obs_normalizer_update_fn(
training_state.normalizer_params, obs)
reward_plus, reward_minus = jnp.split(eval_scores, 2, axis=0)
reward_max = jnp.maximum(reward_plus, reward_minus)
reward_rank = jnp.argsort(jnp.argsort(-reward_max))
reward_weight = jnp.where(reward_rank < top_directions, 1, 0)
reward_weight_double = jnp.concatenate([reward_weight, reward_weight],
axis=0)
reward_std = jnp.std(eval_scores, where=reward_weight_double)
noise = jnp.sum(jnp.transpose(jnp.transpose(noise) * reward_weight *
(reward_plus - reward_minus)), axis=0)
policy_params = (training_state.policy_params +
step_size / (top_directions * reward_std) * noise)
metrics = {
'params_norm': optax.global_norm(policy_params),
'eval_scores_mean': jnp.mean(eval_scores),
'eval_scores_std': jnp.std(eval_scores),
'reward_std': reward_std,
'weights': jnp.mean(reward_weight),
}
return (state,
TrainingState(
key=key,
normalizer_params=normalizer_params,
policy_params=policy_params)), metrics
epochs_per_step = (epochs + log_frequency - 1) // log_frequency
@jax.jit
def run_ars(state, training_state):
(state, training_state), metrics = jax.lax.scan(
ars_one_epoch, (state, training_state), (), length=epochs_per_step)
return state, training_state, jax.tree_map(jnp.mean, metrics)
training_state = TrainingState(key=key,
normalizer_params=normalizer_params,
policy_params=policy_params)
training_walltime = 0
eval_walltime = 0
sps = 0
eval_sps = 0
metrics = {}
summary = {}
state = first_state
for it in range(log_frequency + 1):
logging.info('starting iteration %s %s', it, time.time() - xt)
t = time.time()
if process_id == 0:
eval_state = run_eval(eval_first_state,
training_state.policy_params,
training_state.normalizer_params)
eval_state.completed_episodes.block_until_ready()
eval_walltime += time.time() - t
eval_sps = (
episode_length * eval_first_state.core.reward.shape[0] /
(time.time() - t))
avg_episode_length = (
eval_state.completed_episodes_steps / eval_state.completed_episodes)
metrics = dict(
dict({
f'eval/episode_{name}': value / eval_state.completed_episodes
for name, value in eval_state.completed_episodes_metrics.items()
}),
**dict({
f'train/{name}': value for name, value in summary.items()
}),
**dict({
'eval/completed_episodes': eval_state.completed_episodes,
'eval/episode_length': avg_episode_length,
'speed/sps': sps,
'speed/eval_sps': eval_sps,
'speed/training_walltime': training_walltime,
'speed/eval_walltime': eval_walltime,
'speed/timestamp': training_walltime,
}))
logging.info('Step %s metrics %s',
int(training_state.normalizer_params[0]) * action_repeat,
metrics)
if progress_fn:
progress_fn(int(training_state.normalizer_params[0]) * action_repeat,
metrics)
if it == log_frequency:
break
t = time.time()
# optimization
state, training_state, summary = run_ars(state, training_state)
jax.tree_map(lambda x: x.block_until_ready(), training_state)
sps = episode_length * num_envs * epochs_per_step / (
time.time() - t)
training_walltime += time.time() - t
_, inference = make_params_and_inference_fn(core_env.observation_size,
core_env.action_size,
normalize_observations,
head_type)
params = training_state.normalizer_params, training_state.policy_params
return (inference, params, metrics)
def make_params_and_inference_fn(observation_size, action_size,
                                 normalize_observations, head_type=None):
  """Builds initial (normalizer, policy) params plus an inference function.

  The returned inference function maps (params, observation, rng) to an
  action by normalizing the observation, applying the linear policy and
  finally the configured policy head.
  """
  normalizer_defaults, apply_normalizer = normalization.make_data_and_apply_fn(
      observation_size, normalize_observations, apply_clipping=False)
  head_fn = get_policy_head(head_type)
  model = make_ars_model(action_size, observation_size)

  def inference_fn(params, obs, unused_rng):
    normalizer_params, policy_params = params
    normalized = apply_normalizer(normalizer_params, obs)
    return head_fn(model.apply(policy_params, normalized))

  # The linear model initializes to zeros, so the PRNG key value is inert.
  initial_params = (normalizer_defaults, model.init(jax.random.PRNGKey(0)))
  return initial_params, inference_fn
| # Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Augmented Random Search training.
See: https://arxiv.org/pdf/1803.07055.pdf
"""
import time
from typing import Any, Callable, Dict, Optional
from absl import logging
from brax import envs
from brax.training import env
from brax.training import networks
from brax.training import normalization
import flax
import jax
import jax.numpy as jnp
import optax
Params = Any
@flax.struct.dataclass
class TrainingState:
  """Contains training state for the learner."""
  # PRNG key split each epoch to draw fresh perturbation noise.
  key: jnp.ndarray
  # Observation-normalizer state; element 0 is read as a step counter when
  # logging in ``train`` -- TODO confirm the exact layout upstream.
  normalizer_params: Params
  # Linear policy weights of shape (obs_size, action_size).
  policy_params: Params
def make_ars_model(act_size: int, obs_size: int):
  """Creates the linear ARS policy model.

  The parameters are a single (obs_size, act_size) matrix initialized to
  zeros; applying the model multiplies the observation by that matrix.
  """

  def init(unused_key):
    return jnp.zeros((obs_size, act_size))

  def apply(matrix, obs):
    return jnp.matmul(obs, matrix)

  return networks.FeedForwardModel(init=init, apply=apply)
def get_policy_head(head_type):
  """Returns a function squashing raw policy outputs into actions.

  Args:
    head_type: '' (or any falsy value) for identity, 'clip' to clip into
      [-1, 1], 'tanh' for a tanh squash.

  Returns:
    A callable applied to the raw policy output.

  Raises:
    ValueError: (from the returned callable) if ``head_type`` is unknown.
  """
  def head(params):
    if not head_type:
      return params
    if head_type == 'clip':
      return jnp.clip(params, -1, 1)
    if head_type == 'tanh':
      return jnp.tanh(params)
    # Previously this was `assert f'...'`, which never fails because a
    # non-empty string is truthy -- unknown head types silently returned
    # None. Fail loudly instead.
    raise ValueError(f'policy head type {head_type} is not known')
  return head
def train(
    environment_fn: Callable[..., envs.Env],
    num_timesteps: int = 100,
    log_frequency: int = 1,
    episode_length: int = 1000,
    action_repeat: int = 1,
    num_eval_envs: int = 128,
    seed: int = 0,
    normalize_observations: bool = False,
    step_size: float = 0.015,
    max_devices_per_host: Optional[int] = None,
    number_of_directions: int = 60,
    exploration_noise_std: float = 0.025,
    top_directions: int = 20,
    head_type: str = '',
    reward_shift: float = 0.0,
    progress_fn: Optional[Callable[[int, Dict[str, Any]], None]] = None,
):
  """Trains a linear policy with Augmented Random Search (ARS).

  See https://arxiv.org/pdf/1803.07055.pdf. Each epoch samples
  ``number_of_directions`` Gaussian perturbations of the policy matrix,
  evaluates each at +noise and -noise (antithetic sampling), and updates the
  policy along the reward-weighted noise of the ``top_directions`` best
  directions.

  Args:
    environment_fn: factory returning a batched brax environment.
    num_timesteps: total environment steps to train for.
    log_frequency: number of evaluation/logging rounds over the run.
    action_repeat: number of physics steps per action.
    num_eval_envs: batch size of the dedicated evaluation environment.
    seed: PRNG seed.
    normalize_observations: whether to keep running observation statistics.
    step_size: ARS learning rate.
    max_devices_per_host: currently forced to 1 (no pmap yet).
    exploration_noise_std: std of the parameter perturbations.
    top_directions: how many best directions contribute to the update.
    head_type: policy head ('', 'clip' or 'tanh').
    reward_shift: constant subtracted from every per-step reward.
    progress_fn: optional callback invoked with (step, metrics).

  Returns:
    A tuple (inference_fn, (normalizer_params, policy_params), metrics).
  """
  # TODO: pmap it
  max_devices_per_host = 1
  xt = time.time()
  top_directions = min(top_directions, number_of_directions)
  num_envs = number_of_directions * 2  # antithetic: +noise and -noise rollouts
  epochs = 1 + num_timesteps // episode_length // num_envs
  log_frequency = min(log_frequency, epochs)
  process_count = jax.process_count()
  process_id = jax.process_index()
  local_device_count = jax.local_device_count()
  local_devices_to_use = local_device_count
  if max_devices_per_host:
    local_devices_to_use = min(local_devices_to_use, max_devices_per_host)
  logging.info(
      'Device count: %d, process count: %d (id %d), local device count: %d, '
      'devices to be used count: %d',
      jax.device_count(), process_count, process_id, local_device_count,
      local_devices_to_use)
  key = jax.random.PRNGKey(seed)
  key, key_model, key_env, key_eval = jax.random.split(key, 4)
  # Training env: one sub-env per perturbed policy on this process.
  core_env = environment_fn(
      action_repeat=action_repeat,
      batch_size=num_envs // local_devices_to_use // process_count,
      episode_length=episode_length)
  first_state, step_fn = env.wrap(core_env, key_env)
  # Separate environment used only for evaluation rollouts.
  core_eval_env = environment_fn(
      action_repeat=action_repeat,
      batch_size=num_eval_envs,
      episode_length=episode_length)
  eval_first_state, eval_step_fn = env.wrap(core_eval_env, key_eval)
  _, obs_size = eval_first_state.core.obs.shape
  policy_model = make_ars_model(core_env.action_size, obs_size)
  policy_head = get_policy_head(head_type)
  normalizer_params, obs_normalizer_update_fn, obs_normalizer_apply_fn = (
      normalization.create_observation_normalizer(
          obs_size, normalize_observations, num_leading_batch_dims=1,
          apply_clipping=False))
  policy_params = policy_model.init(key_model)

  def do_one_step_eval(carry, unused_target_t):
    # One env step with the *single* (unperturbed) policy under evaluation.
    state, policy_params, normalizer_params = carry
    obs = obs_normalizer_apply_fn(normalizer_params, state.core.obs)
    actions = policy_head(policy_model.apply(policy_params, obs))
    nstate = eval_step_fn(state, actions)
    return (nstate, policy_params, normalizer_params), ()

  @jax.jit
  def run_eval(state, policy_params, normalizer_params) -> env.EnvState:
    # Rolls out a full evaluation episode via lax.scan.
    (state, _, _), _ = jax.lax.scan(
        do_one_step_eval, (state, policy_params, normalizer_params), (),
        length=episode_length // action_repeat)
    return state

  @jax.vmap
  def training_inference(params, obs):
    # vmapped over the batch of perturbed policies (one per sub-env).
    return policy_model.apply(params, obs)

  def do_one_step(carry, unused_target_t):
    state, policy_params, cumulative_reward, normalizer_params = carry
    obs = obs_normalizer_apply_fn(normalizer_params, state.core.obs)
    actions = policy_head(training_inference(policy_params, obs))
    nstate = step_fn(state, actions)
    # reward_shift lets e.g. survival bonuses be removed from the signal.
    cumulative_reward = cumulative_reward + nstate.core.reward - reward_shift
    return (nstate, policy_params, cumulative_reward,
            normalizer_params), state.core.obs

  def run_ars_eval(state, params, normalizer_params):
    # Evaluates the whole batch of perturbed policies for one episode and
    # also collects the visited observations for the normalizer update.
    cumulative_reward = jnp.zeros(state.core.obs.shape[0])
    (state, _, cumulative_reward, _), obs = jax.lax.scan(
        do_one_step, (state, params, cumulative_reward, normalizer_params),
        (), length=episode_length // action_repeat)
    return cumulative_reward, obs, state

  def add_noise(params, key):
    # Antithetic perturbations: params +/- noise * std.
    noise = jax.random.normal(key, shape=params.shape, dtype=params.dtype)
    params_with_noise = params + noise * exploration_noise_std
    anit_params_with_noise = params - noise * exploration_noise_std
    return params_with_noise, anit_params_with_noise, noise

  def ars_one_epoch(carry, unused_t):
    state, training_state = carry
    # Replicate the current policy once per direction.
    params = jnp.repeat(jnp.expand_dims(training_state.policy_params, axis=0),
                        num_envs // 2, axis=0)
    key, key_petr = jax.random.split(training_state.key)
    # generate perturbations
    params_with_noise, params_with_anti_noise, noise = add_noise(
        params, key_petr)
    pparams = jnp.concatenate([params_with_noise, params_with_anti_noise],
                              axis=0)
    eval_scores, obs, state = run_ars_eval(
        state, pparams, training_state.normalizer_params)
    # Flatten (time, batch, ...) observations for the normalizer update.
    obs = jnp.reshape(obs, [-1] + list(obs.shape[2:]))
    normalizer_params = obs_normalizer_update_fn(
        training_state.normalizer_params, obs)
    # Rank directions by max(reward+, reward-) and keep the top ones.
    reward_plus, reward_minus = jnp.split(eval_scores, 2, axis=0)
    reward_max = jnp.maximum(reward_plus, reward_minus)
    reward_rank = jnp.argsort(jnp.argsort(-reward_max))
    reward_weight = jnp.where(reward_rank < top_directions, 1, 0)
    reward_weight_double = jnp.concatenate([reward_weight, reward_weight],
                                           axis=0)
    # Std over the selected directions' scores scales the step size.
    # NOTE(review): reward_std can be 0 when all selected scores are equal,
    # which would divide by zero below -- confirm callers avoid this regime.
    reward_std = jnp.std(eval_scores, where=reward_weight_double)
    noise = jnp.sum(jnp.transpose(jnp.transpose(noise) * reward_weight *
                                  (reward_plus - reward_minus)), axis=0)
    policy_params = (training_state.policy_params +
                     step_size / (top_directions * reward_std) * noise)
    metrics = {
        'params_norm': optax.global_norm(policy_params),
        'eval_scores_mean': jnp.mean(eval_scores),
        'eval_scores_std': jnp.std(eval_scores),
        'reward_std': reward_std,
        'weights': jnp.mean(reward_weight),
    }
    return (state,
            TrainingState(
                key=key,
                normalizer_params=normalizer_params,
                policy_params=policy_params)), metrics

  # Number of ARS epochs executed between two logging rounds.
  epochs_per_step = (epochs + log_frequency - 1) // log_frequency

  @jax.jit
  def run_ars(state, training_state):
    (state, training_state), metrics = jax.lax.scan(
        ars_one_epoch, (state, training_state), (), length=epochs_per_step)
    return state, training_state, jax.tree_map(jnp.mean, metrics)

  training_state = TrainingState(key=key,
                                 normalizer_params=normalizer_params,
                                 policy_params=policy_params)
  training_walltime = 0
  eval_walltime = 0
  sps = 0
  eval_sps = 0
  metrics = {}
  summary = {}
  state = first_state
  for it in range(log_frequency + 1):
    logging.info('starting iteration %s %s', it, time.time() - xt)
    t = time.time()
    if process_id == 0:
      # Evaluation + metric aggregation happens on the first process only.
      eval_state = run_eval(eval_first_state,
                            training_state.policy_params,
                            training_state.normalizer_params)
      eval_state.completed_episodes.block_until_ready()
      eval_walltime += time.time() - t
      eval_sps = (
          episode_length * eval_first_state.core.reward.shape[0] /
          (time.time() - t))
      avg_episode_length = (
          eval_state.completed_episodes_steps / eval_state.completed_episodes)
      metrics = dict(
          dict({
              f'eval/episode_{name}': value / eval_state.completed_episodes
              for name, value in eval_state.completed_episodes_metrics.items()
          }),
          **dict({
              f'train/{name}': value for name, value in summary.items()
          }),
          **dict({
              'eval/completed_episodes': eval_state.completed_episodes,
              'eval/episode_length': avg_episode_length,
              'speed/sps': sps,
              'speed/eval_sps': eval_sps,
              'speed/training_walltime': training_walltime,
              'speed/eval_walltime': eval_walltime,
              'speed/timestamp': training_walltime,
          }))
      # normalizer_params[0] is used as the number of observed steps here.
      logging.info('Step %s metrics %s',
                   int(training_state.normalizer_params[0]) * action_repeat,
                   metrics)
      if progress_fn:
        progress_fn(int(training_state.normalizer_params[0]) * action_repeat,
                    metrics)
    if it == log_frequency:
      break
    t = time.time()
    # optimization
    state, training_state, summary = run_ars(state, training_state)
    jax.tree_map(lambda x: x.block_until_ready(), training_state)
    sps = episode_length * num_envs * epochs_per_step / (
        time.time() - t)
    training_walltime += time.time() - t

  _, inference = make_params_and_inference_fn(core_env.observation_size,
                                              core_env.action_size,
                                              normalize_observations,
                                              head_type)
  params = training_state.normalizer_params, training_state.policy_params
  return (inference, params, metrics)
def make_params_and_inference_fn(observation_size, action_size,
                                 normalize_observations, head_type=None):
  """Creates params and inference function for the ES agent.

  Args:
    observation_size: dimensionality of observations.
    action_size: dimensionality of actions.
    normalize_observations: whether observations are normalized before the
      policy is applied.
    head_type: optional policy head ('', 'clip' or 'tanh').

  Returns:
    A tuple of initial (normalizer_params, policy_params) and an inference
    function mapping (params, obs, rng) -> action.
  """
  obs_normalizer_params, obs_normalizer_apply_fn = normalization.make_data_and_apply_fn(
      observation_size, normalize_observations, apply_clipping=False)
  policy_head = get_policy_head(head_type)
  policy_model = make_ars_model(action_size, observation_size)

  def inference_fn(params, obs, unused_rng):
    normalizer_params, policy_params = params
    obs = obs_normalizer_apply_fn(normalizer_params, obs)
    action = policy_head(policy_model.apply(policy_params, obs))
    return action

  # The linear policy initializes to zeros, so the fixed PRNG key is inert.
  params = (obs_normalizer_params, policy_model.init(jax.random.PRNGKey(0)))
  return params, inference_fn
docs/api/conf.py | kagemeka/selext | 1 | 13675 | import os
import sys
def find_docs_root() -> str:
    """Return the path of the enclosing "docs" directory of this file.

    Walks up from this file's absolute path, dropping trailing components
    until one named "docs" is found; raises IndexError when this file does
    not live under a "docs" directory.
    """
    parts = os.path.abspath(__file__).split(os.path.sep)
    while parts[-1] != "docs":
        parts.pop()
    return os.path.sep.join(parts)
sys.path.append(find_docs_root())
from _rtd_conf import *
from _sphinx_conf import *
| import os
import sys
def find_docs_root() -> str:
    """Locate the nearest ancestor directory named "docs".

    Raises IndexError when this file does not live under a "docs" directory.
    """
    filepath = os.path.abspath(__file__)
    path_chunks = filepath.split(os.path.sep)
    # Drop trailing path components until the path ends at "docs".
    while path_chunks[-1] != "docs":
        path_chunks.pop()
    return os.path.sep.join(path_chunks)
sys.path.append(find_docs_root())
from _rtd_conf import *
from _sphinx_conf import *
| none | 1 | 2.333118 | 2 |
src/graph/cli/server.py | clayman-micro/graph | 0 | 13676 | import socket
import click
import uvicorn # type: ignore
def get_address(default: str = "127.0.0.1") -> str:
    """Best-effort detection of this host's IPv4 address.

    Tries to resolve the local hostname first; if that fails, connects a UDP
    socket "towards" a public address (no packets are actually sent) and
    reads back the local address chosen by the routing table. Returns
    ``default`` as a last resort.
    """
    try:
        ip_address = socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 1))
            ip_address = s.getsockname()[0]
        except OSError:
            # BUG FIX: connect() on an unroutable/offline network raises
            # OSError, not socket.gaierror; gaierror is a subclass of
            # OSError, so this is a backward-compatible broadening.
            ip_address = default
        finally:
            s.close()
    return ip_address
@click.group()
@click.pass_context
def server(ctx):
    # Root "server" command group; subcommands such as ``run`` attach to it.
    pass
@server.command()
@click.option("--host", default=None, help="Specify application host")
@click.option("--port", default=5000, help="Specify application port")
@click.pass_context
def run(ctx, host, port):
    # Validate the port: must be numeric and inside the unprivileged range.
    try:
        port = int(port)
        # BUG FIX: the original condition used `and`, which no port can
        # satisfy, so out-of-range ports were silently accepted.
        if port < 1024 or port > 65535:
            raise RuntimeError("Port should be from 1024 to 65535")
    except ValueError:
        raise RuntimeError("Port should be numeric")

    if not host:
        # No host requested: serve on loopback only.
        host = "127.0.0.1"
        address = "127.0.0.1"
    else:
        # A host was requested: bind the autodetected machine address.
        # NOTE(review): the provided host value itself is never used past
        # this branch -- confirm that is intended.
        address = get_address()

    uvicorn.run(
        "graph:init",
        host=address,
        port=port,
        access_log=False,
        log_level="info",
        log_config=None,
        loop="uvloop",
        factory=True,
    )
| import socket
import click
import uvicorn # type: ignore
def get_address(default: str = "127.0.0.1") -> str:
    """Best-effort lookup of this machine's IPv4 address.

    Falls back to routing-table introspection via a UDP socket when the
    hostname cannot be resolved; returns ``default`` as a last resort.
    """
    try:
        ip_address = socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        # Connecting a UDP socket sends no packets; it only asks the OS
        # which local address would be used to reach the target.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 1))
            ip_address = s.getsockname()[0]
        except socket.gaierror:
            # NOTE(review): connect() failures typically raise OSError, not
            # gaierror -- confirm this except clause can actually trigger.
            ip_address = default
        finally:
            s.close()
    return ip_address
@click.group()
@click.pass_context
def server(ctx):
    # Root "server" command group; subcommands such as ``run`` attach to it.
    pass
@server.command()
@click.option("--host", default=None, help="Specify application host")
@click.option("--port", default=5000, help="Specify application port")
@click.pass_context
def run(ctx, host, port):
    # Validate the port: must be numeric and inside the unprivileged range.
    try:
        port = int(port)
        # BUG FIX: the original condition used `and`, which no port can
        # satisfy, so out-of-range ports were silently accepted.
        if port < 1024 or port > 65535:
            raise RuntimeError("Port should be from 1024 to 65535")
    except ValueError:
        raise RuntimeError("Port should be numeric")

    if not host:
        # No host requested: serve on loopback only.
        host = "127.0.0.1"
        address = "127.0.0.1"
    else:
        # A host was requested: bind the autodetected machine address.
        # NOTE(review): the provided host value itself is never used past
        # this branch -- confirm that is intended.
        address = get_address()

    uvicorn.run(
        "graph:init",
        host=address,
        port=port,
        access_log=False,
        log_level="info",
        log_config=None,
        loop="uvloop",
        factory=True,
    )
| es | 0.23329 | 2.532118 | 3 |
settings/libs.py | skylifewww/pangolinreact | 0 | 13677 | # grappelli
# django-grappelli admin-skin customization.
GRAPPELLI_ADMIN_TITLE = 'pangolin - Administration panel'

# django-rest-framework settings; kept commented out until pagination and
# search query parameters are re-enabled.
# REST_FRAMEWORK = {
#     'PAGINATE_BY_PARAM': 'limit',
#     'SEARCH_PARAM': 'q'
# }
| # grappelli
# django-grappelli admin-skin customization.
GRAPPELLI_ADMIN_TITLE = 'pangolin - Administration panel'

# django-rest-framework settings; kept commented out until pagination and
# search query parameters are re-enabled.
# REST_FRAMEWORK = {
#     'PAGINATE_BY_PARAM': 'limit',
#     'SEARCH_PARAM': 'q'
# }
| en | 0.295275 | 1.032193 | 1 |
kubespawner/clients.py | moskiGithub/spawner_test | 0 | 13678 | <reponame>moskiGithub/spawner_test
"""Shared clients for kubernetes
avoids creating multiple kubernetes client objects,
each of which spawns an unused max-size thread pool
"""
from unittest.mock import Mock
import weakref
import kubernetes.client
from kubernetes.client import api_client
# FIXME: remove when instantiating a kubernetes client
# doesn't create N-CPUs threads unconditionally.
# monkeypatch threadpool in kubernetes api_client
# to avoid instantiating ThreadPools.
# This is known to work for kubernetes-4.0
# and may need updating with later kubernetes clients
_dummy_pool = Mock()
# Every ThreadPool "construction" now returns the same inert mock object.
api_client.ThreadPool = lambda *args, **kwargs: _dummy_pool

# Maps (ClientType, args, sorted kwargs) -> weakref to the shared client.
_client_cache = {}
def shared_client(ClientType, *args, **kwargs):
    """Get a shared kubernetes client instance for the given arguments.

    Instances are cached behind weak references, so concurrent callers with
    identical arguments receive the same client object until every strong
    reference to it has been dropped.
    """
    # Sorting kwargs makes equivalent calls hash to the same cache key.
    cache_key = (ClientType, args, tuple(sorted(kwargs.items())))
    ref = _client_cache.get(cache_key)
    # A cached weakref may already be dead, in which case it yields None.
    client = ref() if ref is not None else None
    if client is None:
        client_cls = getattr(kubernetes.client, ClientType)
        client = client_cls(*args, **kwargs)
        # Store only a weak reference so unused clients can be collected.
        _client_cache[cache_key] = weakref.ref(client)
    return client
| """Shared clients for kubernetes
avoids creating multiple kubernetes client objects,
each of which spawns an unused max-size thread pool
"""
from unittest.mock import Mock
import weakref
import kubernetes.client
from kubernetes.client import api_client
# FIXME: remove when instantiating a kubernetes client
# doesn't create N-CPUs threads unconditionally.
# monkeypatch threadpool in kubernetes api_client
# to avoid instantiating ThreadPools.
# This is known to work for kubernetes-4.0
# and may need updating with later kubernetes clients
_dummy_pool = Mock()
api_client.ThreadPool = lambda *args, **kwargs: _dummy_pool
_client_cache = {}
def shared_client(ClientType, *args, **kwargs):
    """Return a single shared kubernetes client instance

    A weak reference to the instance is cached,
    so that concurrent calls to shared_client
    will all return the same instance until
    all references to the client are cleared.
    """
    # Sort kwargs so that equivalent calls produce the same hashable key.
    kwarg_key = tuple((key, kwargs[key]) for key in sorted(kwargs))
    cache_key = (ClientType, args, kwarg_key)
    client = None
    if cache_key in _client_cache:
        # resolve cached weakref
        # client can still be None after this!
        client = _client_cache[cache_key]()
    if client is None:
        # ClientType names a class inside the kubernetes.client module.
        Client = getattr(kubernetes.client, ClientType)
        client = Client(*args, **kwargs)
        # cache weakref so that clients can be garbage collected
        _client_cache[cache_key] = weakref.ref(client)
    return client
return client | it | 0.191892 | 2.369999 | 2 |
src/syncgitlab2msproject/gitlab_issues.py | lcv3/SyncGitlab2MSProject | 0 | 13679 | <reponame>lcv3/SyncGitlab2MSProject<gh_stars>0
import dateutil.parser
from datetime import datetime
from functools import lru_cache
from gitlab import Gitlab
from gitlab.v4.objects import Project
from logging import getLogger
from typing import Dict, List, Optional, Union
from .custom_types import GitlabIssue, GitlabUserDict
from .exceptions import MovedIssueNotDefined
from .funcions import warn_once
logger = getLogger(f"{__package__}.{__name__}")
def get_user_identifier(user_dict: GitlabUserDict) -> str:
    """Extract the identifier of a Gitlab user dict.

    Kept as a separate function so the notion of "user identity" can be
    changed in one place later if required.
    """
    identifier = user_dict["name"]
    return str(identifier)
class Issue:
    """
    Wrapper class around Group/Project Issues
    """

    # The issue object itself is not dynamic only the contained obj is!
    __slots__ = [
        "obj",
        "_moved_reference",
        "_fixed_group_id",
    ]

    def __init__(self, obj: GitlabIssue, fixed_group_id: Optional[int] = None):
        """
        :param obj: the raw python-gitlab issue object to wrap
        :param fixed_group_id: Do not extract the group_id from the
                               Gitlab issue but assume it is fixed
        """
        self._fixed_group_id = fixed_group_id
        self.obj: GitlabIssue = obj
        self._moved_reference: Optional[Issue] = None

    def __getattr__(self, item: str):
        """Default to get the values from the original wrapped object"""
        return getattr(self.obj, item)

    @property
    def moved_reference(self) -> Optional["Issue"]:
        """
        get the reference to the moved issue if defined

        :exceptions MovedIssueNotDefined
        """
        if self.moved_to_id is None:
            return None
        else:
            if self._moved_reference is None:
                raise MovedIssueNotDefined(
                    "The issue is marked as moved but was not referenced "
                    "in the loaded issues, so tracking is not possible."
                )
            else:
                return self._moved_reference

    @moved_reference.setter
    def moved_reference(self, value: "Issue"):
        if not isinstance(value, Issue):
            raise ValueError("Can only set an Issue object as moved reference!")
        self._moved_reference = value

    def __str__(self):
        return f"'{self.title}' (ID: {self.id})"

    # **************************************************************
    # *** Define some default properties to allow static typing ***
    # **************************************************************
    @property
    def id(self) -> int:
        """
        The id of an issue - it seems to be unique within an installation
        """
        return self.obj.id

    @property
    def iid(self) -> int:
        # Project/group-local issue number.
        return self.obj.iid

    @property
    def project_id(self) -> int:
        return self.obj.project_id

    @property
    def group_id(self) -> Optional[int]:
        """
        Return the group id, if negative a user id is given

        The group ID is either taken from the issue itself or if a project is given
        the issue is fixed (see #7)

        If group_id isn't fixed and can't be extracted, only give a warning and do
        not fail, as it isn't required to have the sync working. Only the
        issue id or weblink is used to find the related issue.

        See `sync.py`
          * `get_issue_ref_from_task`
          * `IssueFinder`
        """
        if self._fixed_group_id is not None:
            return self._fixed_group_id
        try:
            return self.obj.group_id
        except AttributeError:
            warn_once(
                logger,
                "Could not extract group_id from Issue. "
                "This is not required for syncing, so I will continue.",
            )
        return None

    @property
    def has_tasks(self) -> bool:
        return self.obj.has_tasks

    @property
    def is_closed(self) -> bool:
        # Gitlab reports the state as a string such as "closed"/"opened".
        return str(self.obj.state).lower().strip().startswith("closed")

    @property
    def is_open(self):
        return not self.is_closed

    @property
    def percentage_tasks_done(self) -> int:
        """
        Percentage of tasks done, 0 if no tasks are defined and not closed.
        By definition always 100 if issue is closed (and not moved)

        :exceptions MovedIssueNotDefined
        """
        if self.is_closed:
            if self.moved_to_id is not None:
                # BUG FIX: go through the ``moved_reference`` property so an
                # unresolved reference raises MovedIssueNotDefined as
                # documented. The previous bare ``assert`` raised
                # AssertionError instead and disappeared under ``python -O``.
                moved = self.moved_reference
                assert moved is not None  # guaranteed: moved_to_id is set
                return moved.percentage_tasks_done
            return 100
        if not self.has_tasks:
            return 0
        task = self.task_completion_status
        return round(task["completed_count"] / task["count"] * 100)

    @property
    def moved_to_id(self) -> Optional[int]:
        return self.obj.moved_to_id

    @property
    def title(self) -> str:
        return self.obj.title

    @property
    def description(self) -> str:
        return self.obj.description

    @property
    def closed_at(self) -> Optional[datetime]:
        if (val := self.obj.closed_at) is not None:
            return dateutil.parser.parse(val)
        return None

    @property
    def created_at(self) -> Optional[datetime]:
        if (val := self.obj.created_at) is not None:
            return dateutil.parser.parse(val)
        return None

    @property
    def due_date(self) -> Optional[datetime]:
        if (val := self.obj.due_date) is not None:
            return dateutil.parser.parse(val)
        return None

    @property
    def closed_by(self) -> Optional[str]:
        if (val := self.obj.closed_by) is not None:
            return get_user_identifier(val)
        return None

    def _get_from_time_stats(self, key) -> Optional[float]:
        """
        Somehow the python-gitlab API seems to be not 100% fixed,
        see issue #9

        :param key: key to query from time stats
        :return: the value if existing or none
        """
        query_dict: Dict[str, float]
        # time_stats is sometimes a method and sometimes an attribute.
        if callable(self.obj.time_stats):
            query_dict = self.obj.time_stats()
        else:
            query_dict = self.obj.time_stats
        return query_dict.get(key, None)

    @property
    def time_estimated(self) -> Optional[float]:
        """
        Time estimated in minutes
        """
        if (time_estimate := self._get_from_time_stats("time_estimate")) is not None:
            return time_estimate / 60
        else:
            logger.warning("Time Estimate is None")
            return None

    @property
    def time_spent_total(self) -> Optional[float]:
        """
        Total time spent in minutes
        """
        if (time_spend := self._get_from_time_stats("total_time_spent")) is not None:
            return time_spend / 60
        else:
            logger.warning("Time spend is None")
            return None

    @property
    def assignees(self) -> List[str]:
        """
        list of Gitlab Assignees.

        Note in the community edition only one assignee is possible
        """
        return [get_user_identifier(user) for user in self.obj.assignees]

    @property
    def labels(self) -> List[str]:
        """
        list of labels
        """
        return self.obj.labels

    @property
    def full_ref(self) -> str:
        """
        give the full reference through which the issue can be accessed
        """
        return self.obj.attributes['references']['full']

    @property
    def web_url(self) -> str:
        """
        give the url from which the issue can be accessed
        """
        return self.obj.web_url
@lru_cache(10)
def get_group_id_from_gitlab_project(project: Project) -> Optional[int]:
    """
    Get the namespace id from a gitlab project.

    If the namespace of the project is a user (not a group), a negative
    value is returned; otherwise the positive group id.

    :param project: the python-gitlab project to inspect
    :return: signed namespace id, or None if it cannot be extracted
    """
    try:
        namespace: Dict[str, Union[int, str]] = project.namespace
    except AttributeError:
        # Missing namespace is tolerated; syncing works without it.
        logger.warning(
            f"Could not extract name space for project '{project.get_id()}' - "
            "This error will be ignored."
        )
        return None
    if str(namespace["kind"]).lower() == "user":
        # Encode "user namespace" as a negative id.
        return -int(namespace["id"])
    else:
        return int(namespace["id"])
def get_gitlab_class(server: str, personal_token: Optional[str] = None,
                     ssl_verify: bool = False) -> Gitlab:
    """
    Create a python-gitlab API client.

    :param server: base URL of the Gitlab instance
    :param personal_token: optional private token for authenticated access
    :param ssl_verify: whether to verify TLS certificates. Defaults to
        ``False`` for backward compatibility, but disabling certificate
        verification is insecure -- pass ``True`` wherever possible.
    """
    if personal_token is None:
        return Gitlab(server, ssl_verify=ssl_verify)
    else:
        return Gitlab(server, private_token=personal_token,
                      ssl_verify=ssl_verify)
def get_group_issues(gitlab: Gitlab, group_id: int) -> List[Issue]:
    """Fetch all issues of a group, wrapped in ``Issue`` objects."""
    group = gitlab.groups.get(group_id, lazy=True)
    return list(map(Issue, group.issues.list(all=True)))
def get_project_issues(gitlab: Gitlab, project_id: int) -> List[Issue]:
    """Fetch all issues of a project, pinning the project's namespace id."""
    project = gitlab.projects.get(project_id)
    fixed_group = get_group_id_from_gitlab_project(project)
    return [
        Issue(raw_issue, fixed_group_id=fixed_group)
        for raw_issue in project.issues.list(all=True)
    ]
| import dateutil.parser
from datetime import datetime
from functools import lru_cache
from gitlab import Gitlab
from gitlab.v4.objects import Project
from logging import getLogger
from typing import Dict, List, Optional, Union
from .custom_types import GitlabIssue, GitlabUserDict
from .exceptions import MovedIssueNotDefined
from .funcions import warn_once
logger = getLogger(f"{__package__}.{__name__}")
def get_user_identifier(user_dict: GitlabUserDict) -> str:
    """
    Return the user identifier

    keep as separate function to allow easier changes later if required
    """
    # Currently the display name serves as the identity of a Gitlab user.
    return str(user_dict["name"])
class Issue:
"""
Wrapper class around Group/Project Issues
"""
# The issue object itself is not dynamic only the contained obj is!
__slots__ = [
"obj",
"_moved_reference",
"_fixed_group_id",
]
def __init__(self, obj: GitlabIssue, fixed_group_id: Optional[int] = None):
"""
:param obj:
:param fixed_group_id: Do not extract the group_id from the
Gitlab issue but assume it is fixed
"""
self._fixed_group_id = fixed_group_id
self.obj: GitlabIssue = obj
self._moved_reference: Optional[Issue] = None
def __getattr__(self, item: str):
"""Default to get the values from the original objext"""
return getattr(self.obj, item)
@property
def moved_reference(self) -> Optional["Issue"]:
"""
get the reference to the moved issue if defined
:exceptions MovedIssueNotDefined
"""
if self.moved_to_id is None:
return None
else:
if self._moved_reference is None:
raise MovedIssueNotDefined(
"The issue is marked as moved but was not referenced "
"in the loaded issues, so tracking is not possible."
)
else:
return self._moved_reference
@moved_reference.setter
def moved_reference(self, value: "Issue"):
if not isinstance(value, Issue):
raise ValueError("Can only set an Issue object as moved reference!")
self._moved_reference = value
def __str__(self):
return f"'{self.title}' (ID: {self.id})"
# **************************************************************
# *** Define some default properties to allow static typing ***
# **************************************************************
@property
def id(self) -> int:
"""
The id of an issue - it seems to be unique within an installation
"""
return self.obj.id
@property
def iid(self) -> int:
return self.obj.iid
@property
def project_id(self) -> int:
return self.obj.project_id
@property
def group_id(self) -> Optional[int]:
"""
Return the group id, if negative a user id is given
The group ID is either taken from the issue itself or if a project is given
the issue is fixed (see #7)
If group_id isn't fixed and can't be extracted, only give a warning and do
not fail, as it isn't required to have the sync working. Only the
issue id or weblink is used to find the related issue.
See `sync.py`
* `get_issue_ref_from_task`
* `IssueFinder`
"""
if self._fixed_group_id is not None:
return self._fixed_group_id
try:
return self.obj.group_id
except AttributeError:
warn_once(
logger,
"Could not extract group_id from Issue. "
"This is not required for syncing, so I will continue.",
)
return None
@property
def has_tasks(self) -> bool:
return self.obj.has_tasks
@property
def is_closed(self) -> bool:
return str(self.obj.state).lower().strip().startswith("closed")
@property
def is_open(self):
return not self.is_closed
@property
def percentage_tasks_done(self) -> int:
"""
Percentage of tasks done, 0 if no tasks are defined and not closed.
By definition always 100 if issue is closed (and not moved)
:exceptions MovedIssueNotDefined
"""
if self.is_closed:
if self.moved_to_id is not None:
# Needed for
assert self._moved_reference is not None
return self._moved_reference.percentage_tasks_done
return 100
if not self.has_tasks:
return 0
task = self.task_completion_status
return round(task["completed_count"] / task["count"] * 100)
@property
def moved_to_id(self) -> Optional[int]:
return self.obj.moved_to_id
@property
def title(self) -> str:
return self.obj.title
@property
def description(self) -> str:
return self.obj.description
@property
def closed_at(self) -> Optional[datetime]:
if (val := self.obj.closed_at) is not None:
return dateutil.parser.parse(val)
return None
@property
def created_at(self) -> Optional[datetime]:
if (val := self.obj.created_at) is not None:
return dateutil.parser.parse(val)
return None
@property
def due_date(self) -> Optional[datetime]:
if (val := self.obj.due_date) is not None:
return dateutil.parser.parse(val)
return None
@property
def closed_by(self) -> Optional[str]:
if (val := self.obj.closed_by) is not None:
return get_user_identifier(val)
return None
def _get_from_time_stats(self, key) -> Optional[float]:
"""
Somehow the python-gitlab API seems to be not 100% fixed,
see issue #9
:param key: key to query from time stats
:return: the value if existing or none
"""
query_dict: Dict[str, float]
if callable(self.obj.time_stats):
query_dict = self.obj.time_stats()
else:
query_dict = self.obj.time_stats
return query_dict.get(key, None)
@property
def time_estimated(self) -> Optional[float]:
"""
Time estimated in minutes
"""
if (time_estimate := self._get_from_time_stats("time_estimate")) is not None:
return time_estimate / 60
else:
logger.warning("Time Estimate is None")
return None
@property
def time_spent_total(self) -> Optional[float]:
"""
Total time spent in minutes
"""
if (time_spend := self._get_from_time_stats("total_time_spent")) is not None:
return time_spend / 60
else:
logger.warning("Time spend is None")
return None
@property
def assignees(self) -> List[str]:
"""
list of Gitlab Assignees.
Note in the community edition only one assignee is possible
"""
return [get_user_identifier(user) for user in self.obj.assignees]
@property
def labels(self) -> List[str]:
"""
list of labels
"""
return self.obj.labels
@property
def full_ref(self) -> str:
"""
give the full reference through which the issue can be accessed
"""
return self.obj.attributes['references']['full']
@property
def web_url(self) -> str:
"""
give the url from which the issue can be accessed
"""
return self.obj.web_url
@lru_cache(10)
def get_group_id_from_gitlab_project(project: Project) -> Optional[int]:
"""
Get user id form gitlab project.
If the namespace of the project is a user, a negativ
value is returned
:param project:
"""
try:
namespace: Dict[str, Union[int, str]] = project.namespace
except AttributeError:
logger.warning(
f"Could not extract name space for project '{project.get_id()}' - "
"This error will be ignored."
)
return None
if str(namespace["kind"]).lower() == "user":
return -int(namespace["id"])
else:
return int(namespace["id"])
def get_gitlab_class(server: str, personal_token: Optional[str] = None) -> Gitlab:
if personal_token is None:
return Gitlab(server, ssl_verify=False)
else:
return Gitlab(server, private_token=personal_token, ssl_verify=False)
def get_group_issues(gitlab: Gitlab, group_id: int) -> List[Issue]:
group = gitlab.groups.get(group_id, lazy=True)
return [Issue(issue) for issue in group.issues.list(all=True)]
def get_project_issues(gitlab: Gitlab, project_id: int) -> List[Issue]:
project = gitlab.projects.get(project_id)
return [
Issue(issue, fixed_group_id=get_group_id_from_gitlab_project(project))
for issue in project.issues.list(all=True)
] | pt | 0.14519 | 2.19788 | 2 |
pytest/track_test.py | Sergej91/TheiaSfM | 0 | 13680 | import pytheia as pt
import os
import numpy as np
def test_track_set_descriptor_read_write():
recon = pt.sfm.Reconstruction()
view_id1 = recon.AddView("0",0.0)
m_view1 = recon.MutableView(view_id1)
m_view1.IsEstimated = True
view_id2 = recon.AddView("1",1.0)
m_view2 = recon.MutableView(view_id2)
m_view2.IsEstimated = True
t_id = recon.AddTrack()
m_track = recon.MutableTrack(t_id)
m_track.AddView(view_id1)
m_track.AddView(view_id2)
m_track.IsEstimated = True
desc = np.asarray([100,200,300,400])
m_track.SetReferenceDescriptor(desc)
assert (m_track.ReferenceDescriptor() == desc).all()
# read write
pt.io.WriteReconstruction(recon,"test")
recon_loaded = pt.io.ReadReconstruction("test")[1]
s_track = recon_loaded.Track(t_id)
assert (s_track.ReferenceDescriptor() == desc).all()
os.remove("test")
if __name__ == "__main__":
test_track_set_descriptor_read_write() | import pytheia as pt
import os
import numpy as np
def test_track_set_descriptor_read_write():
recon = pt.sfm.Reconstruction()
view_id1 = recon.AddView("0",0.0)
m_view1 = recon.MutableView(view_id1)
m_view1.IsEstimated = True
view_id2 = recon.AddView("1",1.0)
m_view2 = recon.MutableView(view_id2)
m_view2.IsEstimated = True
t_id = recon.AddTrack()
m_track = recon.MutableTrack(t_id)
m_track.AddView(view_id1)
m_track.AddView(view_id2)
m_track.IsEstimated = True
desc = np.asarray([100,200,300,400])
m_track.SetReferenceDescriptor(desc)
assert (m_track.ReferenceDescriptor() == desc).all()
# read write
pt.io.WriteReconstruction(recon,"test")
recon_loaded = pt.io.ReadReconstruction("test")[1]
s_track = recon_loaded.Track(t_id)
assert (s_track.ReferenceDescriptor() == desc).all()
os.remove("test")
if __name__ == "__main__":
test_track_set_descriptor_read_write() | it | 0.277966 | 2.150352 | 2 |
jayk/util.py | alekratz/jayk | 1 | 13681 | <reponame>alekratz/jayk
"""Common utilities used through this codebase."""
import logging
import logging.config
class LogMixin:
"""
A logging mixin class, which provides methods for writing log messages.
"""
def __init__(self, logger_name: str):
"""
Creates the logger with the specified name.
:param logger_name: the name for this logger. When in doubt, use MyType.__name__.
"""
self.__logger = logging.getLogger(logger_name)
def critical(self, message, *args, **kwargs):
"""
Passes a critical logging message on to the internal logger.
"""
self.__logger.critical(message, *args, **kwargs)
def error(self, message, *args, **kwargs):
"""
Passes an error logging message on to the internal logger.
"""
self.__logger.error(message, *args, **kwargs)
def warning(self, message, *args, **kwargs):
"""
Passes an warning logging message on to the internal logger.
"""
self.__logger.warning(message, *args, **kwargs)
def info(self, message, *args, **kwargs):
"""
Passes an info logging message on to the internal logger.
"""
self.__logger.info(message, *args, **kwargs)
def debug(self, message, *args, **kwargs):
"""
Passes a debug logging message on to the internal logger.
"""
self.__logger.debug(message, *args, **kwargs)
def exception(self, message, *args, **kwargs):
"""
Passes an exception logging message on to the internal logger. This should only be called
when in the "except" clause of an exception handler.
"""
self.__logger.exception(message, *args, **kwargs)
| """Common utilities used through this codebase."""
import logging
import logging.config
class LogMixin:
"""
A logging mixin class, which provides methods for writing log messages.
"""
def __init__(self, logger_name: str):
"""
Creates the logger with the specified name.
:param logger_name: the name for this logger. When in doubt, use MyType.__name__.
"""
self.__logger = logging.getLogger(logger_name)
def critical(self, message, *args, **kwargs):
"""
Passes a critical logging message on to the internal logger.
"""
self.__logger.critical(message, *args, **kwargs)
def error(self, message, *args, **kwargs):
"""
Passes an error logging message on to the internal logger.
"""
self.__logger.error(message, *args, **kwargs)
def warning(self, message, *args, **kwargs):
"""
Passes an warning logging message on to the internal logger.
"""
self.__logger.warning(message, *args, **kwargs)
def info(self, message, *args, **kwargs):
"""
Passes an info logging message on to the internal logger.
"""
self.__logger.info(message, *args, **kwargs)
def debug(self, message, *args, **kwargs):
"""
Passes a debug logging message on to the internal logger.
"""
self.__logger.debug(message, *args, **kwargs)
def exception(self, message, *args, **kwargs):
"""
Passes an exception logging message on to the internal logger. This should only be called
when in the "except" clause of an exception handler.
"""
self.__logger.exception(message, *args, **kwargs) | pt | 0.221109 | 3.075706 | 3 |
experimentation/trap/statistics_calculator.py | GruppoPBDMNG-10/AIExam | 0 | 13682 | <filename>experimentation/trap/statistics_calculator.py
import experimentation.statistics.statistics as statistics
intersection = statistics.find_matches_from_file('result/experimentation/hmm/anomalous.json', 'result/experimentation/rnn/anomalous.json')
print(len(intersection)) | <filename>experimentation/trap/statistics_calculator.py
import experimentation.statistics.statistics as statistics
intersection = statistics.find_matches_from_file('result/experimentation/hmm/anomalous.json', 'result/experimentation/rnn/anomalous.json')
print(len(intersection)) | none | 1 | 2.317385 | 2 |
objects/CSCG/_3d/exact_solutions/status/incompressible_Navier_Stokes/Sin_Cos.py | mathischeap/mifem | 1 | 13683 | # -*- coding: utf-8 -*-
"""
@author: <NAME>.
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft, Delft, Netherlands
"""
from numpy import sin, cos, pi
from objects.CSCG._3d.exact_solutions.status.incompressible_Navier_Stokes.base import incompressible_NavierStokes_Base
from objects.CSCG._3d.fields.vector.main import _3dCSCG_VectorField
# noinspection PyAbstractClass
class SinCosRebholz_Conservation(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCosRebholz_Conservation, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def fx(self, t, x, y, z): return 0 * x # can not name it by _fx_
def fy(self, t, x, y, z): return 0 * x # can not name it by _fy_
def fz(self, t, x, y, z): return 0 * x # can not name it by _fz_
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
class SinCosRebholz_Dissipation(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.3 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es, nu=1):
super(SinCosRebholz_Dissipation, self).__init__(es, nu)
def u(self, t, x, y, z): return (2 - t) * cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return - 2 * pi * (2 - t) * sin(2 * pi * z)
def u_t(self, t, x, y, z): return - cos(2 * pi * z)
def u_xx(self, t, x, y, z): return 0 * x
def u_yy(self, t, x, y, z): return 0 * y
def u_zz(self, t, x, y, z): return -4 * pi ** 2 * (2 - t) * cos(2 * pi * z)
def v(self, t, x, y, z): return (1 + t) * sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * (1 + t) * cos(2 * pi * z)
def v_t(self, t, x, y, z): return sin(2 * pi * z)
def v_xx(self, t, x, y, z): return 0 * x
def v_yy(self, t, x, y, z): return 0 * x
def v_zz(self, t, x, y, z): return - 4 * pi ** 2 * (1 + t) * sin(2 * pi * z)
def w(self, t, x, y, z): return (1 - t) * sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * (1 - t) * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def w_t(self, t, x, y, z): return - sin(2 * pi * x)
def w_xx(self, t, x, y, z): return - 4 * pi ** 2 * (1 - t) * sin(2 * pi * x)
def w_yy(self, t, x, y, z): return 0 * x
def w_zz(self, t, x, y, z): return 0 * x
def p(self, t, x, y, z): return sin(2 * pi * (x + y + z + t))
def p_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_y(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
class SinCos_Modified_Dissipation(incompressible_NavierStokes_Base):
"""A modified case that the solution along t is not linear."""
def __init__(self, es, nu=1):
super(SinCos_Modified_Dissipation, self).__init__(es, nu)
def u(self, t, x, y, z): return (1 - sin(2*pi*t)) * cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return - 2 * pi * (1 - sin(2*pi*t)) * sin(2 * pi * z)
def u_t(self, t, x, y, z): return - 2*pi*cos(2*pi*t) * cos(2 * pi * z)
def u_xx(self, t, x, y, z): return 0 * x
def u_yy(self, t, x, y, z): return 0 * y
def u_zz(self, t, x, y, z): return -4 * pi ** 2 * (1 - sin(2*pi*t)) * cos(2 * pi * z)
def v(self, t, x, y, z): return (1 + cos(2*pi*t)) * sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * (1 + cos(2*pi*t)) * cos(2 * pi * z)
def v_t(self, t, x, y, z): return -2*pi*sin(2*pi*t) * sin(2 * pi * z)
def v_xx(self, t, x, y, z): return 0 * x
def v_yy(self, t, x, y, z): return 0 * x
def v_zz(self, t, x, y, z): return - 4 * pi ** 2 * (1 + cos(2*pi*t)) * sin(2 * pi * z)
def w(self, t, x, y, z): return (1 - sin(2*pi*t)) * sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * (1 - sin(2*pi*t)) * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def w_t(self, t, x, y, z): return - 2*pi*cos(2*pi*t) * sin(2 * pi * x)
def w_xx(self, t, x, y, z): return - 4 * pi ** 2 * (1 - sin(2*pi*t)) * sin(2 * pi * x)
def w_yy(self, t, x, y, z): return 0 * x
def w_zz(self, t, x, y, z): return 0 * x
def p(self, t, x, y, z): return sin(2 * pi * (x + y + z + t))
def p_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_y(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# varphi(t,x,y,z) = t * sin(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fx(self, t, x, y, z): return 2 * pi * t * cos(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fy(self, t, x, y, z): return 2 * pi * t * sin(2 * pi * x) * cos(2 * pi * y) * sin(2 * pi * z)
def fz(self, t, x, y, z): return 2 * pi * t * sin(2 * pi * x) * sin(2 * pi * y) * cos(2 * pi * z)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force1(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force1, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# varphi(t,x,y,z) = sin(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fx(self, t, x, y, z): return 2 * pi * cos(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fy(self, t, x, y, z): return 2 * pi * sin(2 * pi * x) * cos(2 * pi * y) * sin(2 * pi * z)
def fz(self, t, x, y, z): return 2 * pi * sin(2 * pi * x) * sin(2 * pi * y) * cos(2 * pi * z)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force_POLYNOMIALS(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force_POLYNOMIALS, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# phi(t,x,y,z) = t * (x**3/3 - x**2/2 + y**3/3 - y**2/2 + z**3/3 - z**2/2)
def fx(self, t, x, y, z): return t * x * (x-1)
def fy(self, t, x, y, z): return t * y * (y-1)
def fz(self, t, x, y, z): return t * z * (z-1)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force_CONSTANT(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force_CONSTANT, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# phi(t,x,y,z) = x
def fx(self, t, x, y, z): return 1 + 0 * x * y * z
def fy(self, t, x, y, z): return 0 + 0 * x * y * z
def fz(self, t, x, y, z): return 0 + 0 * x * y * z
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
| # -*- coding: utf-8 -*-
"""
@author: <NAME>.
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft, Delft, Netherlands
"""
from numpy import sin, cos, pi
from objects.CSCG._3d.exact_solutions.status.incompressible_Navier_Stokes.base import incompressible_NavierStokes_Base
from objects.CSCG._3d.fields.vector.main import _3dCSCG_VectorField
# noinspection PyAbstractClass
class SinCosRebholz_Conservation(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCosRebholz_Conservation, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def fx(self, t, x, y, z): return 0 * x # can not name it by _fx_
def fy(self, t, x, y, z): return 0 * x # can not name it by _fy_
def fz(self, t, x, y, z): return 0 * x # can not name it by _fz_
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
class SinCosRebholz_Dissipation(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.3 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es, nu=1):
super(SinCosRebholz_Dissipation, self).__init__(es, nu)
def u(self, t, x, y, z): return (2 - t) * cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return - 2 * pi * (2 - t) * sin(2 * pi * z)
def u_t(self, t, x, y, z): return - cos(2 * pi * z)
def u_xx(self, t, x, y, z): return 0 * x
def u_yy(self, t, x, y, z): return 0 * y
def u_zz(self, t, x, y, z): return -4 * pi ** 2 * (2 - t) * cos(2 * pi * z)
def v(self, t, x, y, z): return (1 + t) * sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * (1 + t) * cos(2 * pi * z)
def v_t(self, t, x, y, z): return sin(2 * pi * z)
def v_xx(self, t, x, y, z): return 0 * x
def v_yy(self, t, x, y, z): return 0 * x
def v_zz(self, t, x, y, z): return - 4 * pi ** 2 * (1 + t) * sin(2 * pi * z)
def w(self, t, x, y, z): return (1 - t) * sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * (1 - t) * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def w_t(self, t, x, y, z): return - sin(2 * pi * x)
def w_xx(self, t, x, y, z): return - 4 * pi ** 2 * (1 - t) * sin(2 * pi * x)
def w_yy(self, t, x, y, z): return 0 * x
def w_zz(self, t, x, y, z): return 0 * x
def p(self, t, x, y, z): return sin(2 * pi * (x + y + z + t))
def p_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_y(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
class SinCos_Modified_Dissipation(incompressible_NavierStokes_Base):
"""A modified case that the solution along t is not linear."""
def __init__(self, es, nu=1):
super(SinCos_Modified_Dissipation, self).__init__(es, nu)
def u(self, t, x, y, z): return (1 - sin(2*pi*t)) * cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return - 2 * pi * (1 - sin(2*pi*t)) * sin(2 * pi * z)
def u_t(self, t, x, y, z): return - 2*pi*cos(2*pi*t) * cos(2 * pi * z)
def u_xx(self, t, x, y, z): return 0 * x
def u_yy(self, t, x, y, z): return 0 * y
def u_zz(self, t, x, y, z): return -4 * pi ** 2 * (1 - sin(2*pi*t)) * cos(2 * pi * z)
def v(self, t, x, y, z): return (1 + cos(2*pi*t)) * sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * (1 + cos(2*pi*t)) * cos(2 * pi * z)
def v_t(self, t, x, y, z): return -2*pi*sin(2*pi*t) * sin(2 * pi * z)
def v_xx(self, t, x, y, z): return 0 * x
def v_yy(self, t, x, y, z): return 0 * x
def v_zz(self, t, x, y, z): return - 4 * pi ** 2 * (1 + cos(2*pi*t)) * sin(2 * pi * z)
def w(self, t, x, y, z): return (1 - sin(2*pi*t)) * sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * (1 - sin(2*pi*t)) * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def w_t(self, t, x, y, z): return - 2*pi*cos(2*pi*t) * sin(2 * pi * x)
def w_xx(self, t, x, y, z): return - 4 * pi ** 2 * (1 - sin(2*pi*t)) * sin(2 * pi * x)
def w_yy(self, t, x, y, z): return 0 * x
def w_zz(self, t, x, y, z): return 0 * x
def p(self, t, x, y, z): return sin(2 * pi * (x + y + z + t))
def p_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_y(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# varphi(t,x,y,z) = t * sin(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fx(self, t, x, y, z): return 2 * pi * t * cos(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fy(self, t, x, y, z): return 2 * pi * t * sin(2 * pi * x) * cos(2 * pi * y) * sin(2 * pi * z)
def fz(self, t, x, y, z): return 2 * pi * t * sin(2 * pi * x) * sin(2 * pi * y) * cos(2 * pi * z)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force1(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force1, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# varphi(t,x,y,z) = sin(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fx(self, t, x, y, z): return 2 * pi * cos(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fy(self, t, x, y, z): return 2 * pi * sin(2 * pi * x) * cos(2 * pi * y) * sin(2 * pi * z)
def fz(self, t, x, y, z): return 2 * pi * sin(2 * pi * x) * sin(2 * pi * y) * cos(2 * pi * z)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force_POLYNOMIALS(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force_POLYNOMIALS, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# phi(t,x,y,z) = t * (x**3/3 - x**2/2 + y**3/3 - y**2/2 + z**3/3 - z**2/2)
def fx(self, t, x, y, z): return t * x * (x-1)
def fy(self, t, x, y, z): return t * y * (y-1)
def fz(self, t, x, y, z): return t * z * (z-1)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force_CONSTANT(incompressible_NavierStokes_Base):
    """
    The sin cos test case for the conservation, see Section 5.2 of paper:
    [An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
    equations, <NAME>, 2007]
    """
    def __init__(self, es):
        # Second argument (0) is forwarded to the base class; presumably the
        # viscosity, making this the conservative/inviscid variant -- confirm.
        super(SinCos_Conservation_Conservative_Body_Force_CONSTANT, self).__init__(es, 0)
    @property
    def valid_time(self):
        # Flag string consumed by the framework: the exact solution is only
        # claimed at the first time instant.
        return 'valid_only_at_its_first_instant'
    # --- Steady manufactured velocity field (t accepted but unused) ---
    def u(self, t, x, y, z): return cos(2 * pi * z)
    def u_x(self, t, x, y, z): return 0 * x
    def u_y(self, t, x, y, z): return 0 * x
    def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
    def v(self, t, x, y, z): return sin(2 * pi * z)
    def v_x(self, t, x, y, z): return 0 * x
    def v_y(self, t, x, y, z): return 0 * x
    def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
    def w(self, t, x, y, z): return sin(2 * pi * x)
    def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
    def w_y(self, t, x, y, z): return 0 * x
    def w_z(self, t, x, y, z): return 0 * x
    # phi(t,x,y,z) = x, so the body force is its gradient: the constant (1, 0, 0).
    # The "0 * x * y * z" terms presumably keep the output array-shaped when
    # the coordinates are arrays -- confirm.
    def fx(self, t, x, y, z): return 1 + 0 * x * y * z
    def fy(self, t, x, y, z): return 0 + 0 * x * y * z
    def fz(self, t, x, y, z): return 0 + 0 * x * y * z
    @property
    def body_force(self):
        """This makes body force valid at all time instants."""
        # Lazily build and cache the constant body-force vector field.
        if self._bodyForce_ is None:
            self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
        return self._bodyForce_
| pt | 0.134797 | 2.747167 | 3 |
aaem_summaries/components/transmission/__init__.py | gina-alaska/alaska_affordable_energy_model | 1 | 13684 | <reponame>gina-alaska/alaska_affordable_energy_model<gh_stars>1-10
"""
__init__.py
summary for
Transmission Line in a community
"""
from summary import *
| """
__init__.py
summary for
Transmission Line in a community
"""
from summary import * | es | 0.377582 | 0.962891 | 1 |
setup.py | doconce/preprocess | 5 | 13685 | <reponame>doconce/preprocess
#!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Software Ltd.
"""preprocess: a multi-language preprocessor
There are millions of templating systems out there (most of them
developed for the web). This isn't one of those, though it does share
some basics: a markup syntax for templates that are processed to give
resultant text output. The main difference with `preprocess.py` is
that its syntax is hidden in comments (whatever the syntax for comments
maybe in the target filetype) so that the file can still have valid
syntax. A comparison with the C preprocessor is more apt.
`preprocess.py` is targetted at build systems that deal with many
types of files. Languages for which it works include: C++, Python,
Perl, Tcl, XML, JavaScript, CSS, IDL, TeX, Fortran, PHP, Java, Shell
scripts (Bash, CSH, etc.) and C#. Preprocess is usable both as a
command line app and as a Python module.
"""
import os
import sys
import distutils
import re
from setuptools import setup
# Extract "x.y.z" from the __version_info__ tuple in the module source so the
# version number lives in exactly one place.  Raw string for the regex and a
# context manager so the file handle is closed deterministically.
with open('lib/preprocess.py', 'r') as _src:
    version = '.'.join(re.findall(r'__version_info__ = \((\d+), (\d+), (\d+)\)',
                                  _src.read())[0])
classifiers = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Programming Language :: Python
Operating System :: OS Independent
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Text Processing :: Filters
"""
if sys.version_info < (2, 3):
    # Distutils before Python 2.3 doesn't accept classifiers.
    # has_key is kept on purpose: this branch only runs on Python <= 2.2,
    # where "key in dict" is not universally available.
    _setup = setup
    def setup(**kwargs):
        if kwargs.has_key("classifiers"):
            del kwargs["classifiers"]
        _setup(**kwargs)
doclines = __doc__.split("\n")
setup(
    name="preprocess",
    version=version,
    author="<NAME>",
    author_email="<EMAIL>",
    maintainer="<NAME>",
    maintainer_email="<EMAIL>",
    url="http://github.com/doconce/preprocess/",
    license="http://www.opensource.org/licenses/mit-license.php",
    platforms=["any"],
    py_modules=["preprocess"],
    package_dir={"": "lib"},
    entry_points={'console_scripts': ['preprocess = preprocess:main']},
    install_requires=['future'],
    description=doclines[0],
    # Bug fix: on Python 3, filter() returns a lazy iterator; setuptools
    # metadata expects a real sequence of strings, so materialise it.
    classifiers=list(filter(None, classifiers.split("\n"))),
    long_description="\n".join(doclines[2:]),
)
| #!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Software Ltd.
"""preprocess: a multi-language preprocessor
There are millions of templating systems out there (most of them
developed for the web). This isn't one of those, though it does share
some basics: a markup syntax for templates that are processed to give
resultant text output. The main difference with `preprocess.py` is
that its syntax is hidden in comments (whatever the syntax for comments
maybe in the target filetype) so that the file can still have valid
syntax. A comparison with the C preprocessor is more apt.
`preprocess.py` is targetted at build systems that deal with many
types of files. Languages for which it works include: C++, Python,
Perl, Tcl, XML, JavaScript, CSS, IDL, TeX, Fortran, PHP, Java, Shell
scripts (Bash, CSH, etc.) and C#. Preprocess is usable both as a
command line app and as a Python module.
"""
import os
import sys
import distutils
import re
from setuptools import setup
version = '.'.join(re.findall('__version_info__ = \((\d+), (\d+), (\d+)\)',
open('lib/preprocess.py', 'r').read())[0])
classifiers = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Programming Language :: Python
Operating System :: OS Independent
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Text Processing :: Filters
"""
if sys.version_info < (2, 3):
# Distutils before Python 2.3 doesn't accept classifiers.
_setup = setup
def setup(**kwargs):
if kwargs.has_key("classifiers"):
del kwargs["classifiers"]
_setup(**kwargs)
doclines = __doc__.split("\n")
setup(
name="preprocess",
version=version,
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="http://github.com/doconce/preprocess/",
license="http://www.opensource.org/licenses/mit-license.php",
platforms=["any"],
py_modules=["preprocess"],
package_dir={"": "lib"},
entry_points={'console_scripts': ['preprocess = preprocess:main']},
install_requires=['future'],
description=doclines[0],
classifiers=filter(None, classifiers.split("\n")),
long_description="\n".join(doclines[2:]),
) | pt | 0.126387 | 2.039785 | 2 |
bubblebbs/config.py | kawa-kokosowa/bubblebbs | 7 | 13686 | import os
# Flask / extension settings for BubbleBBS.  Every value can be overridden via
# a BBBS_* environment variable; the literal defaults are development
# placeholders (including the 'CHANGEGME' typo, kept for compatibility) and
# must be replaced in production.
BEHIND_REVERSE_PROXY = bool(os.environ.get('BBBS_BEHIND_REVERSE_PROXY', False))
POSTS_PER_PAGE = 25
TEMPLATES_AUTO_RELOAD = True
RECAPTCHA_ENABLED = os.environ.get('BBBS_RECAPTCHA_ENABLED', False)
RECAPTCHA_SITE_KEY = os.environ.get('BBBS_RECAPTCHA_SITE_KEY', 'CHANGEGME')
# Bug fix: the secret key was read from 'BBS_RECAPTCHA_SECRET_KEY' (missing a
# 'B', inconsistent with every other BBBS_ variable).  Prefer the consistent
# name but keep the old misspelled one as a fallback so existing deployments
# keep working.
RECAPTCHA_SECRET_KEY = os.environ.get(
    'BBBS_RECAPTCHA_SECRET_KEY',
    os.environ.get('BBS_RECAPTCHA_SECRET_KEY', 'CHANGEME'),
)
SECRET_KEY = os.environ.get('BBBS_SECRET_KEY', 'PLEASE CHANGE ME')
SECRET_SALT = os.environ.get('BBBS_SECRET_SALT', 'CHANGEME')
SQLALCHEMY_DATABASE_URI = os.environ.get('BBBS_DB_STRING', 'sqlite:///test.db')
SITE_TAGLINE = os.environ.get('BBBS_SITE_TAGLINE', 'some tagline')
# Bug fix: SITE_TITLE read 'BBBS_SITE_TAGLINE' (copy-paste error).  Prefer
# 'BBBS_SITE_TITLE'; fall back to the old (wrong) variable so deployments
# that relied on it are unaffected.
SITE_TITLE = os.environ.get('BBBS_SITE_TITLE',
                            os.environ.get('BBBS_SITE_TAGLINE', 'super title'))
SITE_FOOTER = os.environ.get(
    'BBBS_SITE_FOOTER',
    '<a href="https://github.com/kawa-kokosowa/bubblebbs">Powered by BubbleBBS</a>',
)
# Rate limiting (Flask-Limiter style strings; storage backend is Redis).
RATELIMIT_STORAGE_URL = os.environ.get('BBBS_RATELIMIT_STORAGE_URL', 'redis://localhost:6379/1')
RATELIMIT_DEFAULT = "400 per day, 100 per hour"
RATELIMIT_ENABLED = True
RATELIMIT_LIST_THREADS = "20 per minute, 1 per second"
RATELIMIT_VIEW_SPECIFIC_POST = "20 per minute, 1 per second"
RATELIMIT_NEW_REPLY = "20 per hour, 1 per second, 2 per minute"
RATELIMIT_VIEW_TRIP_META = "50 per hour, 15 per minute"
RATELIMIT_EDIT_TRIP_META = "60 per hour, 1 per second, 4 per minute"
RATELIMIT_MANAGE_COOKIE = '60 per hour, 1 per second, 7 per minute'
RATELIMIT_CREATE_THREAD = '700 per hour, 100 per minute'
RATELIMIT_NEW_THREAD_FORM = '60 per hour, 1 per second'
| import os
BEHIND_REVERSE_PROXY = bool(os.environ.get('BBBS_BEHIND_REVERSE_PROXY', False))
POSTS_PER_PAGE = 25
TEMPLATES_AUTO_RELOAD = True
RECAPTCHA_ENABLED = os.environ.get('BBBS_RECAPTCHA_ENABLED', False)
RECAPTCHA_SITE_KEY = os.environ.get('BBBS_RECAPTCHA_SITE_KEY', 'CHANGEGME')
RECAPTCHA_SECRET_KEY = os.environ.get('BBS_RECAPTCHA_SECRET_KEY', 'CHANGEME')
SECRET_KEY = os.environ.get('BBBS_SECRET_KEY', 'PLEASE CHANGE ME')
SECRET_SALT = os.environ.get('BBBS_SECRET_SALT', 'CHANGEME')
SQLALCHEMY_DATABASE_URI = os.environ.get('BBBS_DB_STRING', 'sqlite:///test.db')
SITE_TAGLINE = os.environ.get('BBBS_SITE_TAGLINE', 'some tagline')
SITE_TITLE = os.environ.get('BBBS_SITE_TAGLINE', 'super title')
SITE_FOOTER = os.environ.get(
'BBBS_SITE_FOOTER',
'<a href="https://github.com/kawa-kokosowa/bubblebbs">Powered by BubbleBBS</a>',
)
RATELIMIT_STORAGE_URL = os.environ.get('BBBS_RATELIMIT_STORAGE_URL', 'redis://localhost:6379/1')
RATELIMIT_DEFAULT = "400 per day, 100 per hour"
RATELIMIT_ENABLED = True
RATELIMIT_LIST_THREADS = "20 per minute, 1 per second"
RATELIMIT_VIEW_SPECIFIC_POST = "20 per minute, 1 per second"
RATELIMIT_NEW_REPLY = "20 per hour, 1 per second, 2 per minute"
RATELIMIT_VIEW_TRIP_META = "50 per hour, 15 per minute"
RATELIMIT_EDIT_TRIP_META = "60 per hour, 1 per second, 4 per minute"
RATELIMIT_MANAGE_COOKIE = '60 per hour, 1 per second, 7 per minute'
RATELIMIT_CREATE_THREAD = '700 per hour, 100 per minute'
RATELIMIT_NEW_THREAD_FORM = '60 per hour, 1 per second'
| none | 1 | 1.799895 | 2 |
sim_user/mailLib.py | silicom-hub/IS_simulator | 4 | 13687 | import os
import wget
import time
import glob
import getpass
import tarfile
import subprocess
import email.mime.multipart
import email.mime.text
import email.mime.image
import email.mime.audio
from datetime import datetime
from pprint import pprint
from colorama import Style, Fore
from smtplib import SMTP, SMTP_SSL
from imaplib import IMAP4_SSL, IMAP4
def smtp_connect(smtp_server, verbose=True):
    """Open a connection to an SMTP server, preferring SSL.

    Tries SMTP over SSL first and falls back to a plain-text connection.

    smtp_server (str): hostname or IP of the SMTP server.
    verbose (bool): when True, print progress information.

    Returns:
        The connected SMTP object, or 1 when both attempts fail.
    """
    for transport, label in ((SMTP_SSL, "with SSL"), (SMTP, "without SSL")):
        try:
            connection = transport(host=smtp_server)
            connection.ehlo()
            if verbose:
                print(Fore.GREEN + " ==> [smtp_connect] " + label + Style.RESET_ALL)
            return connection
        except:
            continue
    print(Fore.RED + " ==> [smtp_connect] failed!" + Style.RESET_ALL)
    return 1
def imap_connect(imap_server, username, password, verbose=True):
    """Log in to an IMAP server, preferring SSL.

    Tries IMAP over SSL first and falls back to a plain-text connection.

    imap_server (str): hostname or IP of the IMAP server.
    username (str): login name.
    password (str): login password.
    verbose (bool): when True, print progress information.

    Returns:
        The authenticated IMAP4 object, or None when both attempts fail.
    """
    for transport, label in ((IMAP4_SSL, "with SSL"), (IMAP4, "without SSL")):
        try:
            connection = transport(imap_server)
            connection.login(username, password)
            if verbose:
                print(Fore.GREEN + " ==> [imap_connect] " + label + Style.RESET_ALL)
            return connection
        except:
            continue
    print(Fore.RED + " ==> [imap_connect] failed!" + Style.RESET_ALL)
def send_mail(smtp_server, FROM="", TO="", subject="", msg="", attachements=None, verbose=True):
    """Build and send one email, optionally with image/text attachments.

    smtp_server (str): hostname or IP of the SMTP server.
    FROM (str): sender email address.
    TO (str): recipient email address.
    subject (str, optional): subject line (wrapped in "[ ... ]").
    msg (str, optional): plain-text body.
    attachements (list, optional): pairs of (kind, path) where kind is
        "image" or "file".
    verbose (bool): when True, print progress information.

    Returns:
        None
    """
    # Bug fix: the default used to be the mutable literal [], which is shared
    # across calls in Python; use a None sentinel instead.
    if attachements is None:
        attachements = []
    smtp = smtp_connect(smtp_server, verbose=False)
    mail = email.mime.multipart.MIMEMultipart()
    mail["Subject"] = "[ "+subject+" ]"
    mail["From"] = FROM
    mail["To"] = TO
    msg = email.mime.text.MIMEText(msg, _subtype="plain")
    msg.add_header("Content-Disposition", "email message")
    mail.attach(msg)
    for attachement in attachements:
        # NOTE: the "attachement"/"Attachement-*" spellings are part of the
        # wire protocol shared with download_attachements(); do not "fix" them.
        if attachement[0] == "image":
            img = email.mime.image.MIMEImage(open(attachement[1], "rb").read())
            img.add_header("Content-Disposition", "attachement")
            img.add_header("Attachement-type", "image")
            img.add_header("Attachement-filename", attachement[1])
            mail.attach(img)
        elif attachement[0] == "file":
            text = email.mime.text.MIMEText(open(attachement[1], "r").read())
            text.add_header("Content-Disposition", "attachement")
            text.add_header("Attachement-type", "filetext")
            text.add_header("Attachement-filename", attachement[1])
            mail.attach(text)
    try:
        smtp.sendmail(mail["From"], mail["To"], mail.as_string())
        if verbose:
            print(Fore.GREEN+ " ==> [send_mail] "+mail["From"]+" --> "+mail["To"]+" {"+subject+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
        smtp_logout(smtp, verbose=False)
    except Exception as e:
        print(Fore.RED+ " ==> [send_mail] failed! "+mail["From"]+" --> "+mail["To"]+" -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
        print(Fore.RED+str(e)+Style.RESET_ALL)
        smtp_logout(smtp, verbose=False)
def read_mailbox(imap_server, username, password, verbose=True):
    """Fetch every message in the INBOX.

    imap_server (str): hostname or IP of the IMAP server.
    username (str): login name.
    password (str): login password.
    verbose (bool): when True, print the number of messages fetched.

    Returns:
        list of email.message.Message: all messages in the INBOX.
    """
    imap = imap_connect(imap_server, username, password, verbose=False)
    all_mails = []
    imap.select("INBOX")
    # imap.search returns (status, [b'1 2 3 ...']); split the single
    # space-separated entry to get the individual message ids.
    status, mails = imap.search(None, "ALL")
    for mail in mails[0].split():
        status, data = imap.fetch(mail, "(RFC822)")
        mail_content = email.message_from_string(data[0][1].decode("utf-8"))
        all_mails.append(mail_content)
    if verbose:
        # Bug fix: the count used to be len(mails)-1, which is always 0
        # because imap.search returns a one-element list; report the real
        # number of fetched messages instead.
        print(Fore.GREEN+ " ==> [read_mailbox] {"+str(len(all_mails))+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
    imap_logout(imap, verbose=False)
    return all_mails
def read_mailbox_download_execute(imap_server, imap_login, imap_password):
    """Scan the INBOX for http links, download the last linked archive,
    extract it and execute any */*maj*.py scripts found inside.

    SECURITY NOTE: this downloads and runs arbitrary code referenced in
    received email -- only usable inside a simulation sandbox.

    imap_server (str): hostname or IP of the IMAP server.
    imap_login (str): login name.
    imap_password (str): login password.

    Returns:
        0 when there was nothing to do, True on success, False on error
        (mixed types kept for backward compatibility with existing callers).
    """
    try:
        path = None
        mails = read_mailbox(imap_server, imap_login, imap_password, verbose=False)
        if len(mails) <= 0:
            print(Fore.YELLOW+ " ==> [read_mailbox_download_execute] {"+str(len(mails))+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
            return 0
        for mail in mails:
            for element in str(mail).replace("\n", " ").split(" "):
                if "http" in element:
                    path = wget.download(element)
        # Bug fix: use "is None" (identity) instead of "== None".
        if path is None:
            print(Fore.YELLOW+ " ==> [read_mailbox_download_execute] {"+str(len(mails))+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
            return 0
        # Context manager guarantees the archive is closed even if
        # extraction raises.
        with tarfile.open(path) as tar_archive:
            tar_archive.extractall(".")
        for python_script in glob.glob("*/*maj*.py"):
            subprocess.getoutput("python3 " + python_script)
        print(Fore.GREEN+ " ==> [read_mailbox_download_execute] {"+str(len(mails))+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
        return True
    except Exception as e:
        print(Fore.RED+ " ==> [read_mailbox_download_execute] failed during execution! -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
        print(e)
        return False
def download_attachements(imap_server, username, password, verbose=True):
    """Save every text attachment found in the INBOX into ~/Downloads.

    imap_server (str): hostname or IP of the IMAP server.
    username (str): login name.
    password (str): login password.
    verbose (bool): accepted for interface consistency (the final status
        line is always printed, matching the original behavior).

    Returns:
        None
    """
    imap = imap_connect(imap_server, username, password, verbose=False)
    # Bug fix: the Downloads directory was created but never used -- files
    # were written to the attachment's own (relative) filename.  Save into
    # the directory that is prepared here, using only the basename so an
    # attachment header cannot direct the write elsewhere.
    downloads_dir = "/home/" + getpass.getuser() + "/Downloads"
    if not os.path.isdir(downloads_dir):
        os.makedirs(downloads_dir)
    imap.select("INBOX")
    status, mails = imap.search(None, "ALL")
    for mail in mails[0].split():
        status, data = imap.fetch(mail, "(RFC822)")
        mail_content = email.message_from_string(data[0][1].decode("utf-8"))
        for part in mail_content.walk():
            if part.is_multipart():
                continue
            # Header spellings match what send_mail() produces.
            if part["Content-Disposition"] == "attachement" and part["Attachement-type"] == "filetext":
                target = os.path.join(downloads_dir, os.path.basename(part["Attachement-filename"]))
                with open(target, "w") as attachment_file:
                    attachment_file.write(part._payload)
    imap_logout(imap, verbose=False)
    print(Fore.GREEN+ " ==> [download_attachements] --- " + time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
# In progress
def delete_old_emails(imap, time_laps=60):
    """Delete every INBOX message older than time_laps seconds.

    imap: an authenticated imaplib connection.
    time_laps (int): age threshold in seconds.
    """
    delete_messages = []
    imap.select("INBOX")
    status, mails = imap.search(None, "ALL")
    for mail in mails[0].split():
        status, data = imap.fetch(mail, "(RFC822)")
        mail_content = email.message_from_string(data[0][1].decode("utf-8"))
        # Parse the RFC-2822 Date header and compare its age to the
        # threshold.  NOTE(review): strptime with a fixed format will raise
        # on Date headers that deviate from "%a, %d %b %Y %H:%M:%S %z".
        if (time.time() - time.mktime(time.strptime(mail_content["Date"], "%a, %d %b %Y %H:%M:%S %z")) >= time_laps ):
            delete_messages.append(mail)
    delete_emails(imap, delete_messages)
def delete_emails(imap, mails):
    """Flag each given message as deleted, then expunge the mailbox.

    imap: an authenticated imaplib connection with a mailbox selected.
    mails: iterable of message ids, as returned by imap.search.

    Returns:
        None
    """
    for message_id in mails:
        imap.store(message_id, "+FLAGS", "\\Deleted")
    imap.expunge()
def delete_all_emails(imap_server, username, password, verbose=True):
    """Delete every message in the INBOX.

    imap_server (str): hostname or IP of the IMAP server.
    username (str): login name.
    password (str): login password.
    verbose (bool): accepted for interface consistency (status lines are
        always printed, matching the original behavior).

    Returns:
        0 on success (INBOX empty afterwards), 1 on failure.
    """
    imap = imap_connect(imap_server, username, password, verbose=False)
    imap.select("INBOX")
    status, mails = imap.search(None, "ALL")
    delete_messages = list(mails[0].split())
    delete_emails(imap, delete_messages)
    status, mails = imap.search(None, "ALL")
    # Bug fix: imap.search returns a one-element list like [b'1 2 3'], so the
    # old check "len(mails) == 1" was always true and success was reported
    # even when messages remained.  Success means no message ids are left.
    if len(mails[0].split()) == 0:
        print(Fore.GREEN+ " ==> [delete_all_emails] was successfull --- " + time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
        imap_logout(imap, verbose=False)
        return 0
    print(Fore.RED+ " ==> [delete_all_emails] failed! --- " + time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
    imap_logout(imap, verbose=False)
    return 1
def imap_logout(imap, verbose=True):
    """Close the selected mailbox and log out from the IMAP server.

    imap: an authenticated imaplib connection.
    verbose (bool): when True, print a success message.

    Returns:
        None
    """
    try:
        imap.close()
        imap.logout()
        if verbose:
            print(Fore.GREEN+ " ==> [imap_logout] was successfull" +Style.RESET_ALL)
    except:
        # Any failure (close, logout, or printing) falls through to one
        # error line; the broad except mirrors the rest of this module.
        print(Fore.RED+ " ==> [imap_logout] failed" +Style.RESET_ALL)
def smtp_logout(smtp, verbose=True):
    """Terminate the SMTP session.

    smtp: a connected smtplib SMTP object.
    verbose (bool): when True, print a success message.

    Returns:
        None
    """
    try:
        smtp.quit()
        if verbose:
            print(Fore.GREEN+ " ==> [smtp_logout] was successfull" +Style.RESET_ALL)
    except:
        # Broad except mirrors the rest of this module: report and continue.
        print(Fore.RED+ " ==> [smtp_logout] failed" +Style.RESET_ALL)
| import os
import wget
import time
import glob
import getpass
import tarfile
import subprocess
import email.mime.multipart
import email.mime.text
import email.mime.image
import email.mime.audio
from datetime import datetime
from pprint import pprint
from colorama import Style, Fore
from smtplib import SMTP, SMTP_SSL
from imaplib import IMAP4_SSL, IMAP4
def smtp_connect(smtp_server, verbose=True):
""" Conection to smtp server.
smtp_server_ip (str): This value is the smtp server's ip.
verbose (boolean): Print information about function progress.
Returns:
None
"""
try:
smtp = SMTP_SSL(host=smtp_server)
smtp.ehlo()
if verbose:
print(Fore.GREEN+ " ==> [smtp_connect] with SSL" +Style.RESET_ALL)
return smtp
except:
try:
smtp = SMTP(host=smtp_server)
smtp.ehlo()
if verbose:
print(Fore.GREEN+ " ==> [smtp_connect] without SSL" +Style.RESET_ALL)
return smtp
except:
print(Fore.RED+ " ==> [smtp_connect] failed!" +Style.RESET_ALL)
return 1
def imap_connect(imap_server, username, password, verbose=True):
""" Connection to imp server.
imap_server_ip (str): This value is the imap server's ip.
verbose (boolean): Print information about function progress.
Returns:
None
"""
try:
imap = IMAP4_SSL(imap_server)
imap.login(username, password)
if verbose:
print(Fore.GREEN+ " ==> [imap_connect] with SSL" +Style.RESET_ALL)
return imap
except:
try:
imap = IMAP4(imap_server)
imap.login(username, password)
if verbose:
print(Fore.GREEN+ " ==> [imap_connect] without SSL" +Style.RESET_ALL)
return imap
except:
print(Fore.RED+ " ==> [imap_connect] failed!" +Style.RESET_ALL)
def send_mail(smtp_server, FROM="", TO="", subject="", msg="", attachements=[], verbose=True):
""" Send mail.
smtp_server_ip (str): This value is the smtp server's ip.
FROM (str): This value is the sender email address.
TO (list): This value is a list of multiple recipient
SUBJECT (str, Optional): This value is the email's subject content.
msg (str, Optional): This value is the email's message content.
attachements (list Optional):
verbose (boolean): Print information about function progress.
Returns:
None
"""
smtp = smtp_connect(smtp_server, verbose=False)
mail = email.mime.multipart.MIMEMultipart()
mail["Subject"] = "[ "+subject+" ]"
mail["From"] = FROM
mail["To"] = TO
msg = email.mime.text.MIMEText(msg, _subtype="plain")
msg.add_header("Content-Disposition", "email message")
mail.attach(msg)
for attachement in attachements:
if attachement[0] == "image":
img = email.mime.image.MIMEImage(open(attachement[1], "rb").read())
img.add_header("Content-Disposition", "attachement")
img.add_header("Attachement-type", "image")
img.add_header("Attachement-filename", attachement[1])
mail.attach(img)
if attachement[0] == "file":
text = email.mime.text.MIMEText(open(attachement[1], "r").read())
text.add_header("Content-Disposition", "attachement")
text.add_header("Attachement-type", "filetext")
text.add_header("Attachement-filename", attachement[1])
mail.attach(text)
try:
smtp.sendmail(mail["From"], mail["To"], mail.as_string())
if verbose:
print(Fore.GREEN+ " ==> [send_mail] "+mail["From"]+" --> "+mail["To"]+" {"+subject+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
smtp_logout(smtp, verbose=False)
except Exception as e:
print(Fore.RED+ " ==> [send_mail] failed! "+mail["From"]+" --> "+mail["To"]+" -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
print(Fore.RED+str(e)+Style.RESET_ALL)
smtp_logout(smtp, verbose=False)
def read_mailbox(imap_server, username, password, verbose=True): # attribut [ _payload ]
""" Read email inbox
imap_server_ip (str): This value is the imap server's ip.
login (str): This value is the username login.
password (str): This value is the password login.
verbose (boolean): Print information about function progress.
Returns:
list of str: all emails content
"""
imap = imap_connect(imap_server, username, password, verbose=False)
all_mails = []
imap.select("INBOX")
status, mails = imap.search(None, "ALL")
for mail in mails[0].split():
status, data = imap.fetch(mail, "(RFC822)")
mail_content = email.message_from_string(data[0][1].decode("utf-8"))
all_mails.append(mail_content)
for part in mail_content.walk():
if not part.is_multipart():
pass
if verbose:
print(Fore.GREEN+ " ==> [read_mailbox] {"+str(len(mails)-1)+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
imap_logout(imap, verbose=False)
return all_mails
def read_mailbox_download_execute(imap_server, imap_login, imap_password):
""" Read email inbox and download link inside.
imap_server_ip (str): This value is the imap server's ip.
imap_login (str): This value is the username login.
imap_password (str): This value is the password login.
verbose (boolean): Print information about function progress.
Returns:
list of str: all emails content
"""
try:
path = None
mails = read_mailbox(imap_server, imap_login, imap_password, verbose=False)
if len(mails) <= 0:
print(Fore.YELLOW+ " ==> [read_mailbox_download_execute] {"+str(len(mails)-1)+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
return 0
for mail in mails:
for element in str(mail).replace("\n", " ").split(" "):
if "http" in element:
path = wget.download(element)
if path == None:
print(Fore.YELLOW+ " ==> [read_mailbox_download_execute] {"+str(len(mails)-1)+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
return 0
tarf_file = tarfile.open(path)
tarf_file.extractall(".")
tarf_file.close()
python_files = glob.glob("*/*maj*.py")
for python_script in python_files:
subprocess.getoutput("python3 "+python_script)
print(Fore.GREEN+ " ==> [read_mailbox_download_execute] {"+str(len(mails)-1)+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
return True
except Exception as e:
print(Fore.RED+ " ==> [read_mailbox_download_execute] failed during execution! -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
print(e)
return False
def download_attachements(imap_server, username, password, verbose=True):
""" Read email inbox and download attachements.
imap_server_ip (str): This value is the imap server's ip.
imap_login (str): This value is the username login.
imap_password (str): This value is the password login.
verbose (boolean): Print information about function progress.
Returns:
list of str: all emails content
"""
imap = imap_connect(imap_server, username, password, verbose=False)
#INIT
if not os.path.isdir("/home/"+getpass.getuser()+"/Downloads"):
os.makedirs("/home/"+getpass.getuser()+"/Downloads")
mails = []
imap.select("INBOX")
status, mails = imap.search(None, "ALL")
for mail in mails[0].split():
status, data = imap.fetch(mail, "(RFC822)")
mail_content = email.message_from_string(data[0][1].decode("utf-8"))
for part in mail_content.walk():
if not part.is_multipart():
if part["Content-Disposition"] == "attachement" and part["Attachement-type"] == "filetext":
username = getpass.getuser()
file = open(part["Attachement-filename"],"w")
file.write(part._payload)
file.close()
imap_logout(imap, verbose=False)
print(Fore.GREEN+ " ==> [download_attachements] --- " + time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
# In progress
def delete_old_emails(imap, time_laps=60):
delete_messages = []
imap.select("INBOX")
status, mails = imap.search(None, "ALL")
for mail in mails[0].split():
status, data = imap.fetch(mail, "(RFC822)")
mail_content = email.message_from_string(data[0][1].decode("utf-8"))
if (time.time() - time.mktime(time.strptime(mail_content["Date"], "%a, %d %b %Y %H:%M:%S %z")) >= time_laps ):
delete_messages.append(mail)
delete_emails(imap, delete_messages)
def delete_emails(imap, mails):
""" Delete mails specified in attributs
imap (imap_object): This value is the imap server's object.
mails (list): This value is an email list to delete.
Returns:
list of str: all emails content
"""
for mail in mails:
imap.store(mail,"+FLAGS","\\Deleted")
imap.expunge()
def delete_all_emails(imap_server, username, password, verbose=True):
""" Delete all emails in INBOX.
imap_server_ip (str): This value is the imap server's ip.
imap_login (str): This value is the username login.
imap_password (str): This value is the password login.
verbose (boolean): Print information about function progress.
Returns:
list of str: all emails content
"""
imap = imap_connect(imap_server, username, password, verbose=False)
delete_messages = []
imap.select("INBOX")
status, mails = imap.search(None, "ALL")
for mail in mails[0].split():
delete_messages.append(mail)
delete_emails(imap, delete_messages)
status, mails = imap.search(None, "ALL")
if len(mails) == 1:
print(Fore.GREEN+ " ==> [delete_all_emails] was successfull --- " + time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
imap_logout(imap, verbose=False)
return 0
print(Fore.RED+ " ==> [delete_all_emails] failed! --- " + time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
imap_logout(imap, verbose=False)
return 1
def imap_logout(imap, verbose=True):
""" Logout out to the imap service
imap (imap_object): This value is the imap server's object.
Returns:
None
"""
try:
imap.close()
imap.logout()
if verbose:
print(Fore.GREEN+ " ==> [imap_logout] was successfull" +Style.RESET_ALL)
except:
print(Fore.RED+ " ==> [imap_logout] failed" +Style.RESET_ALL)
def smtp_logout(smtp, verbose=True):
""" Logout out to the smtp service
smtp (smtp_object): This value is the smtp server's object.
Returns:
None
"""
try:
smtp.quit()
if verbose:
print(Fore.GREEN+ " ==> [smtp_logout] was successfull" +Style.RESET_ALL)
except:
print(Fore.RED+ " ==> [smtp_logout] failed" +Style.RESET_ALL)
| pt | 0.133421 | 2.510229 | 3 |
setup.py | vishnumenon/pyims | 1 | 13688 | import setuptools
# Reuse the README verbatim as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata for the pyims wrapper (IMS word-sense disambiguation).
setuptools.setup(name="pyims",
      version='0.1.2',
      description='A python wrapper for the IMS Word Sense Disambiguation tool (Zhong and Ng, 2010)',
      url='http://github.com/vishnumenon/pyims',
      author="<NAME>",
      author_email="<EMAIL>",
      long_description=long_description,
      long_description_content_type="text/markdown",
      license='MIT',
      packages=setuptools.find_packages(),
      install_requires=[
          'nltk',
      ],
      classifiers=(
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
      ),
      zip_safe=False)
| import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(name="pyims",
version='0.1.2',
description='A python wrapper for the IMS Word Sense Disambiguation tool (Zhong and Ng, 2010)',
url='http://github.com/vishnumenon/pyims',
author="<NAME>",
author_email="<EMAIL>",
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
packages=setuptools.find_packages(),
install_requires=[
'nltk',
],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
zip_safe=False)
| none | 1 | 1.426305 | 1 |
hass_apps/schedy/actor/__init__.py | weese/hass-apps | 0 | 13689 | """
This package contains the various actor implementations.
"""
import typing as T
from .base import ActorBase
from .custom import CustomActor
from .generic import GenericActor
from .switch import SwitchActor
from .thermostat import ThermostatActor
__all__ = [
"ActorBase",
"CustomActor",
"GenericActor",
"SwitchActor",
"ThermostatActor",
]
def get_actor_types() -> T.Iterable[T.Type[ActorBase]]:
    """Yield every concrete actor class exported by this package."""
    namespace = globals()
    for exported_name in __all__:
        candidate = namespace.get(exported_name)
        if candidate is ActorBase or not isinstance(candidate, type):
            continue
        if issubclass(candidate, ActorBase):
            yield candidate
| """
This package contains the various actor implementations.
"""
import typing as T
from .base import ActorBase
from .custom import CustomActor
from .generic import GenericActor
from .switch import SwitchActor
from .thermostat import ThermostatActor
__all__ = [
"ActorBase",
"CustomActor",
"GenericActor",
"SwitchActor",
"ThermostatActor",
]
def get_actor_types() -> T.Iterable[T.Type[ActorBase]]:
"""Yields available actor classes."""
globs = globals()
for actor_class_name in __all__:
actor_type = globs.get(actor_class_name)
if actor_type is not ActorBase and isinstance(actor_type, type) and \
issubclass(actor_type, ActorBase):
yield actor_type
| pt | 0.20378 | 2.871019 | 3 |
triggmine_sdk/tests/test_client.py | TriggMineAdmin/TriggMine-Python-SDK | 0 | 13690 | # UnitTests of all triggmine events
import unittest
import datetime
from client import Client
class ClientTest(unittest.TestCase):
    def setUp(self):
        # Placeholder endpoint and key: replace with real credentials
        # before running these integration tests.
        self.client = Client('YOUR API_URL', 'YOUR API_KEY')
    # Registration event
    def test_registration_success(self):
        """Send a registration event and expect HTTP 201 (created)."""
        response = self.client.registration.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
                                      device_id_1='465060737',
                                      customer_id='1',
                                      customer_first_name='Jhon',
                                      customer_last_name='Doe',
                                      customer_email='<EMAIL>',
                                      customer_date_created=str(datetime.datetime.now()))
        self.assertEqual(201, response.status_code)
    # Diagnostic event
    def test_diagnostic_success(self):
        """Send a plugin-install diagnostic event and expect HTTP 201."""
        response = self.client.diagnostic.create(date_created=str(datetime.datetime.now()),
                                      diagnostic_type="Install_Test_Plugin", description="TestCms", status=1)
        self.assertEqual(201, response.status_code)
# Cart event
def test_cart_success(self):
response = self.client.cart.create(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
    # Login event
    def test_login_success(self):
        """Send a login event; expects HTTP 200 (not 201 like registration)."""
        response = self.client.login.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
                                      device_id_1='465060737',
                                      customer_id='1',
                                      customer_first_name='Jhon',
                                      customer_last_name='Doe',
                                      customer_email='<EMAIL>',
                                      customer_date_created=str(datetime.datetime.now()))
        self.assertEqual(200, response.status_code)
# Logout event
def test_logout_success(self):
response = self.client.logout.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
device_id_1='465060737',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created=str(datetime.datetime.now()))
self.assertEqual(200, response.status_code)
# History event
def test_history_success(self):
response = self.client.history.create(orders=
[dict(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37")),
dict(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))])
self.assertEqual(200, response.status_code)
# Navigation event
def test_navigation_success(self):
response = self.client.navigation.create(user_agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
self.assertEqual(201, response.status_code)
# Order event
def test_order_success(self):
response = self.client.order.create(order_id="22",price_total="210.86",qty_total="1",status="Paid",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
self.assertEqual(201, response.status_code)
if __name__ == '__main__':
unittest.main() | # UnitTests of all triggmine events
import unittest
import datetime
from client import Client
class ClientTest(unittest.TestCase):
def setUp(self):
self.client = Client('YOUR API_URL', 'YOUR API_KEY')
# Registration event
def test_registration_success(self):
response = self.client.registration.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
device_id_1='465060737',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created=str(datetime.datetime.now()))
self.assertEqual(201, response.status_code)
# Diagnostic event
def test_diagnostic_success(self):
response = self.client.diagnostic.create(date_created=str(datetime.datetime.now()),
diagnostic_type="Install_Test_Plugin", description="TestCms", status=1)
self.assertEqual(201, response.status_code)
# Cart event
def test_cart_success(self):
response = self.client.cart.create(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
# Login event
def test_login_success(self):
response = self.client.login.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
device_id_1='465060737',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created=str(datetime.datetime.now()))
self.assertEqual(200, response.status_code)
# Logout event
def test_logout_success(self):
response = self.client.logout.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
device_id_1='465060737',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created=str(datetime.datetime.now()))
self.assertEqual(200, response.status_code)
# History event
def test_history_success(self):
response = self.client.history.create(orders=
[dict(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37")),
dict(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))])
self.assertEqual(200, response.status_code)
# Navigation event
def test_navigation_success(self):
response = self.client.navigation.create(user_agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
self.assertEqual(201, response.status_code)
# Order event
def test_order_success(self):
response = self.client.order.create(order_id="22",price_total="210.86",qty_total="1",status="Paid",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
self.assertEqual(201, response.status_code)
if __name__ == '__main__':
unittest.main() | pt | 0.219657 | 2.303141 | 2 |
lib/python2.7/site-packages/mpl_toolkits/tests/test_axes_grid.py | wfehrnstrom/harmonize | 1 | 13691 | <gh_stars>1-10
from matplotlib.testing.decorators import image_comparison
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import matplotlib.pyplot as plt
@image_comparison(baseline_images=['imagegrid_cbar_mode'],
extensions=['png'],
remove_text=True)
def test_imagegrid_cbar_mode_edge():
X, Y = np.meshgrid(np.linspace(0, 6, 30), np.linspace(0, 6, 30))
arr = np.sin(X) * np.cos(Y) + 1j*(np.sin(3*Y) * np.cos(Y/2.))
fig = plt.figure(figsize=(18, 9))
positions = (241, 242, 243, 244, 245, 246, 247, 248)
directions = ['row']*4 + ['column']*4
cbar_locations = ['left', 'right', 'top', 'bottom']*2
for position, direction, location in zip(positions,
directions,
cbar_locations):
grid = ImageGrid(fig, position,
nrows_ncols=(2, 2),
direction=direction,
cbar_location=location,
cbar_size='20%',
cbar_mode='edge')
ax1, ax2, ax3, ax4, = grid
im1 = ax1.imshow(arr.real, cmap='nipy_spectral')
im2 = ax2.imshow(arr.imag, cmap='hot')
im3 = ax3.imshow(np.abs(arr), cmap='jet')
im4 = ax4.imshow(np.arctan2(arr.imag, arr.real), cmap='hsv')
# Some of these colorbars will be overridden by later ones,
# depending on the direction and cbar_location
ax1.cax.colorbar(im1)
ax2.cax.colorbar(im2)
ax3.cax.colorbar(im3)
ax4.cax.colorbar(im4)
| from matplotlib.testing.decorators import image_comparison
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import matplotlib.pyplot as plt
@image_comparison(baseline_images=['imagegrid_cbar_mode'],
extensions=['png'],
remove_text=True)
def test_imagegrid_cbar_mode_edge():
X, Y = np.meshgrid(np.linspace(0, 6, 30), np.linspace(0, 6, 30))
arr = np.sin(X) * np.cos(Y) + 1j*(np.sin(3*Y) * np.cos(Y/2.))
fig = plt.figure(figsize=(18, 9))
positions = (241, 242, 243, 244, 245, 246, 247, 248)
directions = ['row']*4 + ['column']*4
cbar_locations = ['left', 'right', 'top', 'bottom']*2
for position, direction, location in zip(positions,
directions,
cbar_locations):
grid = ImageGrid(fig, position,
nrows_ncols=(2, 2),
direction=direction,
cbar_location=location,
cbar_size='20%',
cbar_mode='edge')
ax1, ax2, ax3, ax4, = grid
im1 = ax1.imshow(arr.real, cmap='nipy_spectral')
im2 = ax2.imshow(arr.imag, cmap='hot')
im3 = ax3.imshow(np.abs(arr), cmap='jet')
im4 = ax4.imshow(np.arctan2(arr.imag, arr.real), cmap='hsv')
# Some of these colorbars will be overridden by later ones,
# depending on the direction and cbar_location
ax1.cax.colorbar(im1)
ax2.cax.colorbar(im2)
ax3.cax.colorbar(im3)
ax4.cax.colorbar(im4) | pt | 0.203566 | 2.246878 | 2 |
src/resources/lib/listitem.py | ffoxin/kodi.kino.pub | 59 | 13692 | # -*- coding: utf-8 -*-
from xbmcgui import ListItem
class ExtendedListItem(ListItem):
def __new__(cls, name, label2="", path="", **kwargs):
return super(ExtendedListItem, cls).__new__(cls, name, label2, path)
def __init__(
self,
name,
label2="",
iconImage="",
thumbnailImage="",
path="",
poster=None,
fanart=None,
video_info=None,
properties=None,
addContextMenuItems=False,
subtitles=None,
plugin=None,
):
super(ExtendedListItem, self).__init__(name, label2, path)
self.plugin = plugin
if properties:
self.setProperties(**properties)
if video_info:
self.setInfo("video", video_info)
self.setResumeTime(video_info.get("time"))
if poster:
self.setArt({"poster": poster})
if fanart:
self.setArt({"fanart": fanart})
if thumbnailImage:
self.setArt({"thumb": thumbnailImage})
if iconImage:
self.setArt({"icon": iconImage})
if subtitles:
self.setSubtitles(subtitles)
if addContextMenuItems:
self.addPredefinedContextMenuItems()
def _addWatchlistContextMenuItem(self, menu_items):
in_watchlist = self.getProperty("in_watchlist")
if in_watchlist == "":
return
label = "Не буду смотреть" if int(in_watchlist) else "Буду смотреть"
url = self.plugin.routing.build_url(
"toggle_watchlist", self.getProperty("id"), added=int(not int(in_watchlist))
)
menu_items.append((label, f"Container.Update({url})"))
def _addWatchedContextMenuItem(self, menu_items):
item_id = self.getProperty("id")
season_number = self.getVideoInfoTag().getSeason()
video_number = self.getVideoInfoTag().getEpisode()
video_number = video_number if video_number != -1 else 1
watched = int(self.getVideoInfoTag().getPlayCount()) > 0
label = "Отметить как непросмотренное" if watched else "Отметить как просмотренное"
if self.getVideoInfoTag().getMediaType() == "tvshow":
return
elif self.getVideoInfoTag().getMediaType() == "season":
kwargs = {"season": season_number}
elif self.getProperty("subtype") == "multi":
kwargs = {}
elif season_number != -1:
kwargs = {"season": season_number, "video": video_number}
else:
kwargs = {"video": video_number}
url = self.plugin.routing.build_url("toggle_watched", item_id, **kwargs)
menu_items.append((label, f"Container.Update({url})"))
def _addBookmarksContextMenuItem(self, menu_items):
if self.getVideoInfoTag().getMediaType() == "season":
return
item_id = self.getProperty("id")
label = "Изменить закладки"
url = self.plugin.routing.build_url("edit_bookmarks", item_id)
menu_items.append((label, f"Container.Update({url})"))
def _addCommentsContextMenuItem(self, menu_items):
item_id = self.getProperty("id")
label = "Комментарии KinoPub"
url = self.plugin.routing.build_url("comments", item_id)
menu_items.append((label, f"Container.Update({url})"))
def _addSimilarContextMenuItem(self, menu_items):
item_id = self.getProperty("id")
title = self.getLabel()
label = "Похожие фильмы"
url = self.plugin.routing.build_url("similar", item_id, title=title)
menu_items.append((label, f"Container.Update({url})"))
def _addSeparatorContextMenuItem(self, menu_items):
# 21 is the maximum number of characters when the horizontal scrolling doesn't appear.
menu_items.append(("─" * 21, ""))
def addPredefinedContextMenuItems(self, items=None):
items = items or ["watched", "watchlist", "bookmarks", "comments", "similar", "separator"]
menu_items = []
for item in items:
getattr(self, f"_add{item.capitalize()}ContextMenuItem")(menu_items)
self.addContextMenuItems(menu_items)
def setProperties(self, **properties):
for prop, value in properties.items():
self.setProperty(prop, str(value))
def setResumeTime(self, resumetime, totaltime=None):
totaltime = float(totaltime or self.getVideoInfoTag().getDuration())
if (
resumetime is not None
and totaltime > 0
and 100 * resumetime / totaltime
<= self.plugin.settings.advanced("video", "playcountminimumpercent")
and resumetime > self.plugin.settings.advanced("video", "ignoresecondsatstart")
or resumetime == 0
):
self.setProperties(resumetime=resumetime, totaltime=totaltime)
def markAdvert(self, has_advert):
if self.plugin.settings.mark_advert == "true" and has_advert:
self.setLabel(f"{self.getLabel()} (!)")
| # -*- coding: utf-8 -*-
from xbmcgui import ListItem
class ExtendedListItem(ListItem):
def __new__(cls, name, label2="", path="", **kwargs):
return super(ExtendedListItem, cls).__new__(cls, name, label2, path)
def __init__(
self,
name,
label2="",
iconImage="",
thumbnailImage="",
path="",
poster=None,
fanart=None,
video_info=None,
properties=None,
addContextMenuItems=False,
subtitles=None,
plugin=None,
):
super(ExtendedListItem, self).__init__(name, label2, path)
self.plugin = plugin
if properties:
self.setProperties(**properties)
if video_info:
self.setInfo("video", video_info)
self.setResumeTime(video_info.get("time"))
if poster:
self.setArt({"poster": poster})
if fanart:
self.setArt({"fanart": fanart})
if thumbnailImage:
self.setArt({"thumb": thumbnailImage})
if iconImage:
self.setArt({"icon": iconImage})
if subtitles:
self.setSubtitles(subtitles)
if addContextMenuItems:
self.addPredefinedContextMenuItems()
def _addWatchlistContextMenuItem(self, menu_items):
in_watchlist = self.getProperty("in_watchlist")
if in_watchlist == "":
return
label = "Не буду смотреть" if int(in_watchlist) else "Буду смотреть"
url = self.plugin.routing.build_url(
"toggle_watchlist", self.getProperty("id"), added=int(not int(in_watchlist))
)
menu_items.append((label, f"Container.Update({url})"))
def _addWatchedContextMenuItem(self, menu_items):
item_id = self.getProperty("id")
season_number = self.getVideoInfoTag().getSeason()
video_number = self.getVideoInfoTag().getEpisode()
video_number = video_number if video_number != -1 else 1
watched = int(self.getVideoInfoTag().getPlayCount()) > 0
label = "Отметить как непросмотренное" if watched else "Отметить как просмотренное"
if self.getVideoInfoTag().getMediaType() == "tvshow":
return
elif self.getVideoInfoTag().getMediaType() == "season":
kwargs = {"season": season_number}
elif self.getProperty("subtype") == "multi":
kwargs = {}
elif season_number != -1:
kwargs = {"season": season_number, "video": video_number}
else:
kwargs = {"video": video_number}
url = self.plugin.routing.build_url("toggle_watched", item_id, **kwargs)
menu_items.append((label, f"Container.Update({url})"))
def _addBookmarksContextMenuItem(self, menu_items):
if self.getVideoInfoTag().getMediaType() == "season":
return
item_id = self.getProperty("id")
label = "Изменить закладки"
url = self.plugin.routing.build_url("edit_bookmarks", item_id)
menu_items.append((label, f"Container.Update({url})"))
def _addCommentsContextMenuItem(self, menu_items):
item_id = self.getProperty("id")
label = "Комментарии KinoPub"
url = self.plugin.routing.build_url("comments", item_id)
menu_items.append((label, f"Container.Update({url})"))
def _addSimilarContextMenuItem(self, menu_items):
item_id = self.getProperty("id")
title = self.getLabel()
label = "Похожие фильмы"
url = self.plugin.routing.build_url("similar", item_id, title=title)
menu_items.append((label, f"Container.Update({url})"))
def _addSeparatorContextMenuItem(self, menu_items):
# 21 is the maximum number of characters when the horizontal scrolling doesn't appear.
menu_items.append(("─" * 21, ""))
def addPredefinedContextMenuItems(self, items=None):
items = items or ["watched", "watchlist", "bookmarks", "comments", "similar", "separator"]
menu_items = []
for item in items:
getattr(self, f"_add{item.capitalize()}ContextMenuItem")(menu_items)
self.addContextMenuItems(menu_items)
def setProperties(self, **properties):
for prop, value in properties.items():
self.setProperty(prop, str(value))
def setResumeTime(self, resumetime, totaltime=None):
totaltime = float(totaltime or self.getVideoInfoTag().getDuration())
if (
resumetime is not None
and totaltime > 0
and 100 * resumetime / totaltime
<= self.plugin.settings.advanced("video", "playcountminimumpercent")
and resumetime > self.plugin.settings.advanced("video", "ignoresecondsatstart")
or resumetime == 0
):
self.setProperties(resumetime=resumetime, totaltime=totaltime)
def markAdvert(self, has_advert):
if self.plugin.settings.mark_advert == "true" and has_advert:
self.setLabel(f"{self.getLabel()} (!)")
| pt | 0.132542 | 2.238119 | 2 |
static/firespread.py | thabat12/TetraNet | 0 | 13693 | <reponame>thabat12/TetraNet<gh_stars>0
import numpy as np
import imageio
import tensorflow as tf
from keras.models import load_model
from PIL import Image, ImageOps
import numpy as np
from numpy import asarray
from matplotlib import pyplot as plt
from keras.utils import normalize
import os
import random
import azure_get_unet
import random
# for testing purposes only
def img_dir_to_arr(image_dir):
mask = azure_get_unet.get_mask(image_dir)
mask = mask.astype('uint8')
return mask
def generate_firespread_prediction(image_dir):
original_shape = Image.open(image_dir).size
result = img_dir_to_arr(image_dir)
a = []
for i in range(1, 100):
a.append(random.uniform(0, 1))
print(a)
# Cell States
# 0 = Clear, 1 = Fuel, 2 = Fire
prob = 1.0 # probability of a cell being fuel, otherwise it's clear
total_time = 300 # simulation time
terrain_size = [128, 128] # size of the simulation: 10000 cells
result = asarray(result)
result.flags
state = result.copy()
state.setflags(write=1)
print(state[80][1])
# states hold the state of each cell
states = np.zeros((total_time, *terrain_size))
states[0] = state
states[0][1][110] = 2
print(states.shape)
print(states[0][1])
z = np.where(states[0][1] == 1)
print(z)
# set the middle cell on fire!!!
import random
for t in range(1, total_time):
# Make a copy of the original states
states[t] = states[t - 1].copy()
for x in range(1, terrain_size[0] - 1):
for y in range(1, terrain_size[1] - 1):
if states[t - 1, x, y] == 2: # It's on fire
states[t, x, y] = 0 # Put it out and clear it
# If there's fuel surrounding it
# set it on fire!
temp = random.uniform(0, 1)
if states[t - 1, x + 1, y] == 1 and temp > prob:
states[t, x + 1, y] = 2
temp = random.uniform(0, 1)
if states[t - 1, x - 1, y] == 1 and temp > prob:
states[t, x - 1, y] = 2
temp = random.uniform(0, 1)
if states[t - 1, x, y + 1] == 1 and temp > prob:
states[t, x, y + 1] = 2
temp = random.uniform(0, 1)
if states[t - 1, x, y - 1] == 1 and temp > prob:
states[t, x, y - 1] = 2
colored = np.zeros((total_time, *terrain_size, 3), dtype=np.uint8)
# Color
for t in range(states.shape[0]):
for x in range(states[t].shape[0]):
for y in range(states[t].shape[1]):
value = states[t, x, y].copy()
if value == 0:
colored[t, x, y] = [139, 69, 19] # Clear
elif value == 1:
colored[t, x, y] = [0, 255, 0] # Fuel
elif value == 2:
colored[t, x, y] = [255, 0, 0] # Burning
# Crop
cropped = colored[:200, 1:terrain_size[0] - 1, 1:terrain_size[1] - 1]
imageio.mimsave('./video.gif', cropped)
resized_list = []
for arr in cropped:
img = Image.fromarray(arr)
img = img.resize((original_shape[0], original_shape[1]))
img = asarray(img)
resized_list.append(img)
resized_list = np.array(resized_list)
print(resized_list.shape)
imageio.mimsave('./ppea.gif', resized_list)
| import numpy as np
import imageio
import tensorflow as tf
from keras.models import load_model
from PIL import Image, ImageOps
import numpy as np
from numpy import asarray
from matplotlib import pyplot as plt
from keras.utils import normalize
import os
import random
import azure_get_unet
import random
# for testing purposes only
def img_dir_to_arr(image_dir):
mask = azure_get_unet.get_mask(image_dir)
mask = mask.astype('uint8')
return mask
def generate_firespread_prediction(image_dir):
original_shape = Image.open(image_dir).size
result = img_dir_to_arr(image_dir)
a = []
for i in range(1, 100):
a.append(random.uniform(0, 1))
print(a)
# Cell States
# 0 = Clear, 1 = Fuel, 2 = Fire
prob = 1.0 # probability of a cell being fuel, otherwise it's clear
total_time = 300 # simulation time
terrain_size = [128, 128] # size of the simulation: 10000 cells
result = asarray(result)
result.flags
state = result.copy()
state.setflags(write=1)
print(state[80][1])
# states hold the state of each cell
states = np.zeros((total_time, *terrain_size))
states[0] = state
states[0][1][110] = 2
print(states.shape)
print(states[0][1])
z = np.where(states[0][1] == 1)
print(z)
# set the middle cell on fire!!!
import random
for t in range(1, total_time):
# Make a copy of the original states
states[t] = states[t - 1].copy()
for x in range(1, terrain_size[0] - 1):
for y in range(1, terrain_size[1] - 1):
if states[t - 1, x, y] == 2: # It's on fire
states[t, x, y] = 0 # Put it out and clear it
# If there's fuel surrounding it
# set it on fire!
temp = random.uniform(0, 1)
if states[t - 1, x + 1, y] == 1 and temp > prob:
states[t, x + 1, y] = 2
temp = random.uniform(0, 1)
if states[t - 1, x - 1, y] == 1 and temp > prob:
states[t, x - 1, y] = 2
temp = random.uniform(0, 1)
if states[t - 1, x, y + 1] == 1 and temp > prob:
states[t, x, y + 1] = 2
temp = random.uniform(0, 1)
if states[t - 1, x, y - 1] == 1 and temp > prob:
states[t, x, y - 1] = 2
colored = np.zeros((total_time, *terrain_size, 3), dtype=np.uint8)
# Color
for t in range(states.shape[0]):
for x in range(states[t].shape[0]):
for y in range(states[t].shape[1]):
value = states[t, x, y].copy()
if value == 0:
colored[t, x, y] = [139, 69, 19] # Clear
elif value == 1:
colored[t, x, y] = [0, 255, 0] # Fuel
elif value == 2:
colored[t, x, y] = [255, 0, 0] # Burning
# Crop
cropped = colored[:200, 1:terrain_size[0] - 1, 1:terrain_size[1] - 1]
imageio.mimsave('./video.gif', cropped)
resized_list = []
for arr in cropped:
img = Image.fromarray(arr)
img = img.resize((original_shape[0], original_shape[1]))
img = asarray(img)
resized_list.append(img)
resized_list = np.array(resized_list)
print(resized_list.shape)
imageio.mimsave('./ppea.gif', resized_list) | pt | 0.160758 | 2.405678 | 2 |
streamlitfront/tests/dummy_app.py | i2mint/streamlitfront | 0 | 13694 | <filename>streamlitfront/tests/dummy_app.py
from streamlitfront.base import get_pages_specs, get_func_args_specs, BasePageFunc
import streamlit as st
from pydantic import BaseModel
import streamlit_pydantic as sp
def multiple(x: int, word: str) -> str:
return str(x) + word
class Input(BaseModel):
x: int
y: str
def multiple_input(input: Input):
return input.x * input.y
class SimplePageFunc2(BasePageFunc):
def __call__(self, state):
self.prepare_view(state)
# args_specs = get_func_args_specs(self.func)
element = sp.pydantic_input('input', Input)
st.write(element)
# func_inputs = dict(self.sig.defaults, **state['page_state'][self.func])
func_inputs = {'input': element}
st.write(func_inputs)
# for argname, spec in args_specs.items():
# st.write(f"argname:{argname}")
# st.write(f"spec:{spec}")
# element_factory, kwargs = spec["element_factory"]
# func_inputs[argname] = element_factory(**kwargs)
# st.write(f"element_factory:{element_factory}")
# st.write(f"kwargs:{kwargs}")
submit = st.button('Submit')
if submit:
st.write(self.func(func_inputs['input']))
# state['page_state'][self.func].clear()
DFLT_PAGE_FACTORY = SimplePageFunc2
if __name__ == '__main__':
app = get_pages_specs([multiple_input], page_factory=DFLT_PAGE_FACTORY)
app['Multiple Input'](None)
| <filename>streamlitfront/tests/dummy_app.py
from streamlitfront.base import get_pages_specs, get_func_args_specs, BasePageFunc
import streamlit as st
from pydantic import BaseModel
import streamlit_pydantic as sp
def multiple(x: int, word: str) -> str:
return str(x) + word
class Input(BaseModel):
x: int
y: str
def multiple_input(input: Input):
return input.x * input.y
class SimplePageFunc2(BasePageFunc):
def __call__(self, state):
self.prepare_view(state)
# args_specs = get_func_args_specs(self.func)
element = sp.pydantic_input('input', Input)
st.write(element)
# func_inputs = dict(self.sig.defaults, **state['page_state'][self.func])
func_inputs = {'input': element}
st.write(func_inputs)
# for argname, spec in args_specs.items():
# st.write(f"argname:{argname}")
# st.write(f"spec:{spec}")
# element_factory, kwargs = spec["element_factory"]
# func_inputs[argname] = element_factory(**kwargs)
# st.write(f"element_factory:{element_factory}")
# st.write(f"kwargs:{kwargs}")
submit = st.button('Submit')
if submit:
st.write(self.func(func_inputs['input']))
# state['page_state'][self.func].clear()
DFLT_PAGE_FACTORY = SimplePageFunc2
if __name__ == '__main__':
app = get_pages_specs([multiple_input], page_factory=DFLT_PAGE_FACTORY)
app['Multiple Input'](None)
| pt | 0.122962 | 2.592676 | 3 |
app/build.py | dhost-project/build-microservice | 0 | 13695 | from flask_restful import Resource, reqparse
parser = reqparse.RequestParser()
parser.add_argument('command', required=True)
parser.add_argument('docker', required=True)
class Build(Resource):
def get(self):
return {'status': 'building'}
def post(self):
args = parser.parse_args()
print(args)
return {'status': 'started'}
| from flask_restful import Resource, reqparse
parser = reqparse.RequestParser()
parser.add_argument('command', required=True)
parser.add_argument('docker', required=True)
class Build(Resource):
def get(self):
return {'status': 'building'}
def post(self):
args = parser.parse_args()
print(args)
return {'status': 'started'}
| none | 1 | 2.477288 | 2 |
src/unicon/plugins/windows/__init__.py | nielsvanhooy/unicon.plugins | 18 | 13696 | <reponame>nielsvanhooy/unicon.plugins
__copyright__ = "# Copyright (c) 2018 by cisco Systems, Inc. All rights reserved."
__author__ = "dwapstra"
from unicon.plugins.generic import GenericSingleRpConnection, service_implementation as svc
from unicon.plugins.generic.connection_provider import GenericSingleRpConnectionProvider
from unicon.plugins.generic import ServiceList, service_implementation as svc
from . import service_implementation as windows_svc
from .statemachine import WindowsStateMachine
from .settings import WindowsSettings
class WindowsConnectionProvider(GenericSingleRpConnectionProvider):
"""
Connection provider class for windows connections.
"""
def init_handle(self):
pass
class WindowsServiceList(ServiceList):
""" windows services. """
def __init__(self):
super().__init__()
self.execute = windows_svc.Execute
class WindowsConnection(GenericSingleRpConnection):
"""
Connection class for windows connections.
"""
os = 'windows'
platform = None
chassis_type = 'single_rp'
state_machine_class = WindowsStateMachine
connection_provider_class = WindowsConnectionProvider
subcommand_list = WindowsServiceList
settings = WindowsSettings()
| __copyright__ = "# Copyright (c) 2018 by cisco Systems, Inc. All rights reserved."
__author__ = "dwapstra"
from unicon.plugins.generic import GenericSingleRpConnection, service_implementation as svc
from unicon.plugins.generic.connection_provider import GenericSingleRpConnectionProvider
from unicon.plugins.generic import ServiceList, service_implementation as svc
from . import service_implementation as windows_svc
from .statemachine import WindowsStateMachine
from .settings import WindowsSettings
class WindowsConnectionProvider(GenericSingleRpConnectionProvider):
"""
Connection provider class for windows connections.
"""
def init_handle(self):
pass
class WindowsServiceList(ServiceList):
""" windows services. """
def __init__(self):
super().__init__()
self.execute = windows_svc.Execute
class WindowsConnection(GenericSingleRpConnection):
"""
Connection class for windows connections.
"""
os = 'windows'
platform = None
chassis_type = 'single_rp'
state_machine_class = WindowsStateMachine
connection_provider_class = WindowsConnectionProvider
subcommand_list = WindowsServiceList
settings = WindowsSettings() | pt | 0.090202 | 1.936617 | 2 |
sera/commands/symlink.py | bretth/sera | 0 | 13697 | from pathlib import Path
from shutil import which
from subprocess import run, PIPE
import click
from .main import main, lprint
@main.command()
@click.pass_context
@click.argument('watcher')
def symlink(ctx, watcher):
    """Locally install a symlink to sera"""
    # This command only makes sense on the local side; abort if a
    # watcher target was passed to the parent command.
    if ctx.parent.params['watcher']:
        click.echo("This command runs locally")
        raise click.Abort
    # Fix: which() returns None when 'sera' is not on PATH, and
    # Path(None) would raise a confusing TypeError.  Fail with a clear
    # message instead.
    sera_path = which('sera')
    if sera_path is None:
        click.echo("'sera' executable not found on PATH")
        raise click.Abort
    source = Path(sera_path)
    # Install the symlink next to the sera executable, named after the
    # watcher, so invoking that name runs sera as that watcher.
    target = source.parent / watcher
    if ctx.obj['verbosity']:
        click.echo('Installing symlink at %s' % str(target))
    out = run(
        ['ln', '-s', str(source), str(target)],
        stdout=PIPE,
        stderr=PIPE,
        universal_newlines=True)
    return lprint(ctx, out)
from shutil import which
from subprocess import run, PIPE
import click
from .main import main, lprint
@main.command()
@click.pass_context
@click.argument('watcher')
def symlink(ctx, watcher):
"""Locally install a symlink to sera"""
if ctx.parent.params['watcher']:
click.echo("This command runs locally")
raise click.Abort
source = Path(which('sera'))
target = source.parent / watcher
if ctx.obj['verbosity']:
click.echo('Installing symlink at %s' % str(target))
out = run(
['ln', '-s', str(source), str(target)],
stdout=PIPE,
stderr=PIPE,
universal_newlines=True)
return lprint(ctx, out) | es | 0.287199 | 2.388567 | 2 |
self-attention.py | dhkim2810/MaskedDatasetCondensation | 0 | 13698 | <reponame>dhkim2810/MaskedDatasetCondensation<gh_stars>0
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models import resnet18
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from torchvision.utils import save_image, make_grid
from matplotlib import pyplot as plt
from matplotlib.colors import hsv_to_rgb
from matplotlib.image import BboxImage
from matplotlib.transforms import Bbox, TransformedBbox
import numpy as np
from IPython import display
import requests
from io import BytesIO
from PIL import Image
from PIL import Image, ImageSequence
from IPython.display import HTML
import warnings
from matplotlib import rc
import gc
import matplotlib
# Embed TrueType (type 42) fonts in PDF/PS figure exports so text in
# saved figures stays selectable/editable instead of being outlined.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
gc.enable()
# Non-interactive plotting: figures are saved, not displayed.
plt.ioff()
def set_model():
    """Build a CIFAR-sized fully convolutional classifier from ResNet-18.

    Returns an nn.Sequential that maps a (B, 3, 32, 32) image batch to
    per-pixel class logits of shape (B, 10, 32, 32).
    """
    num_classes = 10
    # NOTE(review): pretrained=True is deprecated in newer torchvision
    # (replaced by weights=...) -- confirm the pinned torchvision version.
    resnet = resnet18(pretrained=True)
    # Replace the ImageNet stem (7x7 stride-2 conv) with a 3x3 stride-1
    # conv so 32x32 CIFAR inputs are not downsampled too aggressively.
    resnet.conv1 = nn.Conv2d(3,64,3,stride=1,padding=1)
    # Drop the average-pool and fc head; keep only the conv trunk.
    resnet_ = list(resnet.children())[:-2]
    # Replace the stem max-pool (child index 3) with a bilinear upsample
    # to recover spatial resolution instead of shrinking it.
    resnet_[3] = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
    # 1x1 conv head: one logit map per class.
    classifier = nn.Conv2d(512,num_classes,1)
    torch.nn.init.kaiming_normal_(classifier.weight)
    resnet_.append(classifier)
    # Upsample logits back to the 32x32 input resolution.
    resnet_.append(nn.Upsample(size=32, mode='bilinear', align_corners=False))
    tiny_resnet = nn.Sequential(*resnet_)
    return tiny_resnet
def set_data():
    """Create the CIFAR-10 train/test loaders and the class-name tuple.

    Returns:
        (train_loader, test_loader, class_names) where the train loader
        applies crop/flip augmentation and both loaders normalize with
        the standard CIFAR-10 channel statistics.
    """
    augment = transforms.Compose([
        transforms.RandomCrop(32, padding=8),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    plain = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    # Training split: shuffled, last partial batch dropped for stable
    # batch statistics.
    train_set = CIFAR10(root='/root/dataset/CIFAR', train=True, download=True, transform=augment)
    train_loader = DataLoader(train_set, batch_size=128, shuffle=True, num_workers=16, pin_memory=True, drop_last=True)
    # Evaluation split: deterministic order, no augmentation.
    test_set = CIFAR10(root='/root/dataset/CIFAR', train=False, download=True, transform=plain)
    test_loader = DataLoader(test_set, batch_size=100, shuffle=False, num_workers=16, pin_memory=True)
    labels = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    return train_loader, test_loader, labels
def attention(x):
    """Channel-pooled attention map in (0, 1).

    Collapses dim 1 (channels) with a smooth maximum (log-sum-exp) and
    squashes the result through a sigmoid; the output keeps a singleton
    channel dimension.
    """
    pooled = torch.logsumexp(x, dim=1, keepdim=True)
    return torch.sigmoid(pooled)
def main():
    """Train the fully convolutional classifier on CIFAR-10.

    Per-pixel logits are aggregated over the spatial map with a smooth
    max and trained against one-hot labels with BCE-with-logits.

    Returns:
        0 on completion.
    """
    trainloader, testloader, class_name = set_data()
    model = nn.DataParallel(set_model()).cuda()
    criterion = nn.BCEWithLogitsLoss()
    # Fix: the original SGD call omitted the required `lr` argument,
    # which raises TypeError on most torch versions.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
    # NOTE(review): the scheduler is stepped per batch with T_max=78 --
    # confirm this cycle length is intentional.
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 78, eta_min=0.001)
    num_epochs = 50
    for epoch in tqdm(range(num_epochs)):
        epoch_loss = 0.0
        acc = 0.0
        model.train()
        for i, (x, _label) in enumerate(trainloader):
            x = x.cuda()
            _label = _label.cuda()
            # Fix: pin num_classes so the one-hot width does not depend
            # on which labels happen to appear in the batch.
            label = F.one_hot(_label, num_classes=10).float()
            seg_out = model(x)
            # (Removed unused `attn = attention(seg_out)` dead compute.)
            # Smooth Max Aggregation over the spatial dimensions:
            # (2/1) * log(mean(exp(0.5 * logits))) per class map.
            logit = torch.log(torch.exp(seg_out*0.5).mean((-2,-1)))*2
            loss = criterion(logit, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            epoch_loss += loss.item()
            acc += (logit.argmax(-1)==_label).sum()
    return 0
# Script entry point: only train when run directly, not on import.
if __name__ == "__main__":
    main()
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models import resnet18
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from torchvision.utils import save_image, make_grid
from matplotlib import pyplot as plt
from matplotlib.colors import hsv_to_rgb
from matplotlib.image import BboxImage
from matplotlib.transforms import Bbox, TransformedBbox
import numpy as np
from IPython import display
import requests
from io import BytesIO
from PIL import Image
from PIL import Image, ImageSequence
from IPython.display import HTML
import warnings
from matplotlib import rc
import gc
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
gc.enable()
plt.ioff()
def set_model():
num_classes = 10
resnet = resnet18(pretrained=True)
resnet.conv1 = nn.Conv2d(3,64,3,stride=1,padding=1)
resnet_ = list(resnet.children())[:-2]
resnet_[3] = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
classifier = nn.Conv2d(512,num_classes,1)
torch.nn.init.kaiming_normal_(classifier.weight)
resnet_.append(classifier)
resnet_.append(nn.Upsample(size=32, mode='bilinear', align_corners=False))
tiny_resnet = nn.Sequential(*resnet_)
return tiny_resnet
def set_data():
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=8),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = CIFAR10(root='/root/dataset/CIFAR', train=True, download=True, transform=transform_train)
train_iter = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=16, pin_memory=True, drop_last=True)
testset = CIFAR10(root='/root/dataset/CIFAR', train=False, download=True, transform=transform_test)
test_iter = DataLoader(testset, batch_size=100, shuffle=False, num_workers=16, pin_memory=True)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
return train_iter, test_iter, classes
def attention(x):
return torch.sigmoid(torch.logsumexp(x,1, keepdim=True))
def main():
trainloader, testloader, class_name = set_data()
model = nn.DataParallel(set_model()).cuda()
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model.parameters(), momentum=0.9, weight_decay=1e-4)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,78,eta_min=0.001)
num_epochs = 50
for epoch in tqdm(range(num_epochs)):
epoch_loss = 0.0
acc = 0.0
var = 0.0
model.train()
train_pbar = trainloader
for i, (x, _label) in enumerate(train_pbar):
x = x.cuda()
_label = _label.cuda()
label = F.one_hot(_label).float()
seg_out = model(x)
attn = attention(seg_out)
# Smooth Max Aggregation
logit = torch.log(torch.exp(seg_out*0.5).mean((-2,-1)))*2
loss = criterion(logit, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
epoch_loss += loss.item()
acc += (logit.argmax(-1)==_label).sum()
return 0
if __name__ == "__main__":
main() | pt | 0.348731 | 2.238706 | 2 |
src/libraries/maimai_plate.py | Blitz-Raynor/Kiba | 4 | 13699 | from typing import Optional, Dict, List
import aiohttp
# Maps the plate prefix character (or 真1/真2) used in maimai plate names
# to the game version it refers to.  Several entries are duplicated to
# accept both Japanese and Simplified-Chinese glyph variants
# (e.g. 暁/晓, 櫻/樱, 輝/辉).  Keys and values are matched against the
# diving-fish prober API and must not be translated or altered.
plate_to_version = {
    '真1': 'maimai',
    '真2': 'maimai PLUS',
    '超': 'maimai GreeN',
    '檄': 'maimai GreeN PLUS',
    '橙': 'maimai ORANGE',
    '暁': 'maimai ORANGE PLUS',
    '晓': 'maimai ORANGE PLUS',
    '桃': 'maimai PiNK',
    '櫻': 'maimai PiNK PLUS',
    '樱': 'maimai PiNK PLUS',
    '紫': 'maimai MURASAKi',
    '菫': 'maimai MURASAKi PLUS',
    '堇': 'maimai MURASAKi PLUS',
    '白': 'maimai MiLK',
    '雪': 'MiLK PLUS',
    '輝': 'maimai FiNALE',
    '辉': 'maimai FiNALE',
    '熊': 'maimai でらっくす',
    '華': 'maimai でらっくす PLUS',
    '华': 'maimai でらっくす PLUS',
    '爽': 'maimai でらっくす Splash'
}
async def get_player_plate(payload: Dict):
    """Query the diving-fish prober for a player's plate progress.

    Args:
        payload: JSON body for the query (player identity / plate spec).

    Returns:
        (data, 0) on success, or (None, status) on an HTTP error, where
        status is the response code (400: bad request / unknown player,
        403: profile is private, etc.).
    """
    async with aiohttp.request("POST", "https://www.diving-fish.com/api/maimaidxprober/query/plate", json=payload) as resp:
        # Fix/generalization: the original only handled 400 and 403 and
        # fell through to resp.json() for any other error status (e.g.
        # 500/502), which raises on a non-JSON error page.  Propagate
        # every non-200 status the same way instead.
        if resp.status != 200:
            return None, resp.status
        plate_data = await resp.json()
        return plate_data, 0
import aiohttp
plate_to_version = {
'真1': 'maimai',
'真2': 'maimai PLUS',
'超': 'maimai GreeN',
'檄': 'maimai GreeN PLUS',
'橙': 'maimai ORANGE',
'暁': 'maimai ORANGE PLUS',
'晓': 'maimai ORANGE PLUS',
'桃': 'maimai PiNK',
'櫻': 'maimai PiNK PLUS',
'樱': 'maimai PiNK PLUS',
'紫': 'maimai MURASAKi',
'菫': 'maimai MURASAKi PLUS',
'堇': 'maimai MURASAKi PLUS',
'白': 'maimai MiLK',
'雪': 'MiLK PLUS',
'輝': 'maimai FiNALE',
'辉': 'maimai FiNALE',
'熊': 'maimai でらっくす',
'華': 'maimai でらっくす PLUS',
'华': 'maimai でらっくす PLUS',
'爽': 'maimai でらっくす Splash'
}
async def get_player_plate(payload: Dict):
async with aiohttp.request("POST", "https://www.diving-fish.com/api/maimaidxprober/query/plate", json=payload) as resp:
if resp.status == 400:
return None, 400
elif resp.status == 403:
return None, 403
plate_data = await resp.json()
return plate_data, 0 | none | 1 | 2.654469 | 3 |