max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
tests/test_edgeql_enums.py | sfermigier/edgedb | 7,302 | 12791331 | <gh_stars>1000+
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import edgedb
from edb.testbase import server as tb
class TestEdgeQLEnums(tb.QueryTestCase):
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'enums.esdl')
async def test_edgeql_enums_cast_01(self):
await self.assert_query_result(
r'''
SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'};
''',
{'RED', 'GREEN', 'BLUE'},
)
async def test_edgeql_enums_cast_02(self):
with self.assertRaisesRegex(
edgedb.InvalidValueError,
r'invalid input value for enum .+color_enum_t.+YELLOW'):
await self.con.execute(r'''
SELECT <color_enum_t>'YELLOW';
''')
async def test_edgeql_enums_cast_03(self):
with self.assertRaisesRegex(
edgedb.InvalidValueError,
r'invalid input value for enum .+color_enum_t.+red'):
await self.con.execute(r'''
SELECT <color_enum_t>'red';
''')
async def test_edgeql_enums_cast_04(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"operator '\+\+' cannot be applied to operands of type "
r"'std::str' and 'default::color_enum_t'"):
await self.con.execute(r'''
INSERT Foo {
color := 'BLUE'
};
SELECT 'The test color is: ' ++ Foo.color;
''')
async def test_edgeql_enums_cast_05(self):
await self.con.execute(
r'''
INSERT Foo {
color := 'BLUE'
};
''')
await self.assert_query_result(
r'''
SELECT 'The test color is: ' ++ <str>Foo.color;
''',
['The test color is: BLUE'],
)
async def test_edgeql_enums_pathsyntax_01(self):
with self.assertRaisesRegex(
edgedb.QueryError,
"enum path expression lacks an enum member name"):
async with self._run_and_rollback():
await self.con.execute('SELECT color_enum_t')
with self.assertRaisesRegex(
edgedb.QueryError,
"enum path expression lacks an enum member name"):
async with self._run_and_rollback():
await self.con.execute(
'WITH e := color_enum_t SELECT e.RED'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"unexpected reference to link property 'RED'"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t@RED'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"enum types do not support backlink"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t.<RED'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"an enum member name must follow enum type name in the path"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t[IS color_enum_t].RED'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"invalid property reference on a primitive type expression"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t.RED.GREEN'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"invalid property reference on a primitive type expression"):
async with self._run_and_rollback():
await self.con.execute(
'WITH x := color_enum_t.RED SELECT x.GREEN'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"enum has no member called 'RAD'",
_hint="did you mean 'RED'?"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t.RAD'
)
async def test_edgeql_enums_pathsyntax_02(self):
await self.assert_query_result(
r'''
SELECT color_enum_t.GREEN;
''',
{'GREEN'},
)
await self.assert_query_result(
r'''
SELECT default::color_enum_t.BLUE;
''',
{'BLUE'},
)
await self.assert_query_result(
r'''
WITH x := default::color_enum_t.RED SELECT x;
''',
{'RED'},
)
async def test_edgeql_enums_assignment_01(self):
# testing the INSERT assignment cast
await self.con.execute(
r'''
INSERT Foo {
color := 'RED'
};
''')
await self.assert_query_result(
r'''
SELECT Foo {
color
};
''',
[{
'color': 'RED',
}],
)
async def test_edgeql_enums_assignment_02(self):
await self.con.execute(
r'''
INSERT Foo {
color := 'RED'
};
''')
# testing the UPDATE assignment cast
await self.con.execute(
r'''
UPDATE Foo
SET {
color := 'GREEN'
};
''')
await self.assert_query_result(
r'''
SELECT Foo {
color
};
''',
[{
'color': 'GREEN',
}],
)
async def test_edgeql_enums_assignment_03(self):
# testing the INSERT assignment cast
await self.con.execute(
r'''
INSERT Bar;
''')
await self.assert_query_result(
r'''
SELECT Bar {
color
};
''',
[{
'color': 'RED',
}],
)
async def test_edgeql_enums_assignment_04(self):
await self.con.execute(
r'''
INSERT Bar;
''')
# testing the UPDATE assignment cast
await self.con.execute(
r'''
UPDATE Bar
SET {
color := 'GREEN'
};
''')
await self.assert_query_result(
r'''
SELECT Bar {
color
};
''',
[{
'color': 'GREEN',
}],
)
async def test_edgeql_enums_json_cast_01(self):
self.assertEqual(
await self.con.query(
"SELECT <json><color_enum_t>'RED'"
),
['"RED"'])
await self.assert_query_result(
"SELECT <color_enum_t><json>'RED'",
['RED'])
await self.assert_query_result(
"SELECT <color_enum_t>'RED'",
['RED'])
async def test_edgeql_enums_json_cast_02(self):
with self.assertRaisesRegex(
edgedb.InvalidValueError,
r'invalid input value for enum .+color_enum_t.+: "BANANA"'):
await self.con.execute("SELECT <color_enum_t><json>'BANANA'")
async def test_edgeql_enums_json_cast_03(self):
with self.assertRaisesRegex(
edgedb.InvalidValueError,
r'expected json string or null; got json number'):
await self.con.execute("SELECT <color_enum_t><json>12")
|
tests/integration/test_configinventory.py | vincentbernat/lldpd | 312 | 12791347 | <filename>tests/integration/test_configinventory.py
import os
import pytest
import platform
import time
import shlex
@pytest.mark.skipif("'LLDP-MED' not in config.lldpd.features",
reason="LLDP-MED not supported")
class TestConfigInventory(object):
def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces,
replace_file):
with namespaces(2):
if os.path.isdir("/sys/class/dmi/id"):
# /sys/class/dmi/id/*
for what, value in dict(product_version="1.14",
bios_version="1.10",
product_serial="45872512",
sys_vendor="Spectacular",
product_name="Workstation",
chassis_asset_tag="487122").items():
replace_file("/sys/class/dmi/id/{}".format(what),
value)
lldpd("-M", "1")
def test_default_inventory(namespaces, lldpcli):
with namespaces(1):
                out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
                if os.path.isdir("/sys/class/dmi/id"):
assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com'
assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14'
assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10'
assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512'
assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \
'Spectacular'
assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation'
assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122'
assert out['lldp.eth0.lldp-med.inventory.software'] == \
platform.release()
else:
assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.model' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.software' not in out.items()
test_default_inventory(namespaces, lldpcli)
custom_values = [
('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'),
('software-revision', 'software', 'E_2.7182818284590452354'),
('firmware-revision', 'firmware', 'PI_3.14159265358979323846'),
('serial', 'serial', 'FIBO_112358'),
('manufacturer', 'manufacturer', 'Cybertron'),
('model', 'model', 'OptimusPrime'),
('asset', 'asset', 'SQRT3_1.732050807568877')
]
with namespaces(2):
for what, pfx, value in custom_values:
result = lldpcli(
*shlex.split("configure inventory {} {}".format(what, value)))
assert result.returncode == 0
result = lldpcli("resume")
assert result.returncode == 0
result = lldpcli("update")
assert result.returncode == 0
time.sleep(3)
with namespaces(1):
out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
for what, pfx, value in custom_values:
key_to_find = "lldp.eth0.lldp-med.inventory.{}".format(pfx)
assert out[key_to_find] == value
with namespaces(2):
for what, pfx, value in custom_values:
result = lldpcli(
*shlex.split("unconfigure inventory {}".format(what)))
assert result.returncode == 0
result = lldpcli("resume")
assert result.returncode == 0
result = lldpcli("update")
assert result.returncode == 0
test_default_inventory(namespaces, lldpcli)
|
webservice/server/server/summ_eval/server/cli/__init__.py | mymusise/emnlp19-moverscore | 141 | 12791390 | def main():
from summ_eval.server import EvalServer
from summ_eval.server.helper import get_run_args
args = get_run_args()
server = EvalServer(args)
server.start()
server.join() |
stats/constants.py | mpope9/nba-sql | 113 | 12791395 | <gh_stars>100-1000
"""
Constants used in the application.
"""
"""
List of seasons.
"""
season_list = [
'1996-97',
'1997-98',
'1998-99',
'1999-00',
'2000-01',
'2001-02',
'2002-03',
'2003-04',
'2004-05',
'2005-06',
'2006-07',
'2007-08',
'2008-09',
'2009-10',
'2010-11',
'2011-12',
'2012-13',
'2013-14',
'2014-15',
'2015-16',
'2016-17',
'2017-18',
'2018-19',
'2019-20',
'2020-21',
'2021-22'
]
"""
Headers.
"""
headers = {
'Connection': 'keep-alive',
'Accept': 'application/json, text/plain, */*',
'x-nba-stats-token': 'true',
'User-Agent': (
#'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) '
#'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130'
#'Safari/537.36'
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
),
'x-nba-stats-origin': 'stats',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Referer': 'https://stats.nba.com/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
}
"""
Team IDs. (Thank you nba-api).
"""
team_ids = [
1610612737, # 'ATL'
1610612738, # 'BOS'
1610612739, # 'CLE'
1610612740, # 'NOP'
1610612741, # 'CHI'
1610612742, # 'DAL'
1610612743, # 'DEN'
1610612744, # 'GSW'
1610612745, # 'HOU'
1610612746, # 'LAC'
1610612747, # 'LAL'
1610612748, # 'MIA'
1610612749, # 'MIL'
1610612750, # 'MIN'
1610612751, # 'BKN'
1610612752, # 'NYK'
1610612753, # 'ORL'
1610612754, # 'IND'
1610612755, # 'PHI'
1610612756, # 'PHX'
1610612757, # 'POR'
1610612758, # 'SAC'
1610612759, # 'SAS'
1610612760, # 'OKC'
1610612761, # 'TOR'
1610612762, # 'UTA'
1610612763, # 'MEM'
1610612764, # 'WAS'
1610612765, # 'DET'
1610612766, # 'CHA'
]
"""
Mapping from team abbrev to id.
"""
team_abbrev_mapping = {
'ATL': 1610612737,
'BOS': 1610612738,
'CLE': 1610612739,
'NOP': 1610612740,
'NOK': 1610612740, # Old name.
'NOH': 1610612740, # Old name.
'CHI': 1610612741,
'DAL': 1610612742,
'DEN': 1610612743,
'GSW': 1610612744,
'HOU': 1610612745,
'LAC': 1610612746,
'LAL': 1610612747,
'MIA': 1610612748,
'MIL': 1610612749,
'MIN': 1610612750,
'BKN': 1610612751,
'NJN': 1610612751, # Old name.
'NYK': 1610612752,
'ORL': 1610612753,
'IND': 1610612754,
'PHI': 1610612755,
'PHX': 1610612756,
'POR': 1610612757,
'SAC': 1610612758,
'SAS': 1610612759,
'OKC': 1610612760,
    'SEA': 1610612760,  # Old name.
'TOR': 1610612761,
'UTA': 1610612762,
'VAN': 1610612763, # Old name.
'MEM': 1610612763,
'WAS': 1610612764,
'DET': 1610612765,
'CHA': 1610612766,
'CHH': 1610612766, # Old name.
}
"""
Play-by-play data has an EventMsgType field. This is an enum. There
is also the EventMsgActionField, which is a complex enum of
(EventMsgType, SubType).
We're going to make a lookup table of enum to value, then a lookup
table for the (EventMsgType, EventMsgActionType) pair.
"""
event_message_types = [
{'id': 1, 'string': 'FIELD_GOAL_MADE'},
{'id': 2, 'string': 'FIELD_GOAL_MISSED'},
{'id': 3, 'string': 'FREE_THROW'},
{'id': 4, 'string': 'REBOUND'},
{'id': 5, 'string': 'TURNOVER'},
{'id': 6, 'string': 'FOUL'},
{'id': 7, 'string': 'VIOLATION'},
{'id': 8, 'string': 'SUBSTITUTION'},
{'id': 9, 'string': 'TIMEOUT'},
{'id': 10, 'string': 'JUMP_BALL'},
{'id': 11, 'string': 'EJECTION'},
{'id': 12, 'string': 'PERIOD_BEGIN'},
{'id': 13, 'string': 'PERIOD_END'},
{'id': 18, 'string': 'UNKNOWN'}
]
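# Editorial sketch (not part of the original nba-sql source): one minimal way to
# realize the "enum id -> name" lookup described in the docstring above. The
# variable name below is an assumption, not an upstream identifier.
event_message_type_by_id = {row['id']: row['string'] for row in event_message_types}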
|
descarteslabs/common/graft/interpreter/__init__.py | descarteslabs/descarteslabs-python | 167 | 12791407 | <gh_stars>100-1000
from .interpreter import interpret
from . import exceptions
from .scopedchainmap import ScopedChainMap
__all__ = ["interpret", "exceptions", "ScopedChainMap"]
|
skyline/functions/database/queries/related_to_metric_groups.py | datastreaming/skyline-1 | 396 | 12791408 | <reponame>datastreaming/skyline-1
"""
Get anomalies for a metric id
"""
import logging
import traceback
from ast import literal_eval
from sqlalchemy.sql import select
from database import get_engine, engine_disposal, metric_group_table_meta
from functions.metrics.get_base_name_from_metric_id import get_base_name_from_metric_id
def related_to_metric_groups(current_skyline_app, base_name, metric_id):
"""
Returns a dict of all the metric_groups that a metric is part of.
"""
current_skyline_app_logger = current_skyline_app + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
related_to_metric_groups_dict = {}
related_to_metric_groups_dict['metric'] = base_name
related_to_metric_groups_dict['metric_id'] = metric_id
related_to_metric_groups_dict['related_to_metrics'] = {}
try:
engine, fail_msg, trace = get_engine(current_skyline_app)
if fail_msg != 'got MySQL engine':
current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
if trace != 'none':
current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine trace - %s' % str(trace))
except Exception as err:
current_logger.error(traceback.format_exc())
current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine - %s' % str(err))
if engine:
try:
metric_group_table, fail_msg, trace = metric_group_table_meta(current_skyline_app, engine)
if fail_msg != 'metric_group meta reflected OK':
current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta fail_msg - %s' % str(fail_msg))
if trace != 'none':
current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta trace - %s' % str(trace))
except Exception as err:
current_logger.error(traceback.format_exc())
current_logger.error('error :: related_to_metric_groups :: metric_group_table_meta - %s' % str(err))
try:
connection = engine.connect()
if metric_id:
stmt = select([metric_group_table]).where(metric_group_table.c.related_metric_id == metric_id).order_by(metric_group_table.c.avg_coefficient.desc())
else:
stmt = select([metric_group_table])
results = connection.execute(stmt)
for row in results:
group_metric_id = row['metric_id']
group_base_name = None
try:
group_base_name = get_base_name_from_metric_id(current_skyline_app, group_metric_id)
except Exception as err:
current_logger.error('error :: related_to_metric_groups :: base_name_from_metric_id failed to determine base_name from metric_id: %s - %s' % (
str(group_metric_id), str(err)))
if group_base_name:
related_to_metric_groups_dict['related_to_metrics'][group_base_name] = dict(row)
connection.close()
except Exception as err:
current_logger.error(traceback.format_exc())
current_logger.error('error :: related_to_metric_groups :: failed to build metric_groups dict - %s' % str(err))
if engine:
engine_disposal(current_skyline_app, engine)
for related_metric in list(related_to_metric_groups_dict['related_to_metrics'].keys()):
for key in list(related_to_metric_groups_dict['related_to_metrics'][related_metric].keys()):
if 'decimal.Decimal' in str(type(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])):
related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = float(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])
if 'datetime.datetime' in str(type(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])):
related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = str(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])
if key == 'shifted_counts':
try:
shifted_counts_str = related_to_metric_groups_dict['related_to_metrics'][related_metric][key].decode('utf-8')
shifted_counts = literal_eval(shifted_counts_str)
except AttributeError:
shifted_counts = related_to_metric_groups_dict['related_to_metrics'][related_metric][key]
related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = shifted_counts
# Remap the metric_id and related_metric_id for clarity
related_to_metric_groups_dict['related_to_metrics'][related_metric]['related_to_metric_id'] = related_to_metric_groups_dict['related_to_metrics'][related_metric]['metric_id']
related_to_metric_groups_dict['related_to_metrics'][related_metric]['metric_id'] = metric_id
del related_to_metric_groups_dict['related_to_metrics'][related_metric]['related_metric_id']
return related_to_metric_groups_dict
|
html_parsing/get_game_genres/parsers/squarefaction_ru.py | DazEB2/SimplePyScripts | 117 | 12791421 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from typing import List
from bs4 import BeautifulSoup
from base_parser import BaseParser
class SquarefactionRu_Parser(BaseParser):
def _parse(self) -> List[str]:
url = f'http://squarefaction.ru/main/search/games?q={self.game_name}'
rs = self.send_get(url)
root = BeautifulSoup(rs.content, 'html.parser')
# http://squarefaction.ru/main/search/games?q=dead+space
if '/main/search/games' in rs.url:
self.log_info(f'Parsing of game list')
for game_block in root.select('#games > .entry'):
title = self.get_norm_text(game_block.select_one('.name'))
if not self.is_found_game(title):
continue
# <div class="infos">TPS,Survival Horror,Action</div>
genres = self.get_norm_text(game_block.select_one('.infos')).split(',')
                # The first option that matches by name will do
return genres
# http://squarefaction.ru/game/dead-space
else:
self.log_info(f'Parsing of game page')
game_block = root.select_one('#page-info')
if game_block:
title = self.get_norm_text(game_block.select_one('#title'))
if not self.is_found_game(title):
self.log_warn(f'Not match game title {title!r}')
# <td class="nowraps-links">
# <a href="/games?genre=tps">TPS</a>,
# <a href="/games?genre=survival-horror">Survival Horror</a>,
# <a href="/games?genre=action">Action</a>
# </td>
genres = [
self.get_norm_text(a) for a in game_block.select('a') if '?genre=' in a['href']
]
                # The first option that matches by name will do
return genres
self.log_info(f'Not found game {self.game_name!r}')
return []
def get_game_genres(game_name: str, *args, **kwargs) -> List[str]:
return SquarefactionRu_Parser(*args, **kwargs).get_game_genres(game_name)
if __name__ == '__main__':
from common import _common_test
_common_test(get_game_genres)
# Search 'Hellgate: London'...
# Genres: ['Action RPG']
#
# Search 'The Incredible Adventures of Van Helsing'...
# Genres: ['Action RPG']
#
# Search 'Dark Souls: Prepare to Die Edition'...
# Genres: []
#
# Search 'Twin Sector'...
# Genres: []
#
# Search 'Call of Cthulhu: Dark Corners of the Earth'...
# Genres: ['Survival Horror']
|
test/test_algos/test_opt_algorithm/test_racos/test_racos.py | IcarusWizard/ZOOpt | 403 | 12791427 | <filename>test/test_algos/test_opt_algorithm/test_racos/test_racos.py
from zoopt.algos.opt_algorithms.racos.racos_common import RacosCommon
from zoopt.algos.opt_algorithms.racos.sracos import SRacos
from zoopt import Solution, Objective, Dimension, Parameter, Opt, ExpOpt, ValueType, Dimension2
import numpy as np
def ackley(solution):
"""
Ackley function for continuous optimization
"""
x = solution.get_x()
bias = 0.2
ave_seq = sum([(i - bias) * (i - bias) for i in x]) / len(x)
ave_cos = sum([np.cos(2.0 * np.pi * (i - bias)) for i in x]) / len(x)
value = -20 * np.exp(-0.2 * np.sqrt(ave_seq)) - np.exp(ave_cos) + 20.0 + np.e
return value
def sphere_discrete_order(solution):
"""
Sphere function for integer continuous optimization
"""
x = solution.get_x()
value = sum([(i-2)*(i-2) for i in x])
return value
class SetCover:
"""
set cover problem for discrete optimization
this problem has some extra initialization tasks, thus we define this problem as a class
"""
def __init__(self):
self.__weight = [0.8356, 0.5495, 0.4444, 0.7269, 0.9960, 0.6633, 0.5062, 0.8429, 0.1293, 0.7355,
0.7979, 0.2814, 0.7962, 0.1754, 0.0267, 0.9862, 0.1786, 0.5884, 0.6289, 0.3008]
self.__subset = []
self.__subset.append([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0])
self.__subset.append([0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0])
self.__subset.append([1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0])
self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0])
self.__subset.append([1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1])
self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0])
self.__subset.append([0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0])
self.__subset.append([0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0])
self.__subset.append([0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0])
self.__subset.append([0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1])
self.__subset.append([0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0])
self.__subset.append([0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1])
self.__subset.append([1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1])
self.__subset.append([1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1])
self.__subset.append([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0])
self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1])
self.__subset.append([0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1])
self.__subset.append([0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0])
self.__subset.append([0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1])
def fx(self, solution):
"""
Objective function.
:param solution: a Solution object
:return: the value of f(x)
"""
x = solution.get_x()
allweight = 0
countw = 0
for i in range(len(self.__weight)):
allweight += self.__weight[i]
dims = []
for i in range(len(self.__subset[0])):
dims.append(False)
for i in range(len(self.__subset)):
if x[i] == 1:
countw += self.__weight[i]
for j in range(len(self.__subset[i])):
if self.__subset[i][j] == 1:
dims[j] = True
full = True
for i in range(len(dims)):
if dims[i] is False:
full = False
if full is False:
countw += allweight
return countw
@property
def dim(self):
"""
Dimension of set cover problem.
:return: Dimension instance
"""
dim_size = 20
dim_regs = [[0, 1]] * dim_size
dim_tys = [False] * dim_size
return Dimension(dim_size, dim_regs, dim_tys)
class TestRacos(object):
def test_racos_common_extend(self):
a = [1, 2, 3]
b = [2, 3, 4]
assert RacosCommon.extend(a, b) == [1, 2, 3, 2, 3, 4]
def test_racos_common_is_distinct(self):
a = Solution(x=[1, 2, 3])
b = Solution(x=[2, 3, 4])
c = Solution(x=[3, 4, 5])
seti = [a, b]
assert RacosCommon.is_distinct(seti, a) is False and RacosCommon.is_distinct(seti, c) is True
def test_sracos_distance(self):
a = [2, 4]
b = [5, 8]
assert SRacos.distance(a, b) == 5
def test_sracos_binary_search(self):
s0 = Solution(value=0)
s1 = Solution(value=1)
s2 = Solution(value=2)
s3 = Solution(value=3)
s4 = Solution(value=4)
# 1 3 0 2 4
test_s1 = Solution(value=2.1)
test_s2 = Solution(value=4.5)
test_s3 = Solution(value=-1)
test_s4 = Solution(value=2)
set = [s0, s1, s2, s3, s4]
sracos = SRacos()
assert sracos.binary_search(set, test_s1, 0, 4) == 3
assert sracos.binary_search(set, test_s1, 0, 2) == 3
assert sracos.binary_search(set, test_s2, 0, 4) == 5
assert sracos.binary_search(set, test_s3, 0, 4) == 0
assert sracos.binary_search(set, test_s4, 0, 4) == 3
def test_sracos_strategy_wr(self):
s0 = Solution(value=0)
s1 = Solution(value=1)
s2 = Solution(value=2)
s3 = Solution(value=3)
s4 = Solution(value=4)
iset = [s0, s1, s2, s3, s4]
sracos = SRacos()
test_s1 = Solution(value=2.1)
sracos.strategy_wr(iset, test_s1, 'pos')
assert len(iset) == 5 and iset[0].get_value() == 0 and iset[1].get_value() == 1 and iset[2].get_value() == 2 \
and iset[3].get_value() == 2.1 and iset[4].get_value() == 3
iset2 = [s1, s3, s0, s2, s4]
sracos.strategy_wr(iset2, test_s1, 'neg')
assert len(iset2) == 5 and iset2[4].get_value() == 2.1
def test_sracos_strategy_rr(self):
s0 = Solution(value=0)
s1 = Solution(value=1)
s2 = Solution(value=2)
iset = [s0, s1, s2]
sracos = SRacos()
test_s1 = Solution(value=2.1)
sracos.strategy_rr(iset, test_s1)
assert len(iset) == 3 and (iset[0].get_value() == 2.1 or iset[1].get_value() == 2.1 or iset[2].get_value() == 2.1)
def test_sracos_strategy_lm(self):
s0 = Solution(x=[1, 1, 1], value=0)
s1 = Solution(x=[2.2, 2.2, 2.2], value=1)
s2 = Solution(x=[3, 3, 3], value=2)
iset = [s0, s1, s2]
sracos = SRacos()
test_s1 = Solution(x=[2.1, 2.1, 2.1], value=2.1)
sracos.strategy_lm(iset, s0, test_s1)
assert iset[2].get_value() == 2.1
def test_sracos_replace(self):
s0 = Solution(x=[0, 0, 0], value=0.5)
s1 = Solution(x=[1, 1, 1], value=1)
s2 = Solution(x=[2, 2, 2], value=2)
s3 = Solution(x=[3, 3, 3], value=3)
s4 = Solution(x=[4, 4, 4], value=4)
pos_set = [s0, s1, s2, s3, s4]
neg_set = [s2, s3, s1, s4, s0]
x = Solution(x=[2.1, 2.1, 2.1], value=0.1)
sracos = SRacos()
sracos.replace(pos_set, x, 'pos', 'WR')
assert pos_set[4].get_value() == 3 and pos_set[0].get_value() == 0.1
sracos.replace(neg_set, x, 'neg', 'LM')
assert neg_set[3].get_value() == 0.1
def test_racos_performance(self):
# continuous
dim = 100 # dimension
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=100 * dim, sequential=False, seed=1)
solution = ExpOpt.min(objective, parameter)[0]
assert solution.get_value() < 0.2
dim = 500
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=10000, sequential=False, seed=1)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert solution.get_value() < 2
# discrete
# setcover
problem = SetCover()
dim = problem.dim # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, sequential=False, seed=777)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
dim_regs = [[-10, 10]] * dim_size # dimension range
dim_tys = [False] * dim_size # dimension type : integer
dim_order = [True] * dim_size
dim = Dimension(dim_size, dim_regs, dim_tys, order=dim_order) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000, sequential=False, seed=77)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert sol.get_value() < 200
def test_racos_performance2(self):
# continuous
dim = 100 # dimension
one_dim = (ValueType.CONTINUOUS, [-1, 1], 1e-6)
dim_list = [(one_dim)] * dim
objective = Objective(ackley, Dimension2(dim_list)) # setup objective
parameter = Parameter(budget=100 * dim, sequential=False, seed=1)
solution = ExpOpt.min(objective, parameter)[0]
assert solution.get_value() < 0.2
dim = 500
dim_list = [(one_dim)] * dim
objective = Objective(ackley, Dimension2(dim_list)) # setup objective
parameter = Parameter(budget=10000, sequential=False, seed=1)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert solution.get_value() < 2
# discrete
# setcover
problem = SetCover()
dim_size = 20
one_dim = (ValueType.DISCRETE, [0, 1], False)
dim_list = [(one_dim)] * dim_size
dim = Dimension2(dim_list) # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, sequential=False, seed=777)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
one_dim = (ValueType.DISCRETE, [-10, 10], True)
dim_list = [(one_dim)] * dim_size
dim = Dimension2(dim_list) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000, sequential=False, seed=77)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert sol.get_value() < 200
def test_sracos_performance(self):
# continuous
dim = 100 # dimension
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=100 * dim, seed=77)
solution = Opt.min(objective, parameter)
assert solution.get_value() < 0.2
dim = 500
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=10000, seed=777)
solution = Opt.min(objective, parameter)
assert solution.get_value() < 1.5
# discrete
# setcover
problem = SetCover()
dim = problem.dim # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, seed=777)
sol = Opt.min(objective, parameter)
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
dim_regs = [[-10, 10]] * dim_size # dimension range
dim_tys = [False] * dim_size # dimension type : integer
dim_order = [True] * dim_size
dim = Dimension(dim_size, dim_regs, dim_tys, order=dim_order) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000)
sol = Opt.min(objective, parameter)
assert sol.get_value() < 200
def test_sracos_performance2(self):
# continuous
dim = 100 # dimension
one_dim = (ValueType.CONTINUOUS, [-1, 1], 1e-6)
dim_list = [(one_dim)] * dim
objective = Objective(ackley, Dimension2(dim_list))
parameter = Parameter(budget=100 * dim, seed=77)
solution = Opt.min(objective, parameter)
assert solution.get_value() < 0.2
dim = 500
one_dim = (ValueType.CONTINUOUS, [-1, 1], 1e-6)
dim_list = [(one_dim)] * dim
objective = Objective(ackley, Dimension2(dim_list)) # setup objective
parameter = Parameter(budget=10000, seed=777)
solution = Opt.min(objective, parameter)
assert solution.get_value() < 1.5
# discrete
# setcover
problem = SetCover()
dim_size = 20
one_dim = (ValueType.DISCRETE, [0, 1], False)
dim_list = [(one_dim)] * dim_size
dim = Dimension2(dim_list) # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, seed=777)
sol = Opt.min(objective, parameter)
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
one_dim = (ValueType.DISCRETE, [-10, 10], True)
dim_list = [(one_dim)] * dim_size
dim = Dimension2(dim_list) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000)
sol = Opt.min(objective, parameter)
assert sol.get_value() < 200
def test_asracos_performance(self):
# continuous
dim = 100 # dimension
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=100 * dim, parallel=True, server_num=2, seed=2)
# parameter = Parameter(budget=100 * dim, init_samples=[Solution([0] * 100)]) # init with init_samples
solution_list = ExpOpt.min(objective, parameter, repeat=1)
for solution in solution_list:
value = solution.get_value()
assert value < 0.2
# discrete
# setcover
problem = SetCover()
dim = problem.dim # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, parallel=True, server_num=2, seed=777)
sol = ExpOpt.min(objective, parameter, repeat=1)[0]
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
dim_regs = [[-10, 10]] * dim_size # dimension range
dim_tys = [False] * dim_size # dimension type : integer
dim_order = [True] * dim_size
dim = Dimension(dim_size, dim_regs, dim_tys, order=dim_order) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000, parallel=True, server_num=2, uncertain_bits=1, seed=1)
sol = ExpOpt.min(objective, parameter)[0]
assert sol.get_value() < 10
|
Testing/test_2D_frames.py | geosharma/PyNite | 199 | 12791433 | <filename>Testing/test_2D_frames.py
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 <NAME>, SE; tamalone1
"""
import unittest
from PyNite import FEModel3D
import math
import sys
from io import StringIO
class Test_2D_Frame(unittest.TestCase):
''' Tests of analyzing 2D frames. '''
def setUp(self):
# Suppress printed output temporarily
sys.stdout = StringIO()
def tearDown(self):
# Reset the print function to normal
sys.stdout = sys.__stdout__
def test_XY_gravity_load(self):
# A First Course in the Finite Element Method, 4th Edition
# <NAME>
# Problem 5.30
# Units for this model are kips and inches
frame = FEModel3D()
# Define the nodes
frame.add_node('N1', 0, 0, 0)
frame.add_node('N2', 0, 30*12, 0)
frame.add_node('N3', 15*12, 40*12, 0)
frame.add_node('N4', 35*12, 40*12, 0)
frame.add_node('N5', 50*12, 30*12, 0)
frame.add_node('N6', 50*12, 0, 0)
# Define the supports
frame.def_support('N1', True, True, True, True, True, True)
frame.def_support('N6', True, True, True, True, True, True)
# Create members (all members will have the same properties in this example)
J = 250
Iy = 250
Iz = 200
E = 30000
G = 250
A = 12
frame.add_member('M1', 'N1', 'N2', E, G, Iy, Iz, J, A)
frame.add_member('M2', 'N2', 'N3', E, G, Iy, Iz, J, A)
frame.add_member('M3', 'N3', 'N4', E, G, Iy, Iz, J, A)
frame.add_member('M4', 'N4', 'N5', E, G, Iy, Iz, J, A)
frame.add_member('M5', 'N5', 'N6', E, G, Iy, Iz, J, A)
# Add nodal loads
frame.add_node_load('N3', 'FY', -30)
frame.add_node_load('N4', 'FY', -30)
# Analyze the model
frame.analyze()
# subTest context manager prints which portion fails, if any
correct_values = [('N1', {'RxnFX': 11.6877,
'RxnFY': 30,
'RxnMZ': -1810.0745}),
('N6', {'RxnFX': -11.6877,
'RxnFY': 30,
'RxnMZ': 1810.0745})]
for name, values in correct_values:
with self.subTest(node=name):
node = frame.Nodes[name]
# Two decimal place accuracy requires +/-0.5% accuracy
# one decimal place requires +/-5%
self.assertAlmostEqual(node.RxnFX['Combo 1']/values['RxnFX'], 1.0, 2)
self.assertAlmostEqual(node.RxnFY['Combo 1']/values['RxnFY'], 1.0, 2)
self.assertAlmostEqual(node.RxnMZ['Combo 1']/values['RxnMZ'], 1.0, 2)
def test_XY_member_ptload(self):
frame = FEModel3D()
# Add nodes
frame.add_node('N1', 0, 0, 0) # ft
frame.add_node('N2', 0, 7.667, 0) # ft
frame.add_node('N3', 7.75, 7.667, 0) # ft
frame.add_node('N4', 7.75, 0, 0) # ft
# Add supports
frame.def_support('N1', True, True, True, True, True, False)
frame.def_support('N4', True, True, True, True, True, False)
# Define material and section properties for a W8x24
E = 29000*12**2 # ksf
G = 1111200*12**2 # ksf
Iy = 18.3/12**4 # ft^4
Iz = 82.7/12**4 # ft^4
J = 0.346/12**4 # ft^4
A = 5.26/12**2 # in^2
# Define members
frame.add_member('M1', 'N1', 'N2', E, G, Iy, Iz, J, A)
frame.add_member('M2', 'N2', 'N3', E, G, Iy, Iz, J, A)
frame.add_member('M3', 'N4', 'N3', E, G, Iy, Iz, J, A)
# Add loads to the frame
frame.add_member_pt_load('M2', 'Fy', -5, 7.75/2) # 5 kips @ midspan
frame.add_member_dist_load('M2', 'Fy', -0.024, -0.024) # W8x24 self-weight
# Analyze the frame
frame.analyze()
calculated_RZ = frame.Nodes['N1'].RZ['Combo 1']
# Update the expected value to an appropriate precision
expected_RZ = 0.00022794540510395617
self.assertAlmostEqual(calculated_RZ/expected_RZ, 1.0, 2)
def test_YZ_gravity_load(self):
# A First Course in the Finite Element Method, 4th Edition
# Daryl <NAME>
# Problem 5.30
# Units for this model are kips and inches
frame = FEModel3D()
# Define the nodes
frame.add_node('N1', 0, 0, 0)
frame.add_node('N2', 0, 30*12, 0)
frame.add_node('N3', 0, 40*12, 15*12)
frame.add_node('N4', 0, 40*12, 35*12)
frame.add_node('N5', 0, 30*12, 50*12)
frame.add_node('N6', 0, 0, 50*12)
# Define the supports
frame.def_support('N1', True, True, True, True, True, True)
frame.def_support('N6', True, True, True, True, True, True)
# Create members (all members will have the same properties in this example)
J = 250
Iy = 250
Iz = 200
E = 30000
G = 250
A = 12
frame.add_member('M1', 'N1', 'N2', E, G, Iz, Iy, J, A)
frame.add_member('M2', 'N2', 'N3', E, G, Iy, Iz, J, A)
frame.add_member('M3', 'N3', 'N4', E, G, Iy, Iz, J, A)
frame.add_member('M4', 'N4', 'N5', E, G, Iy, Iz, J, A)
frame.add_member('M5', 'N5', 'N6', E, G, Iz, Iy, J, A)
# Add nodal loads
frame.add_node_load('N3', 'FY', -30)
frame.add_node_load('N4', 'FY', -30)
# Analyze the model
frame.analyze()
# subTest context manager prints which portion fails, if any
# Check reactions at N1 and N6
correct_reactions = [('N1', {'RxnFZ': 11.6877,
'RxnFY': 30,
'RxnMX': 1810.0745}),
('N6', {'RxnFZ': -11.6877,
'RxnFY': 30,
'RxnMX': -1810.0745})]
for name, values in correct_reactions:
with self.subTest(node=name):
node = frame.Nodes[name]
# Two decimal place accuracy requires +/-0.5% accuracy
# one decimal place requires +/-5%
self.assertAlmostEqual(node.RxnFZ['Combo 1']/values['RxnFZ'], 1.0, 2)
self.assertAlmostEqual(node.RxnFY['Combo 1']/values['RxnFY'], 1.0, 2)
self.assertAlmostEqual(node.RxnMX['Combo 1']/values['RxnMX'], 1.0, 2)
# Check displacements at N3 and N4
correct_displacements = [('N3', {'DY': -6.666757,
'RX': 0.032}),
('N4', {'DY': -6.666757,
'RX': -0.032})]
for name, values in correct_displacements:
with self.subTest(node=name):
node = frame.Nodes[name]
# Two decimal place accuracy requires +/-0.5% accuracy
# one decimal place requires +/-5%
self.assertAlmostEqual(node.DY['Combo 1']/values['DY'], 1.0, 2)
self.assertAlmostEqual(node.RX['Combo 1']/values['RX'], 1.0, 2)
def test_XZ_ptload(self):
# A simply supported beam with a point load.
# Units used in this example are inches, and kips
SimpleBeam = FEModel3D()
# Add nodes (14 ft = 168 in apart)
SimpleBeam.add_node("N1", 0, 0, 0)
SimpleBeam.add_node("N2", 0, 0, 168)
# Add a beam with the following properties:
A = 20
E = 29000
G = 11400
Iy = 100
Iz = 150
J = 250
SimpleBeam.add_member("M1", "N1", "N2", E, G, Iy, Iz, J, A)
# Provide simple supports
SimpleBeam.def_support("N1", True, True, True, False, False, True)
SimpleBeam.def_support("N2", True, True, True, False, False, False)
# Add a point load of 5 kips at the midspan of the beam
SimpleBeam.add_member_pt_load("M1", "Fy", 5, 7 * 12)
# Analyze the beam
SimpleBeam.analyze(False)
# Print reactions at each end of the beam
correct_reactions = [('N1', -2.5),
('N2', -2.5)]
for node_name, rxn in correct_reactions:
with self.subTest(node=node_name):
calculated_reaction = SimpleBeam.Nodes[node_name].RxnFY['Combo 1']
# Two decimal place accuracy requires +/-0.5% accuracy
# one decimal place requires +/-5%
self.assertAlmostEqual(calculated_reaction/rxn, 1.0, 2)
def test_Kassimali_3_35(self):
"""
Tests against Kassimali example 3.35.
This example was selected because it allows us to check the following features:
1. Member loads aligned in global directions.
2. A member internal hinge.
3. A point load at the end of a member.
The example will be run in the XZ plane to change things up a bit.
"""
frame = FEModel3D()
frame.add_node('A', 0, 0, 0)
frame.add_node('B', 0, 0, 24)
frame.add_node('C', 12, 0, 0)
frame.add_node('D', 12, 0, 24)
frame.add_node('E', 24, 0, 12)
E = 29000*12**2
G = 11200*12**2
Iy = 17.3/12**4
Iz = 204/12**4
J = 0.3/12**4
A = 7.65/12**2
frame.add_member('AC', 'A', 'C', E, G, Iy, Iz, J, A)
frame.add_member('BD', 'B', 'D', E, G, Iy, Iz, J, A)
frame.add_member('CE', 'C', 'E', E, G, Iy, Iz, J, A)
frame.add_member('ED', 'E', 'D', E, G, Iy, Iz, J, A)
frame.def_support('A', support_DX=True, support_DY=True, support_DZ=True)
frame.def_support('B', support_DX=True, support_DY=True, support_DZ=True)
frame.def_support('E', support_DY=True)
frame.def_releases('CE', Rzj=True)
frame.add_member_pt_load('AC', 'FZ', 20, 12)
frame.add_member_dist_load('CE', 'FX', -1.5, -1.5)
frame.add_member_dist_load('ED', 'FX', -1.5, -1.5)
# from PyNite.Visualization import render_model
# render_model(frame, text_height=0.5, case='Case 1')
frame.analyze()
AZ = -8.63
AX = 15.46
BZ = -11.37
BX = 35.45
# The reactions were compared manually to Kassimali's solution and the shears were within
# 10% and 7% respectively. That seems like it's a little big to be a rounding error alone.
# Likely the finite element method is a little more accurate than the simplified method
# Kassimali uses.
self.assertLess(abs(frame.Nodes['A'].RxnFZ['Combo 1']/AZ - 1), 0.1)
self.assertLess(abs(frame.Nodes['A'].RxnFX['Combo 1']/AX - 1), 0.05)
        self.assertLess(abs(frame.Nodes['B'].RxnFZ['Combo 1']/BZ - 1), 0.07)
self.assertLess(abs(frame.Nodes['B'].RxnFX['Combo 1']/BX - 1), 0.05) |
test/test_fakeservertest.py | yimuniao/collectd-cloudwatch | 220 | 12791454 | import unittest
import requests
from helpers.fake_http_server import FakeServer
class FakeServerTest(unittest.TestCase):
SERVER = None
@classmethod
def setUpClass(cls):
cls.SERVER = FakeServer()
cls.SERVER.start_server()
cls.SERVER.serve_forever()
def setUp(self):
self.server = FakeServerTest.SERVER
def test_is_server_alive(self):
self.assertTrue(self.server.is_alive())
self.assertTrue(self.server.is_ready_to_process())
def test_server_process_forever(self):
self.assertTrue(self.server.is_ready_to_process())
send_and_check_request(self.server.get_url(), "request1")
self.assertTrue(self.server.is_ready_to_process())
send_and_check_request(self.server.get_url(), "request2")
self.assertTrue(self.server.is_ready_to_process())
def test_server_overlapped_listeners(self):
self.assertTrue(self.server.is_ready_to_process())
self.assertRaises(FakeServer.ServerStateException, self.server.serve_once)
self.assertRaises(FakeServer.ServerStateException, self.server.serve_forever)
def test_server_start_overlapped_instances(self):
self.assertRaises(FakeServer.ServerStateException, self.server.start_server)
def test_timeout_triggers_only_once_per_call(self):
timeout = 0.3
self.server.set_timeout_delay(timeout)
with self.assertRaises(requests.exceptions.ReadTimeout):
requests.get(self.server.get_url(), timeout=timeout)
requests.get(self.server.get_url(), timeout=timeout)
def test_server_stop_multiple_times(self):
self.server.stop_server()
self.assertRaises(FakeServer.ServerStateException, self.server.stop_server)
self.server.start_server()
self.server.serve_forever()
def test_set_custom_response(self):
expected_response = "Expected Response"
expected_response_code = 404
self.server.set_expected_response(expected_response, expected_response_code)
response = requests.get(self.server.get_url() + "request")
self.assertEquals(expected_response, response.text)
self.assertEquals(expected_response_code, response.status_code)
@classmethod
def tearDownClass(cls):
try:
cls.SERVER.stop_server()
except:
pass
def send_and_check_request(url, request):
url = url + request
response = requests.get(url)
received_request = open(FakeServer.REQUEST_FILE).read()
assert request in received_request[1:] # skip first character which always is '/'
assert response.status_code == FakeServer.DEFAULT_RESPONSE_CODE
assert response.text == FakeServer.DEFAULT_RESPONSE |
src/ralph/supports/migrations/0006_auto_20160615_0805.py | DoNnMyTh/ralph | 1,668 | 12791475 | <filename>src/ralph/supports/migrations/0006_auto_20160615_0805.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import ralph.lib.mixins.fields
class Migration(migrations.Migration):
dependencies = [
('supports', '0005_auto_20160105_1222'),
]
operations = [
migrations.AlterModelOptions(
name='baseobjectssupport',
options={},
),
migrations.AlterModelTable(
name='baseobjectssupport',
table=None,
),
migrations.SeparateDatabaseAndState(
state_operations=[
migrations.AddField(
model_name='baseobjectssupport',
name='baseobject',
field=ralph.lib.mixins.fields.BaseObjectForeignKey(default=0, verbose_name='Asset', to='assets.BaseObject', related_name='supports'),
preserve_default=False,
),
migrations.AddField(
model_name='baseobjectssupport',
name='support',
field=models.ForeignKey(default=0, to='supports.Support'),
preserve_default=False,
),
],
database_operations=[]
),
]
|
tools/gen_doc_files.py | joshddunn/crsfml | 248 | 12791476 | <filename>tools/gen_doc_files.py
import os
import textwrap
import mkdocs_gen_files
root = mkdocs_gen_files.config["plugins"]["mkdocstrings"].get_handler("crystal").collector.root
nav = mkdocs_gen_files.open(f"api/index.md", "w")
for module in ["System", "Window", "Graphics", "Audio", "Network", ""]:
if module:
print(f"* [{module} module]({module.lower()}.md)", file=nav)
with mkdocs_gen_files.open(f"api/{module.lower()}.md", "w") as f:
f.write(textwrap.dedent(f"""
# ::: SF
selection:
file_filters:
- '/{module.lower()}/'
"""))
for typ in root.lookup("SF").walk_types():
[cur_module] = {os.path.dirname(os.path.relpath(loc.filename, "src")) for loc in typ.locations}
if module.lower() == cur_module:
name = typ.name
full_name = typ.abs_id
path = full_name.replace("::", "/")
indent = bool(module) + full_name.count("::") - 1
print(" " * indent + f"* [{name}]({path}.md)", file=nav)
filename = f"api/{path}.md"
with mkdocs_gen_files.open(filename, "w") as f:
f.write(textwrap.dedent(f"""\
# ::: {full_name}
"""))
if typ.locations:
mkdocs_gen_files.set_edit_path(filename, typ.locations[0].url)
|
startup_scripts/240_virtualization_interfaces.py | systempal/netbox-docker | 691 | 12791478 | import sys
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from virtualization.models import VirtualMachine, VMInterface
interfaces = load_yaml("/opt/netbox/initializers/virtualization_interfaces.yml")
if interfaces is None:
sys.exit()
required_assocs = {"virtual_machine": (VirtualMachine, "name")}
for params in interfaces:
custom_field_data = pop_custom_fields(params)
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
interface, created = VMInterface.objects.get_or_create(**params)
if created:
set_custom_fields_values(interface, custom_field_data)
print("🧷 Created interface", interface.name, interface.virtual_machine.name)
|
tests/util_test.py | nickgaya/bravado-core | 122 | 12791481 | <reponame>nickgaya/bravado-core
# -*- coding: utf-8 -*-
from inspect import getcallargs
import mock
import pytest
from bravado_core.util import AliasKeyDict
from bravado_core.util import cached_property
from bravado_core.util import determine_object_type
from bravado_core.util import lazy_class_attribute
from bravado_core.util import memoize_by_id
from bravado_core.util import ObjectType
from bravado_core.util import RecursiveCallException
from bravado_core.util import sanitize_name
from bravado_core.util import strip_xscope
def test_cached_property():
class Class(object):
def __init__(self):
self.calls = 0
@cached_property
def property_1(self):
self.calls += 1
return self.calls
assert isinstance(Class.property_1, cached_property)
class_instance = Class()
assert class_instance.calls == 0
assert class_instance.property_1 == 1
assert class_instance.calls == 1
# If property is called twice no calls are received from the method
assert class_instance.property_1 == 1
assert class_instance.calls == 1
# If property is deleted then the method is called again
del class_instance.property_1
assert class_instance.property_1 == 2
assert class_instance.calls == 2
def test_class_cached_property():
class Class(object):
calls = 0
@lazy_class_attribute
def prop(cls):
cls.calls += 1
return cls.calls
class_instance_1 = Class()
assert class_instance_1.calls == 0
assert class_instance_1.prop == 1
assert class_instance_1.calls == 1
class_instance_2 = Class()
assert class_instance_2.calls == 1
assert class_instance_2.prop == 1
assert class_instance_2.calls == 1
def test_memoize_by_id_decorator_recursive_call():
calls = []
@memoize_by_id
def function(a):
calls.append(a)
return function(a)
with pytest.raises(RecursiveCallException):
function(mock.sentinel.A)
assert calls == [mock.sentinel.A]
def test_memoize_by_id_decorator():
calls = []
def function(a, b=None):
calls.append([a, b])
return id(a) + id(b)
decorated_function = memoize_by_id(function)
assert decorated_function(1) == id(1) + id(None)
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
}
assert calls == [[1, None]]
assert decorated_function(2, 3) == id(2) + id(3)
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
(('a', id(2)), ('b', id(3))): id(2) + id(3),
}
assert calls == [[1, None], [2, 3]]
# Calling the decorated method with known arguments will not call the inner method
assert decorated_function(1) == id(1) + id(None)
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
(('a', id(2)), ('b', id(3))): id(2) + id(3),
}
assert calls == [[1, None], [2, 3]]
decorated_function.cache.clear()
assert decorated_function(1) == id(1) + id(None)
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
}
assert calls == [[1, None], [2, 3], [1, None]]
@mock.patch('bravado_core.util.inspect.getcallargs', wraps=getcallargs)
def test_memoize_by_id_do_not_use_inspect_if_only_kwargs_are_provided(mock_getcallargs):
calls = []
def function(a, b=None):
calls.append([a, b])
return id(a) + id(b)
decorated_function = memoize_by_id(function)
assert decorated_function(1) == id(1) + id(None)
mock_getcallargs.assert_called_once_with(function, 1)
assert calls == [[1, None]]
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
}
mock_getcallargs.reset_mock()
assert decorated_function(a=1) == id(1) + id(None)
assert not mock_getcallargs.called
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
}
@pytest.mark.parametrize(
('input', 'expected'), [
('pet.getBy Id', 'pet_getBy_Id'), # simple case
('_getPetById_', 'getPetById'), # leading/trailing underscore
('get__Pet_By__Id', 'get_Pet_By_Id'), # double underscores
('^&#@!$foo%+++:;"<>?/', 'foo'), # bunch of illegal chars
('__foo__', 'foo'), # make sure we strip multiple underscores
('100percent', 'percent'), # make sure we remove all digits
('100.0', '_100_0'), # a name consisting mostly of digits should keep them
],
)
def test_sanitize_name(input, expected):
assert sanitize_name(input) == expected
def test_AliasKeyDict():
alias_dict = AliasKeyDict({'a': 'b', 'c': 'd'})
alias_dict.add_alias('alias_a', 'a')
assert len(alias_dict) == 2
assert set(alias_dict.items()) == set([('a', 'b'), ('c', 'd')])
assert 'alias_a' in alias_dict
assert alias_dict['alias_a'] is alias_dict['a']
assert alias_dict.get('alias_a') is alias_dict.get('a')
assert alias_dict.get('f', 'not there') == 'not there'
assert alias_dict.pop('alias_a') == 'b'
assert len(alias_dict) == 1
assert 'a' not in alias_dict
assert 'alias_a' not in alias_dict
def test_AliasKeyDict_copy():
alias_dict = AliasKeyDict([('foo', 'bar')])
alias_dict.add_alias('baz', 'foo')
dict_copy = alias_dict.copy()
assert set(dict_copy.items()) == set(alias_dict.items())
assert dict_copy.alias_to_key == alias_dict.alias_to_key
def test_AliasKeyDict_del():
alias_dict = AliasKeyDict([('foo', 'bar')])
alias_dict.add_alias('baz', 'foo')
del alias_dict['baz']
assert len(alias_dict) == 0
assert 'baz' not in alias_dict
assert 'foo' not in alias_dict
@pytest.mark.parametrize(
'default_type_to_object, object_dict, expected_object_type',
(
[True, 'anything that is not a dictionary', ObjectType.UNKNOWN],
[True, {'in': 'body', 'name': 'body', 'required': True, 'schema': {'type': 'object'}}, ObjectType.PARAMETER],
[True, {'get': {'responses': {'200': {'description': 'response description'}}}}, ObjectType.PATH_ITEM],
[True, {'description': 'response description', 'schema': {'type': 'object'}}, ObjectType.RESPONSE],
[True, {'description': 'response description', 'parameters': {'param': {'type': 'object'}}}, ObjectType.SCHEMA],
[False, {'description': 'response description', 'parameters': {'param': {'type': 'object'}}}, ObjectType.UNKNOWN], # noqa
),
)
def test_determine_object_type(default_type_to_object, object_dict, expected_object_type):
assert determine_object_type(object_dict, default_type_to_object) == expected_object_type
def test_empty():
assert {} == strip_xscope({})
def test_contained_in_dict():
fragment = {
'MON': {
'$ref': '#/definitions/DayHours',
'x-scope': [
'file:///happyhour/api_docs/swagger.json',
'file:///happyhour/api_docs/swagger.json#/definitions/WeekHours',
],
},
}
expected = {
'MON': {
'$ref': '#/definitions/DayHours',
},
}
assert expected == strip_xscope(fragment)
assert 'x-scope' in fragment['MON']
def test_contained_in_list():
fragment = [
{
'$ref': '#/definitions/DayHours',
'x-scope': [
'file:///happyhour/api_docs/swagger.json',
'file:///happyhour/api_docs/swagger.json#/definitions/WeekHours',
],
},
]
expected = [
{
'$ref': '#/definitions/DayHours',
},
]
assert expected == strip_xscope(fragment)
assert 'x-scope' in fragment[0]
def test_no_op():
fragment = {
'MON': {
'$ref': '#/definitions/DayHours',
},
}
expected = {
'MON': {
'$ref': '#/definitions/DayHours',
},
}
assert expected == strip_xscope(fragment)
def test_petstore_spec(petstore_spec):
assert petstore_spec.client_spec_dict == strip_xscope(petstore_spec.spec_dict)
|
pytest_use_postgresql.py | admariner/django-sql-dashboard | 293 | 12791491 | import os
import pytest
from dj_database_url import parse
from django.conf import settings
from testing.postgresql import Postgresql
postgres = os.environ.get("POSTGRESQL_PATH")
initdb = os.environ.get("INITDB_PATH")
_POSTGRESQL = Postgresql(postgres=postgres, initdb=initdb)
@pytest.hookimpl(tryfirst=True)
def pytest_load_initial_conftests(early_config, parser, args):
os.environ["DJANGO_SETTINGS_MODULE"] = early_config.getini("DJANGO_SETTINGS_MODULE")
settings.DATABASES["default"] = parse(_POSTGRESQL.url())
settings.DATABASES["dashboard"] = parse(_POSTGRESQL.url())
def pytest_unconfigure(config):
_POSTGRESQL.stop()
|
tableauserverclient/models/target.py | zuarbase/server-client-python | 470 | 12791513 | <filename>tableauserverclient/models/target.py
"""Target class meant to abstract mappings to other objects"""
class Target:
def __init__(self, id_, target_type):
self.id = id_
self.type = target_type
def __repr__(self):
return "<Target#{id}, {type}>".format(**self.__dict__)
|
tools/builder.py | dp92987/nginx-amplify-agent | 308 | 12791532 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
from builders import deb, rpm, amazon
from builders.util import shell_call
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
if __name__ == '__main__':
package = 'nginx-amplify-agent' if len(sys.argv) == 1 else sys.argv[1]
if os.path.isfile('/etc/debian_version'):
deb.build(package=package)
elif os.path.isfile('/etc/redhat-release'):
rpm.build(package=package)
else:
os_release = shell_call('cat /etc/os-release', important=False)
if 'amazon linux' in os_release.lower():
amazon.build(package=package)
else:
print("sorry, it will be done later\n")
|
ros/dataset_to_rosbag.py | sn0wflake/gta | 4,498 | 12791549 | #!/usr/bin/env python
from itertools import izip
import numpy as np
import h5py
from progress.bar import Bar
import sys
import rospy
import rosbag
from sensor_msgs.msg import Imu, Image
def main():
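    """Convert the h5 log/camera dataset named on the command line into a
    rosbag containing /camera/image_raw (Image) and /fiber_imu (Imu) messages."""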
if len(sys.argv) < 2:
print("Usage: {} dataset_name".format(sys.argv[0]))
exit(1)
file_name = sys.argv[1]
log_file = h5py.File('../dataset/log/{}.h5'.format(file_name))
camera_file = h5py.File('../dataset/camera/{}.h5'.format(file_name))
zipped_log = izip(
log_file['times'],
log_file['fiber_accel'],
log_file['fiber_gyro'])
with rosbag.Bag('{}.bag'.format(file_name), 'w') as bag:
bar = Bar('Camera', max=len(camera_file['X']))
for i, img_data in enumerate(camera_file['X']):
m_img = Image()
m_img.header.stamp = rospy.Time.from_sec(0.01 * i)
m_img.height = img_data.shape[1]
m_img.width = img_data.shape[2]
m_img.step = 3 * img_data.shape[2]
m_img.encoding = 'rgb8'
m_img.data = np.transpose(img_data, (1, 2, 0)).flatten().tolist()
bag.write('/camera/image_raw', m_img, m_img.header.stamp)
bar.next()
bar.finish()
bar = Bar('IMU', max=len(log_file['times']))
for time, v_accel, v_gyro in zipped_log:
m_imu = Imu()
m_imu.header.stamp = rospy.Time.from_sec(time)
[setattr(m_imu.linear_acceleration, c, v_accel[i]) for i, c in enumerate('xyz')]
[setattr(m_imu.angular_velocity, c, v_gyro[i]) for i, c in enumerate('xyz')]
bag.write('/fiber_imu', m_imu, m_imu.header.stamp)
bar.next()
bar.finish()
if __name__ == "__main__":
main()
|
tests/test_py33_exceptions.py | haypo/trollius | 175 | 12791553 | # -*- coding: utf-8 -*-
"""
Tests for py33_exceptions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from trollius import py33_exceptions
class TestWrapErrors(unittest.TestCase):
def test_ebadf_wrapped_to_OSError(self):
# https://github.com/jamadden/trollius/issues/17
import socket
import os
import errno
s = socket.socket()
os.close(s.fileno())
with self.assertRaises(socket.error) as exc:
s.send(b'abc')
self.assertEqual(exc.exception.errno, errno.EBADF)
with self.assertRaises(OSError) as exc:
py33_exceptions.wrap_error(s.send, b'abc')
self.assertEqual(exc.exception.errno, errno.EBADF)
if __name__ == '__main__':
unittest.main()
|
tests/RunTests/PythonTests/test2011_011.py | maurizioabba/rose | 488 | 12791565 | <gh_stars>100-1000
# tests for dictionary displays
a = {}
b = {1: 2}
c = {3: 4, 5: 6}
d = {7: "seven", 8: "eight", 9: "nine", 10: "one" + "zero"}
print a
print b
print c
print d
|
speech_mixer.py | ZhihaoDU/speech_feature_extractor | 111 | 12791645 | <reponame>ZhihaoDU/speech_feature_extractor
# coding=utf-8
import numpy as np
from read_sphere_wav import read_sphere_wav
from scipy.io import wavfile
from feature_extractor import *
from matplotlib import pyplot as plt
def SNR(x1, x2):
from numpy.linalg import norm
return 20 * np.log10(norm(x1) / norm(x2))
def mix_by_db(x1, x2, snr, handle_method):
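    """Mix clean speech x1 with noise x2 at the given SNR in dB.
    The noise is rescaled so that 20*log10(norm(x1)/norm(x2)) equals snr after
    mixing; a length mismatch is handled by truncating both signals to the
    shorter one ('cut') or padding the shorter signal with its own leading
    samples ('append')."""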
x1 = x1.astype(np.int32)
x2 = x2.astype(np.int32)
l1 = x1.shape[0]
l2 = x2.shape[0]
if l1 != l2:
if handle_method == 'cut':
ll = min(l1, l2)
x1 = x1[:ll]
x2 = x2[:ll]
elif handle_method == 'append':
ll = max(l1, l2)
if l1 < ll:
x1 = np.append(x1, x1[:ll-l1])
if l2 < ll:
                x2 = np.append(x2, x2[:ll-l2])
from numpy.linalg import norm
x2 = x2 / norm(x2) * norm(x1) / (10.0 ** (0.05 * snr))
mix = x1 + x2
return mix
if __name__ == '__main__':
speech_data, wav_header = read_sphere_wav(u"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav")
fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav')
plt.figure()
spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True)
plt.subplot(311)
plt.imshow(spect)
noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut')
spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True)
plt.subplot(312)
plt.imshow(spect)
noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut')
spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True)
plt.subplot(313)
plt.imshow(spect)
plt.figure()
noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut')
spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True)
plt.subplot(211)
plt.imshow(spect)
noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut')
spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True)
plt.subplot(212)
plt.imshow(spect)
plt.show()
#sd.play(noisy_speech.astype(np.int32), fs, blocking=True)
|
miniboss/__init__.py | afroisalreadyinu/miniboss | 633 | 12791659 | from .main import cli
from .services import Service
from .context import Context
from .types import set_group_name as group_name
|
homeassistant/components/hardkernel/const.py | liangleslie/core | 30,023 | 12791666 | <filename>homeassistant/components/hardkernel/const.py
"""Constants for the Hardkernel integration."""
DOMAIN = "hardkernel"
|
tests/UnitTests/Morphology/Disambiguator/disambiguator_prefix_rule1_test.py | ZenaNugraha/PySastrawi | 282 | 12791668 | import unittest
from Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1 import DisambiguatorPrefixRule1a, DisambiguatorPrefixRule1b
class Test_DisambiguatorPrefixRule1Test(unittest.TestCase):
def setUp(self):
self.subject1a = DisambiguatorPrefixRule1a()
self.subject1b = DisambiguatorPrefixRule1b()
return super(Test_DisambiguatorPrefixRule1Test, self).setUp()
def test_disambiguate1a(self):
self.assertEquals('ia-ia', self.subject1a.disambiguate('beria-ia'))
self.assertIsNone(self.subject1a.disambiguate('berlari'))
def test_disambiguate1b(self):
self.assertEquals('rakit', self.subject1b.disambiguate('berakit'))
self.assertIsNone(self.subject1b.disambiguate('bertabur'))
if __name__ == '__main__':
unittest.main()
|
app/admin/__init__.py | sunshineinwater/flask-Purchase_and_sale | 122 | 12791690 | <filename>app/admin/__init__.py
#-*- coding:utf-8 -*-
# author:Agam
# datetime:2018-11-05
from flask import Blueprint
admin=Blueprint('admin',__name__)
import app.admin.views
|
janitor/functions/groupby_agg.py | thatlittleboy/pyjanitor | 225 | 12791693 | from typing import Callable, List, Union
import pandas_flavor as pf
import pandas as pd
from janitor.utils import deprecated_alias
@pf.register_dataframe_method
@deprecated_alias(new_column="new_column_name", agg_column="agg_column_name")
def groupby_agg(
df: pd.DataFrame,
by: Union[List, Callable, str],
new_column_name: str,
agg_column_name: str,
agg: Union[Callable, str],
dropna: bool = True,
) -> pd.DataFrame:
"""Shortcut for assigning a groupby-transform to a new column.
This method does not mutate the original DataFrame.
Intended to be the method-chaining equivalent of:
```python
df = df.assign(...=df.groupby(...)[...].transform(...))
```
Example: Basic usage.
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({
... "item": ["shoe", "shoe", "bag", "shoe", "bag"],
... "quantity": [100, 120, 75, 200, 25],
... })
>>> df.groupby_agg(
... by="item",
... agg="mean",
... agg_column_name="quantity",
... new_column_name="avg_quantity",
... )
item quantity avg_quantity
0 shoe 100 140.0
1 shoe 120 140.0
2 bag 75 50.0
3 shoe 200 140.0
4 bag 25 50.0
Example: Set `dropna=False` to compute the aggregation, treating the null
values in the `by` column as an isolated "group".
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({
... "x": ["a", "a", None, "b"], "y": [9, 9, 9, 9],
... })
>>> df.groupby_agg(
... by="x",
... agg="count",
... agg_column_name="y",
... new_column_name="y_count",
... dropna=False,
... )
x y y_count
0 a 9 2
1 a 9 2
2 None 9 1
3 b 9 1
:param df: A pandas DataFrame.
:param by: Column(s) to groupby on, will be passed into `DataFrame.groupby`.
:param new_column_name: Name of the aggregation output column.
:param agg_column_name: Name of the column to aggregate over.
:param agg: How to aggregate.
:param dropna: Whether or not to include null values, if present in the
`by` column(s). Default is True (null values in `by` are assigned NaN in
the new column).
:returns: A pandas DataFrame.
""" # noqa: E501
return df.assign(
**{
new_column_name: df.groupby(by, dropna=dropna)[
agg_column_name
].transform(agg),
}
)
|
PyFlow/Packages/PyFlowBase/Nodes/forLoopBegin.py | luzpaz/PyFlow | 1,463 | 12791725 | ## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from PyFlow.Core import NodeBase
from PyFlow.Core.PathsRegistry import PathsRegistry
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
from PyFlow.Core.Common import *
from PyFlow.Packages.PyFlowBase.Nodes import FLOW_CONTROL_ORANGE
import threading
class forLoopBegin(NodeBase):
def __init__(self, name):
super(forLoopBegin, self).__init__(name)
self._working = False
self.currentIndex = 0
self.prevIndex = -1
self.inExec = self.createInputPin('inExec', 'ExecPin', None, self.compute)
self.firstIndex = self.createInputPin('Start', 'IntPin')
self.lastIndex = self.createInputPin('Stop', 'IntPin')
self.loopEndNode = self.createInputPin('Paired block', 'StringPin')
self.loopEndNode.setInputWidgetVariant("ObjectPathWIdget")
self.loopBody = self.createOutputPin('LoopBody', 'ExecPin')
self.index = self.createOutputPin('Index', 'IntPin')
self.headerColor = FLOW_CONTROL_ORANGE
self.setExperimental()
@staticmethod
def pinTypeHints():
helper = NodePinsSuggestionsHelper()
helper.addInputDataType('ExecPin')
helper.addInputDataType('IntPin')
helper.addOutputDataType('ExecPin')
helper.addOutputDataType('IntPin')
helper.addInputStruct(StructureType.Single)
helper.addOutputStruct(StructureType.Single)
return helper
@staticmethod
def category():
return 'FlowControl'
@staticmethod
def keywords():
return ['iter']
@staticmethod
def description():
return 'For loop begin block'
def reset(self):
self.currentIndex = 0
self.prevIndex = -1
#self._working = False
def isDone(self):
indexTo = self.lastIndex.getData()
if self.currentIndex >= indexTo:
self.reset()
#loopEndNode = PathsRegistry().getEntity(self.loopEndNode.getData())
#loopEndNode.completed.call()
self._working = False
return True
return False
def onNext(self, *args, **kwargs):
while not self.isDone():
if self.currentIndex > self.prevIndex:
self.index.setData(self.currentIndex)
self.prevIndex = self.currentIndex
self.loopBody.call()
def compute(self, *args, **kwargs):
self.reset()
endNodePath = self.loopEndNode.getData()
loopEndNode = PathsRegistry().getEntity(endNodePath)
if loopEndNode is not None:
if loopEndNode.loopBeginNode.getData() != self.path():
self.setError("Invalid pair")
return
if self.graph() is not loopEndNode.graph():
err = "block ends in different graphs"
self.setError(err)
loopEndNode.setError(err)
return
else:
self.setError("{} not found".format(endNodePath))
if not self._working:
self.thread = threading.Thread(target=self.onNext,args=(self, args, kwargs))
self.thread.start()
self._working = True
#self.onNext(*args, **kwargs)
|
src/vimpdb/proxy.py | dtrckd/vimpdb | 110 | 12791748 | import os
import socket
import subprocess
from vimpdb import config
from vimpdb import errors
def get_eggs_paths():
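    """Return the parent directories of the vimpdb and vim_bridge packages so
    the Vim side can add them to its Python path (see PDB_setup_egg below)."""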
import vim_bridge
vimpdb_path = config.get_package_path(errors.ReturnCodeError())
vim_bridge_path = config.get_package_path(vim_bridge.bridged)
return (
os.path.dirname(vimpdb_path),
os.path.dirname(vim_bridge_path),
)
class Communicator(object):
def __init__(self, script, server_name):
self.script = script
self.server_name = server_name
def prepare_subprocess(self, *args):
parts = self.script.split()
parts.extend(args)
return parts
def _remote_expr(self, expr):
parts = self.prepare_subprocess('--servername',
self.server_name, "--remote-expr", expr)
p = subprocess.Popen(parts, stdout=subprocess.PIPE)
return_code = p.wait()
if return_code:
raise errors.RemoteUnavailable()
child_stdout = p.stdout
output = child_stdout.read()
return output.strip()
def _send(self, command):
# add ':<BS>' to hide last keys sent in VIM command-line
command = ''.join((command, ':<BS>'))
parts = self.prepare_subprocess('--servername',
self.server_name, "--remote-send", command)
return_code = subprocess.call(parts)
if return_code:
raise errors.RemoteUnavailable()
class ProxyToVim(object):
"""
    Use subprocess to launch a Vim instance that uses clientserver mode
    to communicate with the Vim instance used for debugging.
"""
def __init__(self, communicator):
self.communicator = communicator
def _send(self, command):
self.communicator._send(command)
config.logger.debug("sent: %s" % command)
def _remote_expr(self, expr):
return self.communicator._remote_expr(expr)
def setupRemote(self):
if not self.isRemoteSetup():
# source vimpdb.vim
proxy_package_path = config.get_package_path(self)
filename = os.path.join(proxy_package_path, "vimpdb.vim")
command = "<C-\><C-N>:source %s<CR>" % filename
self._send(command)
for egg_path in get_eggs_paths():
self._send(':call PDB_setup_egg(%s)<CR>' % repr(egg_path))
self._send(':call PDB_init_controller()')
def isRemoteSetup(self):
status = self._expr("exists('*PDB_setup_egg')")
return status == '1'
def showFeedback(self, feedback):
if not feedback:
return
feedback_list = feedback.splitlines()
self.setupRemote()
self._send(':call PDB_show_feedback(%s)<CR>' % repr(feedback_list))
def displayLocals(self, feedback):
if not feedback:
return
feedback_list = feedback.splitlines()
self.setupRemote()
self._send(':call PDB_reset_watch()<CR>')
for line in feedback_list:
self._send(':call PDB_append_watch([%s])<CR>' % repr(line))
def showFileAtLine(self, filename, lineno):
if os.path.exists(filename):
self._showFileAtLine(filename, lineno)
def _showFileAtLine(self, filename, lineno):
# Windows compatibility:
# Windows command-line does not play well with backslash in filename.
# So turn backslash to slash; Vim knows how to translate them back.
filename = filename.replace('\\', '/')
self.setupRemote()
self._send(':call PDB_show_file_at_line("%s", "%d")<CR>'
% (filename, lineno))
def _expr(self, expr):
config.logger.debug("expr: %s" % expr)
result = self._remote_expr(expr)
config.logger.debug("result: %s" % result)
return result
# code leftover from hacking
# def getText(self, prompt):
# self.setupRemote()
# command = self._expr('PDB_get_command("%s")' % prompt)
# return command
class ProxyFromVim(object):
BUFLEN = 512
socket_factory = socket.socket
def __init__(self, port):
self.socket_inactive = True
self.port = port
def bindSocket(self):
if self.socket_inactive:
self.socket = self.socket_factory(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('', self.port))
self.socket_inactive = False
def closeSocket(self):
if not self.socket_inactive:
self.socket.close()
self.socket_inactive = True
def waitFor(self, pdb):
self.bindSocket()
(message, address) = self.socket.recvfrom(self.BUFLEN)
config.logger.debug("command: %s" % message)
return message
# code leftover from hacking
# def eat_stdin(self):
# sys.stdout.write('-- Type Ctrl-D to continue --\n')
# sys.stdout.flush()
# sys.stdin.readlines()
|
recognition/predict.py | w-garcia/insightface | 108 | 12791780 | <reponame>w-garcia/insightface
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import sys
import tensorflow as tf
import yaml
from recognition.backbones.resnet_v1 import ResNet_v1_50
from recognition.models.models import MyModel
tf.enable_eager_execution()
def get_embeddings(model, images):
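    """Run the model on a batch of images and return L2-normalised embeddings."""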
prelogits, _, _ = model(images, training=False)
embeddings = tf.nn.l2_normalize(prelogits, axis=-1)
return embeddings
def parse_args(argv):
parser = argparse.ArgumentParser(description='Train face network')
parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml')
args = parser.parse_args(argv)
return args
def main():
args = parse_args(sys.argv[1:])
# logger.info(args)
from recognition.data.generate_data import GenerateData
with open(args.config_path) as cfg:
config = yaml.load(cfg, Loader=yaml.FullLoader)
gd = GenerateData(config)
train_data, _ = gd.get_train_data()
model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size'])
ckpt_dir = os.path.expanduser(config['ckpt_dir'])
ckpt = tf.train.Checkpoint(backbone=model.backbone)
ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()
print("Restored from {}".format(tf.train.latest_checkpoint(ckpt_dir)))
# for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)):
# print(layer)
for img, _ in train_data.take(1):
embs = get_embeddings(model, img)
for i in range(embs.shape[0]):
for j in range(embs.shape[0]):
val = 0
for k in range(512):
val += embs[i][k] * embs[j][k]
print(i, j, val)
if __name__ == '__main__':
# logger.info("hello, insightface/recognition")
main()
|
Lib/test/test_compiler/testcorpus/04_assign.py | diogommartins/cinder | 1,886 | 12791781 | a = 1
b = "foo"
c = (d, e)
di = {f: 1, g: 2}
|
setup.py | Mumbleskates/jsane | 131 | 12791792 | <filename>setup.py
#!/usr/bin/env python
import sys
from jsane import __version__
assert sys.version_info >= (2, 7), ("Requires Python v2.7 or above, get with the "
                                    "times, grandpa.")
from setuptools import setup
classifiers = [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules",
]
install_requires = []
setup_requires = ['pytest-runner']
tests_require = ['pep8', 'pytest'] + install_requires
setup(
name="jsane",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/skorokithakis/jsane/",
description="A saner way to parse JSON.",
long_description=open("README.rst").read(),
license="MIT",
classifiers=classifiers,
packages=["jsane"],
setup_requires=setup_requires,
tests_require=tests_require,
install_requires=install_requires,
test_suite='jsane.tests',
)
|
packages/testcases/input/nameprep/extract-tests.py | taarushv/ethers.js | 4,494 | 12791847 | import json
import re
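# Extract the nameprep test vectors from test-vectors-00.txt: unwrap the
# continuation lines, pull out each {...} record, decode the C-style \xNN
# escapes into byte lists, and print the result as JSON.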
output = ""
for line in file("test-vectors-00.txt"):
line = line.strip()
if line == "" or line[0:1] == "#":
continue
if line.startswith("Josefsson") or line.startswith("Internet-Draft"):
continue
output += line.replace("\n", "")
Tests = [ ]
def get_byte(v):
if len(v) == 1:
return ord(v)
return int(v[2:4], 16)
def get_string(value):
value = value.strip()
if value[0] == '"' and value[-1] == '"':
return map(get_byte, re.findall("(\\\\x[0-9a-fA-F]{2}|.)", value[1:-1].replace('""', '')))
if value.lower() == "null":
return None
raise Exception("unhandled")
Tests = [ ]
matches = re.findall("({(?:.|\n)*?})", output)
for m in matches:
comps = m[1:-1].split(",")
test = dict(
comment = comps[0].strip()[1:-1],
input = get_string(comps[1]),
output = get_string(comps[2])
)
if len(comps) >= 4:
test["profile"] = get_string(comps[3])
if len(comps) >= 5:
test["flags"] = comps[4].strip()
if len(comps) >= 6:
test["rc"] = comps[5].strip()
Tests.append(test)
print json.dumps(Tests)
|
lightreid/models/architectures/build.py | nataliamiccini/light-reid | 296 | 12791903 | <gh_stars>100-1000
from lightreid.utils import Registry
ARCHs_REGISTRY = Registry('arch')
|
gh_build.py | sonvt1710/manga-py | 337 | 12791914 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from helpers.gh_pages import main
main()
|
tests/unit/test_lists.py | scherroman/mugen | 119 | 12791918 | import pytest
from mugen import lists
from mugen.lists import MugenList
class Dummy(object):
foo = 1
@pytest.fixture
def mugen_list() -> MugenList:
return MugenList([Dummy(), Dummy(), Dummy(), Dummy(), Dummy(), Dummy()])
@pytest.mark.parametrize("l, expected_foo", [
(mugen_list(), [1, 1, 1, 1, 1, 1])
])
def test_lget(l, expected_foo):
assert l.lget('foo') == expected_foo
@pytest.mark.parametrize("l, expected_l", [
([1, [2, 3], [[4, 5], [6, 7]]], [1, 2, 3, 4, 5, 6, 7])
])
def test_flatten(l, expected_l):
assert lists.flatten(l) == expected_l
def test_mugen_list__operations_yield_mugen_list():
assert type(MugenList() + MugenList()) == MugenList
assert type(MugenList()[1:2]) == MugenList
|
crabageprediction/venv/Lib/site-packages/fontTools/ttLib/tables/_c_i_d_g.py | 13rianlucero/CrabAgePrediction | 2,705 | 12791927 | # coding: utf-8
from .otBase import BaseTTXConverter
class table__c_i_d_g(BaseTTXConverter):
"""The AAT ``cidg`` table has almost the same structure as ``gidc``,
just mapping CIDs to GlyphIDs instead of the reverse direction.
It is useful for fonts that may be used by a PDF renderer in lieu of
a font reference with a known glyph collection but no subsetted
glyphs. For instance, a PDF can say “please use a font conforming
to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
say, a TrueType font. ``gidc`` is lossy for this purpose and is
obsoleted by ``cidg``.
For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
(which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table.
"""
pass
|
services/workers/settings/base.py | paulowe/aws-boilerplate | 711 | 12791949 | import json
import boto3
from environs import Env
env = Env()
AWS_ENDPOINT_URL = env('AWS_ENDPOINT_URL', None)
SMTP_HOST = env('SMTP_HOST', None)
EMAIL_ENABLED = env.bool('EMAIL_ENABLED', default=True)
secrets_manager_client = boto3.client('secretsmanager', endpoint_url=AWS_ENDPOINT_URL)
def fetch_db_secret(db_secret_arn):
if db_secret_arn is None:
return None
response = secrets_manager_client.get_secret_value(SecretId=db_secret_arn)
return json.loads(response['SecretString'])
LAMBDA_TASK_ROOT = env('LAMBDA_TASK_ROOT', '')
DB_CONNECTION = env('DB_CONNECTION', None)
if DB_CONNECTION:
DB_CONNECTION = json.loads(DB_CONNECTION)
else:
DB_CONNECTION = fetch_db_secret(env('DB_SECRET_ARN', None))
FROM_EMAIL = env('FROM_EMAIL', None)
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKendalblackBlogspotCom.py | fake-name/ReadableWebProxy | 193 | 12791996 | def extractKendalblackBlogspotCom(item):
'''
DISABLED
Parser for 'kendalblack.blogspot.com'
'''
return None |
dephell/__main__.py | OliverHofkens/dephell | 1,880 | 12792035 | # app
from .cli import entrypoint
entrypoint()
|
docs/examples/use_cases/tensorflow/efficientdet/dataset/create_tfrecord_indexes.py | cyyever/DALI | 3,967 | 12792087 | <gh_stars>1000+
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generate TFRecord index files necessary when using DALI preprocessing.
Example usage:
python create_tfrecord_indexes.py --tfrecord2idx_script=~/DALI/tools/tfrecord2idx \
--tfrecord_file_pattern=tfrecord/pascal*.tfrecord
"""
from absl import app
from absl import flags
from absl import logging
from glob import glob
from subprocess import call
import os.path
flags.DEFINE_string("tfrecord_file_pattern", None, "Glob for tfrecord files.")
flags.DEFINE_string(
"tfrecord2idx_script", None, "Absolute path to tfrecord2idx script."
)
FLAGS = flags.FLAGS
def main(_):
if FLAGS.tfrecord_file_pattern is None:
raise RuntimeError("Must specify --tfrecord_file_pattern.")
if FLAGS.tfrecord2idx_script is None:
raise RuntimeError("Must specify --tfrecord2idx_script")
tfrecord_files = glob(FLAGS.tfrecord_file_pattern)
tfrecord_idxs = [filename + "_idx" for filename in tfrecord_files]
if not os.path.isfile(FLAGS.tfrecord2idx_script):
raise ValueError(
"{FLAGS.tfrecord2idx_script} does not lead to valid tfrecord2idx script."
)
for tfrecord, tfrecord_idx in zip(tfrecord_files, tfrecord_idxs):
logging.info(f"Generating index file for {tfrecord}")
call([FLAGS.tfrecord2idx_script, tfrecord, tfrecord_idx])
if __name__ == "__main__":
app.run(main)
|
Hackerrank/World Cup 2016/World Cup/Problem F/gen.py | VastoLorde95/Competitive-Programming | 170 | 12792121 | from math import *
from fractions import *
from random import *
n = 1000000000
q = 200000
print n, q
for _ in xrange(q):
t = randrange(1,4)
l,r,c = randrange(1,n+1), randrange(1,n+1), 1000000000
if t < 3:
print t,l,r,c
else:
print t,l,r
#print 3, 1, 1000000000
|
core/views.py | HortenciaArliane/speakerfight | 369 | 12792124 |
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponseRedirect
from django.contrib import messages
from django.shortcuts import get_object_or_404
from django.utils import translation
from vanilla import TemplateView, DetailView, UpdateView
from deck.models import Event, Proposal
from core.models import Profile
from core.forms import ProfileForm, ProfilePictureForm, ProfileChangeLanguageForm
from core.mixins import LoginRequiredMixin, FormValidRedirectMixing
class IndexView(TemplateView):
template_name = 'index.html'
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context.update(
events=Event.objects.count(),
proposals=Proposal.objects.count(),
users=User.objects.count()
)
return context
class AboutView(TemplateView):
template_name = 'about.html'
class ProfileView(DetailView):
template_name = 'account/profile.html'
model = Profile
lookup_field = 'user__username'
def get_object(self, **kwargs):
queryset = self.get_queryset()
username = self.kwargs.get('user__username')
if not username and self.request.user.is_authenticated():
return self.request.user.profile
else:
return get_object_or_404(queryset, user__username=username)
def get_context_data(self, **kwargs):
context = super(ProfileView, self).get_context_data(**kwargs)
self.object = self.get_object()
context.update(
profile_form=ProfileForm(instance=self.object),
language_form=ProfileChangeLanguageForm(instance=self.object),
events=self.object.get_profile_events(),
proposals=self.object.get_profile_proposals(),
)
return context
class ProfileUpdateView(LoginRequiredMixin,
FormValidRedirectMixing,
UpdateView):
template_name = 'account/profile.html'
model = Profile
form_class = ProfileForm
lookup_field = 'user__username'
def get_object(self, **kwargs):
queryset = self.get_queryset()
username = self.kwargs.get('user__username')
if not username and self.request.user.is_authenticated():
return self.request.user.profile
elif (username == self.request.user.username or
self.request.user.is_superuser):
return get_object_or_404(queryset, user__username=username)
else:
raise Http404
def form_valid(self, form):
self.object = form.save()
return self.success_redirect(_(u'Profile updated.'))
def get(self, *args, **kwargs):
self.object = self.get_object()
return HttpResponseRedirect(
self.object.get_absolute_url()
)
def form_invalid(self, form):
for error in form.errors.itervalues():
messages.error(self.request, error.as_data()[0].message)
return self.get()
class ProfileUpdatePictureView(ProfileUpdateView):
form_class = ProfilePictureForm
def form_valid(self, form):
self.object = form.save()
return self.success_redirect(_(u'Photo changed.'))
class ProfileChangeLanguageView(ProfileUpdateView):
form_class = ProfileChangeLanguageForm
def form_valid(self, form):
self.object = form.save()
translation.activate(self.object.language)
self.request.session[
translation.LANGUAGE_SESSION_KEY
] = self.object.language
return self.success_redirect(_(u'Language changed.'))
|
OpenAttack/data/test.py | e-tornike/OpenAttack | 444 | 12792136 | NAME = "test"
DOWNLOAD = "/TAADToolbox/test.pkl"
|
tests/test_context_manager.py | timgates42/tasktiger | 1,143 | 12792144 | <reponame>timgates42/tasktiger
"""Child context manager tests."""
import redis
from tasktiger import Worker
from .tasks import exception_task, simple_task
from .test_base import BaseTestCase
from .config import TEST_DB, REDIS_HOST
class ContextManagerTester(object):
"""
Dummy context manager class.
Uses Redis to track number of enter/exit calls
"""
def __init__(self, name):
self.name = name
self.conn = redis.Redis(
host=REDIS_HOST, db=TEST_DB, decode_responses=True
)
self.conn.set('cm:{}:enter'.format(self.name), 0)
self.conn.set('cm:{}:exit'.format(self.name), 0)
self.conn.set('cm:{}:exit_with_error'.format(self.name), 0)
def __enter__(self):
self.conn.incr('cm:{}:enter'.format(self.name))
def __exit__(self, exc_type, exc_val, exc_tb):
self.conn.incr('cm:{}:exit'.format(self.name))
if exc_type is not None:
self.conn.incr('cm:{}:exit_with_error'.format(self.name))
self.conn.close()
class TestChildContextManagers(BaseTestCase):
"""Child context manager tests."""
def _get_context_managers(self, number):
return [ContextManagerTester('cm' + str(i)) for i in range(number)]
def _test_context_managers(self, num, task, should_fail=False):
cms = self._get_context_managers(num)
self.tiger.config['CHILD_CONTEXT_MANAGERS'] = cms
self.tiger.delay(task)
Worker(self.tiger).run(once=True)
for i in range(num):
assert self.conn.get('cm:{}:enter'.format(cms[i].name)) == '1'
assert self.conn.get('cm:{}:exit'.format(cms[i].name)) == '1'
if should_fail:
assert (
self.conn.get('cm:{}:exit_with_error'.format(cms[i].name))
== '1'
)
else:
assert (
self.conn.get('cm:{}:exit_with_error'.format(cms[i].name))
== '0'
)
def test_fixture(self):
cms = self._get_context_managers(1).pop()
with cms:
pass
assert self.conn.get('cm:{}:enter'.format(cms.name)) == '1'
assert self.conn.get('cm:{}:exit'.format(cms.name)) == '1'
def test_single_context_manager(self):
self._test_context_managers(1, simple_task)
self._test_context_managers(1, exception_task, should_fail=True)
def test_multiple_context_managers(self):
self._test_context_managers(10, simple_task)
self._test_context_managers(10, exception_task, should_fail=True)
|
docs/examples/save_geotiff.py | carderne/descarteslabs-python | 167 | 12792145 | """
==================================================
Save image to GeoTIFF
==================================================
This example demonstrates how to save an image
to your local machine in GeoTIFF format.
"""
import descarteslabs as dl
# Create an aoi feature to clip imagery to
box = {
"type": "Polygon",
"coordinates": [
[
[-108.64292971398066, 33.58051349561343],
[-108.27082685426221, 33.58051349561343],
[-108.27082685426221, 33.83925599538719],
[-108.64292971398066, 33.83925599538719],
[-108.64292971398066, 33.58051349561343],
]
],
}
# Two predefined image IDs for mosaic and download. These can be obtained through a Metadata or Scenes API search
images = [
"landsat:LC08:01:RT:TOAR:meta_LC08_L1TP_035037_20180602_20180602_01_RT_v1",
"landsat:LC08:01:RT:TOAR:meta_LC08_L1TP_035036_20180602_20180602_01_RT_v1",
]
# The Raster API call to download an image mosaic. Other parameters are available
# The file is written in to the same directory as the script.
raster_client = dl.Raster()
raster_client.raster(
inputs=images,
bands=["red", "green", "blue", "alpha"],
scales=[[0, 5500], [0, 5500], [0, 5500], None],
data_type="Byte",
cutline=box,
save=True,
outfile_basename="save_local",
resolution=60,
)
|
core/migrations/remove_provider_dns_server_ip_model.py | simpsonw/atmosphere | 197 | 12792165 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-09-07 21:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', 'remove_atmosphereuser_selected_identity'),
]
operations = [
migrations.AlterUniqueTogether(
name='providerdnsserverip',
unique_together=set([]),
),
migrations.RemoveField(
model_name='providerdnsserverip',
name='provider',
),
migrations.DeleteModel(name='ProviderDNSServerIP', ),
]
|
custom/inddex/food.py | dimagilg/commcare-hq | 471 | 12792192 | """
This file contains the logic to generate the master dataset for the INDDEX reports
Overview
--------
Beneficiaries are asked about their diet in a "recall" session. This results in
a "foodrecall" case. Every food they mention results in the creation of a "food"
case that's a child of this foodrecall.
This dataset has a row for every food, with metadata about the recall session,
calculated nutritional information, and auditing columns reporting on what data
is or isn't available. Some of these foods are recipes, and their ingredients
appear as separate rows in the report.
Standard recipes have their ingredients enumerated in the "recipes" lookup
table. This dataset has additional rows inserted for each ingredient. These
rows are associated with the recipe case, but don't have a case of their own.
Nonstandard recipes are defined by the user and beneficiary during a recall
session. The ingredients of the recipe are entered as additional food cases and
linked to the recipe by `recipe_case_id`.
Beneficiaries may report eating a nonstandard recipe more than once, in which
case subsequent references point to the recipe definition with
already_reported_recipe_case_id and don't enumerate the ingredients again. We
need to insert duplicates of the previously reported ingredients into the
report for them.
Components
----------
FoodData :: This is the interface to this dataset, it glues together all the
component pieces and presents the result as a unified dataset.
FoodRow :: Class responsible for row-wise calculations and indicator definitions.
"""
import operator
import uuid
from collections import defaultdict
from functools import reduce
from memoized import memoized
from corehq.apps.es import users as user_es
from corehq.apps.reports.filters.case_list import CaseListFilter as EMWF
from corehq.apps.reports.standard.cases.utils import get_case_owners
from custom.inddex.ucr_data import FoodCaseData
from .const import (
AGE_RANGES,
FOOD_ITEM,
NON_STANDARD_RECIPE,
STANDARD_RECIPE,
ConvFactorGaps,
FctGaps,
)
from .fixtures import FixtureAccessor
IN_UCR = 'in_ucr'
IN_FOOD_FIXTURE = 'in_food_fixture'
IS_RECALL_META = 'is_recall_meta'
CALCULATED_LATER = 'calculated_later'
class I:
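    """Indicator definition: a column slug plus tags recording whether the value
    comes from the UCR, the food fixture, recall metadata, or is calculated later."""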
def __init__(self, slug, *tags):
self.slug = slug
tags = set(tags)
self.in_ucr = IN_UCR in tags
self.in_food_fixture = IN_FOOD_FIXTURE in tags
self.is_recall_meta = IS_RECALL_META in tags
self.is_calculated_later = CALCULATED_LATER in tags
# Indicator descriptions can be found here:
# https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit
INDICATORS = [
I('unique_respondent_id', IN_UCR, IS_RECALL_META),
I('location_id', IN_UCR, IS_RECALL_META),
I('respondent_id', IN_UCR, IS_RECALL_META),
I('recall_case_id', IN_UCR, IS_RECALL_META),
I('opened_by_username', IN_UCR, IS_RECALL_META),
I('owner_name', IN_UCR, IS_RECALL_META),
I('visit_date', IN_UCR, IS_RECALL_META),
I('opened_on', IN_UCR, IS_RECALL_META),
I('recall_status', IN_UCR, IS_RECALL_META),
I('gender', IN_UCR, IS_RECALL_META),
I('age_years_calculated', IN_UCR, IS_RECALL_META),
I('age_months_calculated', IN_UCR, IS_RECALL_META),
I('age_range', IS_RECALL_META),
I('pregnant', IN_UCR, IS_RECALL_META),
I('breastfeeding', IN_UCR, IS_RECALL_META),
I('urban_rural', IN_UCR, IS_RECALL_META),
I('supplements', IN_UCR, IS_RECALL_META),
I('food_code', IN_UCR),
I('food_name', IN_UCR, IN_FOOD_FIXTURE),
I('recipe_name', IN_UCR, CALCULATED_LATER),
I('caseid'),
I('food_type', IN_UCR, IN_FOOD_FIXTURE),
I('food_status', IN_UCR, IS_RECALL_META),
I('reference_food_code'),
I('base_term_food_code', IN_UCR),
I('include_in_analysis'),
I('fao_who_gift_food_group_code'),
I('fao_who_gift_food_group_description'),
I('user_food_group'),
I('eating_time', IN_UCR, IS_RECALL_META),
I('time_block', IN_UCR, IS_RECALL_META),
I('already_reported_food', IN_UCR),
I('already_reported_food_case_id', IN_UCR),
I('already_reported_recipe', IN_UCR),
I('already_reported_recipe_case_id', IN_UCR),
I('already_reported_recipe_name', IN_UCR),
I('is_ingredient', IN_UCR),
I('ingredient_type', CALCULATED_LATER),
I('recipe_case_id', IN_UCR),
I('ingr_recipe_code'),
I('ingr_fraction'),
I('ingr_recipe_total_grams_consumed', CALCULATED_LATER),
I('short_name', IN_UCR),
I('food_base_term', IN_UCR, IN_FOOD_FIXTURE),
I('tag_1', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_1', IN_UCR),
I('tag_2', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_2', IN_UCR),
I('tag_3', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_3', IN_UCR),
I('tag_4', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_4', IN_UCR),
I('tag_5', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_5', IN_UCR),
I('tag_6', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_6', IN_UCR),
I('tag_7', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_7', IN_UCR),
I('tag_8', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_8', IN_UCR),
I('tag_9', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_9', IN_UCR),
I('tag_10', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_10', IN_UCR),
I('conv_method_code', IN_UCR),
I('conv_method_desc', IN_UCR),
I('conv_option_code', IN_UCR),
I('conv_option_desc', IN_UCR),
I('measurement_amount', IN_UCR),
I('conv_units', IN_UCR),
I('portions', IN_UCR),
I('nsr_conv_method_code_post_cooking', IN_UCR),
I('nsr_conv_method_desc_post_cooking', IN_UCR),
I('nsr_conv_option_code_post_cooking', IN_UCR),
I('nsr_conv_option_desc_post_cooking', IN_UCR),
I('nsr_measurement_amount_post_cooking', IN_UCR),
I('nsr_consumed_cooked_fraction', IN_UCR),
I('recipe_num_ingredients', CALCULATED_LATER),
I('conv_factor_food_code'),
I('conv_factor_base_term_food_code'),
I('conv_factor_used'),
I('conv_factor'),
I('fct_food_code_exists'),
I('fct_base_term_food_code_exists'),
I('fct_reference_food_code_exists'),
I('fct_data_used'),
I('fct_code'),
I('total_grams', CALCULATED_LATER),
I('conv_factor_gap_code'),
I('conv_factor_gap_desc'),
I('fct_gap_code', CALCULATED_LATER),
I('fct_gap_desc', CALCULATED_LATER),
]
_INDICATORS_BY_SLUG = {i.slug: i for i in INDICATORS}
NSR_COLS_TO_COPY = [
'nsr_conv_method_code_post_cooking',
'nsr_conv_method_desc_post_cooking',
'nsr_conv_option_code_post_cooking',
'nsr_conv_option_desc_post_cooking',
'nsr_measurement_amount_post_cooking',
'nsr_consumed_cooked_fraction',
]
class FoodRow:
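    """A single row of the report: wraps one UCR food case row (or one
    standard-recipe ingredient) and computes its derived indicator values."""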
def __init__(self, ucr_row, fixtures, ingredient=None):
self.uuid = uuid.uuid4()
self.ucr_row = ucr_row
self.fixtures = fixtures
self._is_std_recipe_ingredient = bool(ingredient)
if self._is_std_recipe_ingredient:
self.food_code = ingredient.ingr_code
self._set_ingredient_fields(ingredient)
else:
self.caseid = ucr_row['doc_id']
self.food_code = ucr_row['food_code']
if not self.food_code and self.food_name in self.fixtures.foods_by_name:
self.food_code = self.fixtures.foods_by_name[self.food_name].food_code
if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name:
self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code
self._set_composition()
self._set_conversion_factors()
self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE)
self.include_in_analysis = not self.is_recipe
self.measurement_amount = _maybe_float(self.measurement_amount)
self.portions = _maybe_float(self.portions)
self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction)
self.enrichment_complete = False
def _set_ingredient_fields(self, ingredient):
if self._is_std_recipe_ingredient:
self.is_ingredient = 'yes'
self.ingr_recipe_code = ingredient.recipe_code
self.ingr_fraction = ingredient.ingr_fraction
def _set_composition(self):
# Get the food composition corresponding to food_code, fall back to base_term_food_code
fct = self.fixtures.food_compositions
self.fct_food_code_exists = bool(self.food_code and self.food_code in fct)
self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in fct)
self.fct_code = None
if self.fct_food_code_exists:
self.fct_code = self.food_code
self.fct_data_used = 'food_code'
elif self.fct_base_term_food_code_exists:
self.fct_code = self.base_term_food_code
self.fct_data_used = 'base_term_food_code'
if self.fct_code:
self.composition = fct[self.fct_code]
self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code
self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description
self.user_food_group = self.composition.user_defined_food_group
self.reference_food_code = self.composition.reference_food_code_for_food_composition
if self.fct_data_used == 'food_code' and self.reference_food_code:
self.fct_data_used = 'reference_food_code'
self.fct_reference_food_code_exists = bool(self.reference_food_code)
def set_fct_gap(self, ingredients=None):
if ingredients:
for row in ingredients:
row.set_fct_gap()
self.fct_gap_code = FctGaps.NOT_AVAILABLE
if self.food_type == FOOD_ITEM and self.fct_code:
self.fct_gap_code = {
'food_code': FctGaps.AVAILABLE,
'base_term_food_code': FctGaps.BASE_TERM,
'reference_food_code': FctGaps.REFERENCE,
}[self.fct_data_used]
if self.is_recipe and ingredients:
if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients):
self.fct_gap_code = FctGaps.AVAILABLE
else:
self.fct_gap_code = FctGaps.INGREDIENT_GAPS
self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code]
def _set_conversion_factors(self):
self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE
if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient
or self.food_type == NON_STANDARD_RECIPE):
self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE
elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code:
self.conv_factor_food_code = self.fixtures.conversion_factors.get(
(self.food_code, self.conv_method_code, self.conv_option_code))
self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get(
(self.base_term_food_code, self.conv_method_code, self.conv_option_code))
if self.conv_factor_food_code:
self.conv_factor_used = 'food_code'
self.conv_factor = self.conv_factor_food_code
self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE
elif self.conv_factor_base_term_food_code:
self.conv_factor_used = 'base_term_food_code'
self.conv_factor = self.conv_factor_base_term_food_code
self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM
self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code]
@property
def age_range(self):
if not self.age_months_calculated:
return None
for age_range in AGE_RANGES:
if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound:
return age_range.name
def get_nutrient_per_100g(self, nutrient_name):
if self.fct_code:
return self.composition.nutrients.get(nutrient_name)
def get_nutrient_amt(self, nutrient_name):
return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01)
def __getattr__(self, name):
if name in _INDICATORS_BY_SLUG:
indicator = _INDICATORS_BY_SLUG[name]
if indicator.is_calculated_later:
if not self.enrichment_complete:
raise AttributeError(f"{name} hasn't yet been set. It will be "
"calculated outside the scope of FoodRow.")
return None
if self._is_std_recipe_ingredient:
# If it's an indicator that hasn't been explicitly set, check if it can
# be pulled from the food fixture or from the parent food case's UCR
if indicator.in_food_fixture:
return getattr(self.fixtures.foods[self.food_code], indicator.slug)
if indicator.is_recall_meta:
return self.ucr_row[indicator.slug]
return None
else:
# If it's an indicator in the UCR that hasn't been explicitly set, return that val
return self.ucr_row[indicator.slug] if indicator.in_ucr else None
raise AttributeError(f"FoodRow has no definition for {name}")
class FoodData:
"""Generates the primary dataset for INDDEX reports. See file docstring for more."""
IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type']
FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS
def __init__(self, domain, *, datespan, filter_selections):
for slug in filter_selections:
if slug not in self.FILTERABLE_COLUMNS:
raise AssertionError(f"{slug} is not a valid filter slug")
self.fixtures = FixtureAccessor(domain)
self._in_memory_filter_selections = {
slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS
if slug in filter_selections
}
self._ucr = FoodCaseData({
'domain': domain,
'startdate': str(datespan.startdate),
'enddate': str(datespan.enddate),
**{k: v for k, v in filter_selections.items()
if k in FoodCaseData.FILTERABLE_COLUMNS}
})
@classmethod
def from_request(cls, domain, request):
return cls(
domain,
datespan=request.datespan,
filter_selections={'owner_id': cls._get_owner_ids(domain, request),
**{k: [v for v in request.GET.getlist(k) if v]
for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}}
)
@staticmethod
def _get_owner_ids(domain, request):
slugs = request.GET.getlist(EMWF.slug)
if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs):
return [] # don't filter by owner
if EMWF.show_deactivated_data(slugs):
return (user_es.UserES()
.show_only_inactive()
.domain(domain)
.get_ids())
return get_case_owners(request, domain, slugs)
def _matches_in_memory_filters(self, row):
# If a gap type is specified, show only rows with gaps of that type
gap_type = self._in_memory_filter_selections.get('gap_type')
if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE:
return False
if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE:
return False
food_types = self._in_memory_filter_selections.get('food_type')
if food_types and row.food_type not in food_types:
return False
food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code')
if food_groups and row.fao_who_gift_food_group_code not in food_groups:
return False
return True
def _get_grouped_rows(self):
"""Return raw case rows grouped by recipe"""
rows = defaultdict(lambda: {
'recipe': None,
'references': [],
'ingredients': [],
})
for row in self._ucr.get_data():
if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE):
if row['already_reported_recipe_case_id']:
rows[row['already_reported_recipe_case_id']]['references'].append(row)
else:
rows[row['doc_id']]['recipe'] = row
elif row['recipe_case_id']:
rows[row['recipe_case_id']]['ingredients'].append(row)
else:
# this isn't part of a recipe
rows[row['doc_id']]['ingredients'].append(row)
return rows.values()
def _get_all_rows(self):
for group in self._get_grouped_rows():
master_recipe = group['recipe']
references = group['references']
ingredients = group['ingredients']
if not master_recipe:
yield from self._non_recipe_rows(references + ingredients)
else:
yield from self._recipe_rows(master_recipe, ingredients)
for recipe in references:
recipe = _insert_nsr_cols(recipe, master_recipe)
yield from self._recipe_rows(recipe, ingredients)
@property
@memoized
def rows(self):
rows = []
for row in self._get_all_rows():
if self._matches_in_memory_filters(row):
rows.append(row)
return rows
def _non_recipe_rows(self, rows):
"""These rows aren't part of a recipe, or it wasn't found"""
for raw_row in rows:
row = FoodRow(raw_row, self.fixtures)
row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions)
row.set_fct_gap()
row.enrichment_complete = True
yield row
def _recipe_rows(self, raw_recipe, raw_ingredients):
recipe = FoodRow(raw_recipe, self.fixtures)
if recipe.food_type == STANDARD_RECIPE:
# std recipe ingredients come from the DB, NOT ingredient cases
ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data)
for ingredient_data in self.fixtures.recipes[recipe.food_code]]
else: # NON_STANDARD_RECIPE
ingredients = [FoodRow(raw, self.fixtures) for raw in raw_ingredients]
total_grams = _calculate_total_grams(recipe, ingredients)
recipe.set_fct_gap(ingredients)
recipe.recipe_name = recipe.ucr_row['recipe_name']
for row in [recipe] + ingredients:
row.total_grams = total_grams[row.uuid]
row.recipe_num_ingredients = len(ingredients)
row.recipe_case_id = recipe.caseid
if row.is_ingredient == 'yes':
row.recipe_name = recipe.recipe_name
if recipe.food_type == STANDARD_RECIPE:
row.ingredient_type = 'std_recipe_ingredient'
row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid]
else:
row.ingredient_type = 'non_std_recipe_ingredient'
for col in NSR_COLS_TO_COPY: # Copy these values from the recipe case
setattr(row, col, getattr(recipe, col))
row.enrichment_complete = True
yield row
def _insert_nsr_cols(raw_recipe, master_recipe):
# nsr references are missing some values, insert them from the master recipe
nsr_cols = {col: master_recipe[col] for col in NSR_COLS_TO_COPY}
amount = _maybe_float(raw_recipe['measurement_amount'])
portions = _maybe_float(raw_recipe['portions'])
amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking'])
if all(val is not None for val in [amount, portions, amount_post_cooking]):
nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking
else:
nsr_cols['nsr_consumed_cooked_fraction'] = None
return {**raw_recipe, **nsr_cols}
def _calculate_total_grams(recipe, ingredients):
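    """Return a dict mapping row uuid to total grams consumed. For standard
    recipes the recipe's own measurement sets the total, which is split across
    ingredients by ingr_fraction; for non-standard recipes each ingredient's
    grams come from its own measurement and the recipe total is their sum."""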
if recipe.food_type == STANDARD_RECIPE:
res = {}
recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions)
res[recipe.uuid] = recipe_total
for row in ingredients:
res[row.uuid] = _multiply(recipe_total, row.ingr_fraction)
return res
else: # NON_STANDARD_RECIPE
res = {}
for row in ingredients:
res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor,
row.portions, recipe.nsr_consumed_cooked_fraction)
try:
res[recipe.uuid] = sum(res.values()) if res else None
except TypeError:
res[recipe.uuid] = None
return res
def _multiply(*args):
try:
return reduce(operator.mul, args)
except TypeError:
return None
def _maybe_float(val):
return float(val) if val not in (None, '') else None
|
testbook/reference.py | loichuder/testbook | 291 | 12792220 | <reponame>loichuder/testbook
from .exceptions import (
TestbookExecuteResultNotFoundError,
TestbookAttributeError,
TestbookSerializeError,
TestbookRuntimeError
)
from .utils import random_varname
from .translators import PythonTranslator
class TestbookObjectReference:
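    """Reference to an object living inside the notebook kernel.
    Attribute access, calls, indexing, iteration and comparisons are proxied by
    executing the corresponding expression in the kernel through ``self.tb``."""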
def __init__(self, tb, name):
self.tb = tb
self.name: str = name
@property
def _type(self):
return self.tb.value(f"type({self.name}).__name__")
def __repr__(self):
return repr(self.tb.value(f"repr({self.name})"))
def __getattr__(self, name):
if self.tb.value(f"hasattr({self.name}, '{name}')"):
return TestbookObjectReference(self.tb, f"{self.name}.{name}")
raise TestbookAttributeError(f"'{self._type}' object has no attribute {name}")
def __eq__(self, rhs):
return self.tb.value(
"{lhs} == {rhs}".format(lhs=self.name, rhs=PythonTranslator.translate(rhs))
)
def __len__(self):
return self.tb.value(f"len({self.name})")
def __iter__(self):
iterobjectname = f"___iter_object_{random_varname()}"
self.tb.inject(f"""
{iterobjectname} = iter({self.name})
""")
return TestbookObjectReference(self.tb, iterobjectname)
def __next__(self):
try:
return self.tb.value(f"next({self.name})")
except TestbookRuntimeError as e:
if e.eclass is StopIteration:
raise StopIteration
else:
raise
def __getitem__(self, key):
try:
return self.tb.value(f"{self.name}.__getitem__({PythonTranslator.translate(key)})")
except TestbookRuntimeError as e:
if e.eclass is TypeError:
raise TypeError(e.evalue)
elif e.eclass is IndexError:
raise IndexError(e.evalue)
else:
raise
def __setitem__(self, key, value):
try:
return self.tb.inject("{name}[{key}] = {value}".format(
name=self.name,
key=PythonTranslator.translate(key),
value=PythonTranslator.translate(value)
), pop=True)
except TestbookRuntimeError as e:
if e.eclass is TypeError:
raise TypeError(e.evalue)
elif e.eclass is IndexError:
raise IndexError(e.evalue)
else:
raise
def __contains__(self, item):
return self.tb.value(f"{self.name}.__contains__({PythonTranslator.translate(item)})")
def __call__(self, *args, **kwargs):
code = self.tb._construct_call_code(self.name, args, kwargs)
try:
return self.tb.value(code)
except TestbookExecuteResultNotFoundError:
# No return value from function call
pass
except TestbookSerializeError as e:
return TestbookObjectReference(self.tb, e.save_varname)
def resolve(self):
return self.tb.value(self.name)
|
k8s_snapshots/logconf.py | gmarkey/k8s-snapshots | 326 | 12792239 | <filename>k8s_snapshots/logconf.py<gh_stars>100-1000
import logging
import logging.config
from collections import OrderedDict
from typing import Optional, List, Any, Dict
import structlog
import sys
from k8s_snapshots import serialize
class ProcessStructuredErrors:
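    """If the logged exception defines a callable ``__structlog__()``, drop
    ``exc_info`` and attach its structured representation as ``structured_error``."""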
def __init__(self):
pass
def __call__(self, logger, method_name, event_dict):
exc_info = event_dict.pop('exc_info', None)
if exc_info is None:
return event_dict
exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info(
exc_info)
__structlog__ = getattr(exc, '__structlog__', None)
if not callable(__structlog__):
event_dict['exc_info'] = exc_info
return event_dict
structured_error = __structlog__()
event_dict['structured_error'] = structured_error
return event_dict
def add_message(logger, method_name, event_dict):
"""
Creates a ``message`` value based on the ``hint`` and ``key_hint`` keys.
``key_hint`` : ``Optional[str]``
a '.'-separated path of dictionary keys.
``hint`` : ``Optional[str]``
will be formatted using ``.format(**event_dict)``.
"""
def from_hint(ed):
hint = event_dict.pop('hint', None)
if hint is None:
return
try:
return hint.format(**event_dict)
except Exception as exc:
return f'! error formatting message: {exc!r}'
def path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]:
value = dict_
for key in key_path.split('.'):
if value is None:
return
__structlog__ = getattr(value, '__structlog__', None)
if __structlog__ is not None:
value = __structlog__()
value = value.get(key)
return value
def from_key_hint(ed) -> Optional[str]:
key_hint = ed.pop('key_hint', None)
if key_hint is None:
return
value = path_value(ed, key_hint)
return format_kv(key_hint, value)
def from_key_hints(ed) -> List[str]:
key_hints = ed.pop('key_hints', None)
if key_hints is None:
return []
return [
format_kv(key_hint, path_value(ed, key_hint))
for key_hint in key_hints
]
def format_kv(key: str, value: Any) -> str:
return f'{key}={serialize.process(value)}'
hints = [
from_hint(event_dict),
from_key_hint(event_dict)
]
hints += from_key_hints(event_dict)
if all(hint is None for hint in hints):
if event_dict.get('message') is None:
event_dict['message'] = event_dict.get('event')
return event_dict
prefix = event_dict['event']
hint = ', '.join(hint for hint in hints if hint is not None)
message = event_dict.get('message')
if message is not None:
message = f'{prefix}: {message}, {hint}'
else:
message = f'{prefix}: {hint}'
event_dict['message'] = message
return event_dict
def configure_from_config(config):
configure_logging(
level_name=config['log_level'],
for_humans=not config['json_log'],
json_indent=config['structlog_json_indent'] or None,
)
def configure_logging(
level_name: str='INFO',
for_humans: bool=False,
json_indent: Optional[int]=None,
):
configure_structlog(
for_humans=for_humans,
json_indent=json_indent,
level_name=level_name,
)
def configure_structlog(
for_humans: bool=False,
json_indent: Optional[int]=None,
level_name: str='INFO'
):
key_order = ['message', 'event', 'level']
timestamper = structlog.processors.TimeStamper(fmt='ISO')
processors = [
event_enum_to_str,
ProcessStructuredErrors(),
structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
rename_level_to_severity,
timestamper,
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
add_func_name,
add_message,
order_keys(key_order),
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
]
if for_humans:
renderer = structlog.dev.ConsoleRenderer() # <===
else:
# Make it so that 0 ⇒ None
indent = json_indent or None
renderer = structlog.processors.JSONRenderer(
indent=indent,
serializer=serialize.dumps
)
foreign_pre_chain = [
# Add the log level and a timestamp to the event_dict if the log entry
# is not from structlog.
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.stdlib.add_log_level,
structlog.stdlib.add_logger_name,
foreign_event_to_message,
rename_level_to_severity,
timestamper,
]
if level_name == 'DEBUG':
root_logger_level = 'DEBUG'
else:
root_logger_level = 'ERROR'
logging_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'structlog': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': renderer,
'foreign_pre_chain': foreign_pre_chain,
},
},
'handlers': {
'default': {
'level': level_name,
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'structlog',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': root_logger_level,
'propagate': True,
},
'k8s_snapshots': {
'level': 'DEBUG',
}
}
}
logging.config.dictConfig(logging_config)
structlog.configure(
processors=processors,
context_class=OrderedDict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
def foreign_event_to_message(logger, method_name, event_dict):
event = event_dict.get('event')
if event is not None and 'message' not in event_dict:
event_dict['message'] = event
event_dict['event'] = 'foreign'
return event_dict
def rename_level_to_severity(logger, method_name, event_dict):
    level = event_dict.pop('level', None)
    if level is not None:
        event_dict['severity'] = level.upper()
    return event_dict
def add_func_name(logger, method_name, event_dict):
record = event_dict.get('_record')
if record is None:
return event_dict
event_dict['function'] = record.funcName
return event_dict
def order_keys(order):
"""
Order keys for JSON readability when not using json_log=True
"""
def processor(logger, method_name, event_dict):
if not isinstance(event_dict, OrderedDict):
return event_dict
for key in reversed(order):
if key in event_dict:
event_dict.move_to_end(key, last=False)
return event_dict
return processor
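# Illustrative effect (toy values): with order=['message', 'event', 'level'],
# an OrderedDict whose keys arrive as level, event, message is rearranged so
# that iteration yields message, event, level first, keeping rendered log
# lines easy to scan. Keys not named in ``order`` keep their relative order.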
def event_enum_to_str(logger, method_name, event_dict):
from k8s_snapshots import events
event = event_dict.get('event')
if event is None:
return event_dict
if isinstance(event, events.EventEnum):
event_dict['event'] = event.value
return event_dict
|
setup.py | samir-joshi/tmtoolkit | 167 | 12792250 | <filename>setup.py<gh_stars>100-1000
"""
tmtoolkit setuptools based setup module
"""
import os
from codecs import open
from setuptools import setup, find_packages
__title__ = 'tmtoolkit'
__version__ = '0.10.0'
__author__ = '<NAME>'
__license__ = 'Apache License 2.0'
GITHUB_URL = 'https://github.com/WZBSocialScienceCenter/tmtoolkit'
DEPS_BASE = ['numpy>=1.19.0,<2', 'scipy>=1.5.0,<1.6', 'pandas>=1.1.0,<1.2', 'xlrd>=1.2.0',
'globre>=0.1.5,<0.2', 'matplotlib>=3.3.0,<3.4', 'spacy>=2.3.0,<2.4']
DEPS_EXTRA = {
'datatable': ['datatable>=0.10.0,<0.11'],
'nltk': ['nltk>=3.5.0,<3.6'],
'excel_export': ['openpyxl>=3.0.0'],
'wordclouds': ['wordcloud>=1.7.0,<1.8', 'Pillow>=7.2.0,<7.3'],
'lda': ['ldafork>=1.2.0,<1.3'],
'sklearn': ['scikit-learn>=0.23,<0.24'],
'gensim': ['gensim>=3.8.0,<3.9'],
'topic_modeling_eval_extra': ['gmpy2>=2.0.0,<3'],
    'test': ['pytest>=6.0.0,<7', 'hypothesis>=5.23.0,<5.24', 'decorator>=4.4.0,<4.5'],
'doc': ['Sphinx>=3.1.0', 'sphinx-rtd-theme>=0.5.0', 'nbsphinx>=0.7.0'],
'dev': ['coverage>=5.2', 'coverage-badge>=1.0.0', 'pytest-cov>=2.10.0', 'twine>=3.2.0',
'ipython>=7.16.0', 'jupyter>=1.0.0', 'notebook>=6.0.0', 'tox>=3.18.0'],
}
DEPS_EXTRA['recommended'] = DEPS_EXTRA['excel_export'] + DEPS_EXTRA['wordclouds']
DEPS_EXTRA['all'] = []
for k, deps in DEPS_EXTRA.items():
if k not in {'recommended', 'all'}:
DEPS_EXTRA['all'].extend(deps)
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name=__title__,
version=__version__,
description='Text Mining and Topic Modeling Toolkit',
long_description=long_description,
long_description_content_type='text/x-rst',
url=GITHUB_URL,
project_urls={
'Bug Reports': GITHUB_URL + '/issues',
'Source': GITHUB_URL,
},
author=__author__,
author_email='<EMAIL>',
license=__license__,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
keywords='textmining textanalysis text mining analysis preprocessing topicmodeling topic modeling evaluation',
packages=find_packages(exclude=['tests', 'examples']),
include_package_data=True,
python_requires='>=3.6',
install_requires=DEPS_BASE,
extras_require=DEPS_EXTRA
)
|
src/genie/libs/parser/junos/tests/ShowServicesAccountingErrors/cli/equal/golden_output_1_expected.py | balmasea/genieparser | 204 | 12792251 | expected_output = {
"services-accounting-information": {
"v9-error-information": [
{
"interface-name": "ms-9/0/0",
"service-set-dropped": "0",
"active-timeout-failures": "0",
"export-packet-failures": "0",
"flow-creation-failures": "0",
"memory-overload": "No",
}
]
}
}
|
open/core/betterself/views/activity_log_views.py | lawrendran/open | 105 | 12792291 | from open.core.betterself.models.activity_log import ActivityLog
from open.core.betterself.serializers.activity_log_serializers import (
ActivityLogReadSerializer,
ActivityLogCreateUpdateSerializer,
)
from open.core.betterself.views.mixins import (
BaseGetUpdateDeleteView,
BaseCreateListView,
)
class ActivityLogCreateListView(BaseCreateListView):
model_class = ActivityLog
read_serializer_class = ActivityLogReadSerializer
create_serializer_class = ActivityLogCreateUpdateSerializer
class ActivityLogGetUpdateView(BaseGetUpdateDeleteView):
model_class = ActivityLog
read_serializer_class = ActivityLogReadSerializer
update_serializer_class = ActivityLogCreateUpdateSerializer
|
src/scripts/apply_json_metadata.py | charlottestanton/covid-19-open-data | 430 | 12792361 | <filename>src/scripts/apply_json_metadata.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
import sys
def apply_json_metadata(bucket_name, prefix_name):
"""
    Applies Content-Type and gzip Content-Encoding to JSON files under a bucket prefix.
    To allow decompressive transcoding and serving of gzipped assets to clients
    that can decompress them themselves, both the content type and content encoding
    metadata need to be set on JSON objects. Most methods of transferring objects into
    a bucket do not set this metadata correctly, so this utility corrects it after the fact.
See also: https://cloud.google.com/storage/docs/transcoding
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
for blob in bucket.list_blobs(prefix=prefix_name):
if(blob.name.endswith("json")):
print(blob.name)
if(blob.content_type != "application/json" or
blob.content_encoding != "gzip" or
blob.content_disposition != "inline"):
blob.content_type = "application/json"
blob.content_encoding = "gzip"
blob.content_disposition = "inline"
blob.patch()
if __name__ == "__main__":
if(len(sys.argv) != 3):
print("Usage: apply_json_meta [bucket_name] [prefix_name]")
else:
apply_json_metadata(sys.argv[1],sys.argv[2])
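# Example invocation (bucket and prefix are placeholders):
#     python apply_json_metadata.py my-covid-data-bucket v2/latest/
# The caller needs credentials that allow patching object metadata in the bucket.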
|
attributes/continuous_integration/__init__.py | Lufedi/reaper | 106 | 12792425 | <reponame>Lufedi/reaper
class CiService(object):
@staticmethod
def is_enabled(path):
raise NotImplementedError()
|
nemo/collections/nlp/models/glue_benchmark/metrics_for_glue.py | vinayphadnis/NeMo | 4,145 | 12792435 | # Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
__all__ = ['compute_metrics']
def accuracy(preds: List[int], labels: List[int]):
return {"acc": (preds == labels).mean()}
def acc_and_f1(preds: List[int], labels: List[int]):
accuracy = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
return {"acc": accuracy, "f1": f1}
def mcc(preds: List[int], labels: List[int]):
return {"mcc": matthews_corrcoef(labels, preds)}
def pearson_and_spearman(preds: List[int], labels: List[int]):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {"pearson": pearson_corr, "spearmanr": spearman_corr, "pear+spear av": (pearson_corr + spearman_corr) / 2}
def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]:
"""
Computes metrics for GLUE tasks
Args:
task_name: GLUE task name
preds: model predictions
labels: golden labels
Returns:
metrics
"""
if len(preds) != len(labels):
raise ValueError("Predictions and labels must have the same length")
metric_fn = accuracy
if task_name == 'cola':
metric_fn = mcc
elif task_name in ['mrpc', 'qqp']:
metric_fn = acc_and_f1
elif task_name == 'sts-b':
metric_fn = pearson_and_spearman
return metric_fn(preds, labels)
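# Illustrative usage (toy values, not from the original module):
#
#     import numpy as np
#     preds = np.array([0, 1, 1, 0])
#     labels = np.array([0, 1, 0, 0])
#     compute_metrics('mrpc', preds, labels)  # -> {'acc': 0.75, 'f1': 0.666...}
#     compute_metrics('cola', preds, labels)  # -> {'mcc': ...}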
|
opentapioca/tagger.py | heathersherry/opentapioca | 191 | 12792438 | <reponame>heathersherry/opentapioca<filename>opentapioca/tagger.py
import json
import requests
import logging
import re
from math import log
from .languagemodel import BOWLanguageModel
from .wikidatagraph import WikidataGraph
from .tag import Tag
from .mention import Mention
# solr_collection = 'wd_multilingual'
logger = logging.getLogger(__name__)
class Tagger(object):
"""
    The tagger indexes a Wikidata dump in Solr and uses that index to
    efficiently detect mentions of Wikidata items in text.
"""
def __init__(self, solr_collection, bow, graph):
"""
Creates a tagger from:
- a solr collection name, which has been adequately initialized with a compatible index and filled with documents
- a bag of words language model, adequately trained, which will be used to evaluate the likelihood of phrases
- a wikidata graph, adequately loaded, which will be used to compute the page rank and the edges between items
"""
self.bow = bow
self.graph = graph
self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection)
self.prune_re = re.compile(r'^(\w\w?|[\d ]{,4})$')
self.max_length = 10000
def tag_and_rank(self, phrase, prune=True):
"""
Given some text, use the solr index to retrieve candidate items mentioned in the text.
:param prune: if True, ignores lowercase mentions shorter than 3 characters
"""
# Tag
phrase = phrase[:self.max_length]
logger.debug('Tagging text with solr (length {})'.format(len(phrase)))
r = requests.post(self.solr_endpoint,
params={'overlaps':'NO_SUB',
'tagsLimit':500,
'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types',
'wt':'json',
'indent':'off',
},
headers ={'Content-Type':'text/plain'},
data=phrase.encode('utf-8'))
r.raise_for_status()
logger.debug('Tagging succeeded')
resp = r.json()
# Enhance mentions with page rank and edge similarity
mentions_json = [
self._dictify(mention)
for mention in resp.get('tags', [])
]
docs = {
doc['id']:doc
for doc in resp.get('response', {}).get('docs', [])
}
mentions = [
self._create_mention(phrase, mention, docs, mentions_json)
for mention in mentions_json
]
        if not prune:
            return mentions
        pruned_mentions = [
            mention
            for mention in mentions
            if not self.prune_phrase(mention.phrase)
        ]
        return pruned_mentions
def prune_phrase(self, phrase):
"""
        Should this phrase be pruned? That happens when the phrase appears entirely
        in lowercase and is either at most two word characters long or consists
        only of digits and spaces (up to four characters).
        This is mostly introduced to remove matches of Wikidata items about characters,
        to prevent short words such as "of" or "in" from matching the initials "OF", "IN",
        and to drop sport scores, postcodes, and so on.
"""
return self.prune_re.match(phrase) is not None and phrase.lower() == phrase
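    # Illustrative behaviour of the pruning rule, derived from the regex above:
    #     prune_phrase('of')      -> True   (short lowercase word)
    #     prune_phrase('OF')      -> False  (kept: may be initials)
    #     prune_phrase('1906')    -> True   (digits only)
    #     prune_phrase('Corinth') -> False  (long enough, kept)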
def _create_mention(self, phrase, mention, docs, mentions):
"""
Adds more info to the mentions returned from Solr, to prepare
them for ranking by the classifier.
:param phrase: the original document
:param mention: the JSON mention to enhance with scores
:param docs: dictionary from qid to item
:param mentions: the list of all mentions in the document
:returns: the enhanced mention, as a Mention object
"""
start = mention['startOffset']
end = mention['endOffset']
surface = phrase[start:end]
surface_score = self.bow.log_likelihood(surface)
ranked_tags = []
for qid in mention['ids']:
item = dict(docs[qid].items())
item['rank'] = 23. + log(self.graph.get_pagerank(qid))
ranked_tags.append(Tag(**item))
return Mention(
phrase=surface,
start=start,
end=end,
log_likelihood=-surface_score,
tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10],
)
def _dictify(self, lst):
"""
Converts a list of [key1,val1,key2,val2,...] to a dict
"""
return {
lst[2*k]: lst[2*k+1]
for k in range(len(lst)//2)
}
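# Illustrative sketch: Solr's tagger returns flat [key, value, key, value, ...]
# lists, so Tagger._dictify(['startOffset', 0, 'endOffset', 7, 'ids', ['Q42']])
# yields {'startOffset': 0, 'endOffset': 7, 'ids': ['Q42']} (values made up).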
if __name__ == '__main__':
import sys
fname = sys.argv[1]
print('Loading '+fname)
bow = BOWLanguageModel()
bow.load(fname)
print('Loading '+sys.argv[2])
graph = WikidataGraph()
graph.load_pagerank(sys.argv[2])
    tagger = Tagger('wd_multilingual', bow, graph)  # collection name from the commented-out default above
while True:
phrase = input('>>> ')
tags = tagger.tag_and_rank(phrase)
for mention in tags:
for tag in mention.get('tags', []):
if 'edges' in tag:
del tag['edges']
if 'aliases' in tag:
del tag['aliases']
print(json.dumps(tags, indent=2, sort_keys=True))
|
tests/x-custom_tests.py | ivoupton/sheet2dict | 208 | 12792463 | import sys
from pathlib import Path
sys.path.append(str(Path(".").absolute().parent))
from sheet2dict import Worksheet
from io import BytesIO
ws = Worksheet()
ws.xlsx_to_dict(path="inventory.xlsx")
print(">>", ws.header)
print("ALL:", ws.sheet_items)
print("SANITIZED:", ws.sanitize_sheet_items)
path = "inventory.xlsx"
xlsx_file = open(path, "rb")
xlsx_file = BytesIO(xlsx_file.read())
ws = Worksheet()
ws.xlsx_to_dict(path=xlsx_file)
print(">>", ws.header)
ws = Worksheet()
path = "inventory.csv"
csv_file = open(path, "r", encoding="utf-8-sig")
ws.csv_to_dict(csv_file=csv_file, delimiter=";")
print("ALL:", ws.sheet_items)
print("SANITIZED:", ws.sanitize_sheet_items)
|
new_venv/Lib/site-packages/cardio/core/utils.py | Shlyankin/cardio | 250 | 12792478 | """Miscellaneous ECG Batch utils."""
import functools
import pint
import numpy as np
from sklearn.preprocessing import LabelBinarizer as LB
UNIT_REGISTRY = pint.UnitRegistry()
def get_units_conversion_factor(old_units, new_units):
"""Return a multiplicative factor to convert a measured quantity from old
to new units.
Parameters
----------
old_units : str
Current units in SI format.
new_units : str
Target units in SI format.
Returns
-------
factor : float
A factor to convert quantities between units.
"""
try: # pint exceptions are wrapped with ValueError exceptions because they don't implement __repr__ method
factor = UNIT_REGISTRY(old_units).to(new_units).magnitude
except Exception as error:
raise ValueError(error.__class__.__name__ + ": " + str(error))
return factor
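# Illustrative usage (unit strings are examples):
#     get_units_conversion_factor('ms', 's')   # -> 0.001
#     get_units_conversion_factor('mV', 'V')   # -> 0.001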
def partialmethod(func, *frozen_args, **frozen_kwargs):
"""Wrap a method with partial application of given positional and keyword
arguments.
Parameters
----------
func : callable
A method to wrap.
frozen_args : misc
Fixed positional arguments.
frozen_kwargs : misc
Fixed keyword arguments.
Returns
-------
method : callable
Wrapped method.
"""
@functools.wraps(func)
def method(self, *args, **kwargs):
"""Wrapped method."""
return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs)
return method
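# Illustrative sketch (class and argument names invented): freezing the first
# positional argument of a method.
#
#     class Resampler:
#         def _resample(self, rate, order=1):
#             ...
#         resample_to_500hz = partialmethod(_resample, 500)
#
# ``obj.resample_to_500hz(order=2)`` then calls ``_resample(obj, 500, order=2)``.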
class LabelBinarizer(LB):
"""Encode categorical features using a one-hot scheme.
Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be
encoded using ``n_classes`` numbers even for binary problems.
"""
# pylint: disable=invalid-name
def transform(self, y):
"""Transform ``y`` using one-hot encoding.
Parameters
----------
y : 1-D ndarray of shape ``[n_samples,]``
Class labels.
Returns
-------
Y : 2-D ndarray of shape ``[n_samples, n_classes]``
One-hot encoded labels.
"""
Y = super().transform(y)
if len(self.classes_) == 1:
Y = 1 - Y
if len(self.classes_) == 2:
Y = np.hstack((1 - Y, Y))
return Y
def inverse_transform(self, Y, threshold=None):
"""Transform one-hot encoded labels back to class labels.
Parameters
----------
Y : 2-D ndarray of shape ``[n_samples, n_classes]``
One-hot encoded labels.
threshold : float, optional
The threshold used in the binary and multi-label cases. If
``None``, it is assumed to be half way between ``neg_label`` and
``pos_label``.
Returns
-------
y : 1-D ndarray of shape ``[n_samples,]``
Class labels.
"""
if len(self.classes_) == 1:
y = super().inverse_transform(1 - Y, threshold)
elif len(self.classes_) == 2:
y = super().inverse_transform(Y[:, 1], threshold)
else:
y = super().inverse_transform(Y, threshold)
return y
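# Illustrative behaviour for a binary problem (toy labels): both classes get a
# column, unlike the sklearn default single-column encoding.
#
#     lb = LabelBinarizer().fit(['N', 'AF'])
#     lb.transform(['AF', 'N'])   # -> [[1, 0], [0, 1]], columns follow lb.classes_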
|
flake8_pytest_style/visitors/fail.py | kianmeng/flake8-pytest-style | 125 | 12792504 | <reponame>kianmeng/flake8-pytest-style<gh_stars>100-1000
import ast
from flake8_plugin_utils import Visitor
from flake8_pytest_style.config import Config
from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage
from flake8_pytest_style.utils import (
get_simple_call_args,
is_empty_string,
is_fail_call,
is_falsy_constant,
)
class FailVisitor(Visitor[Config]):
def _check_fail_call(self, node: ast.Call) -> None:
"""Checks for PT016."""
args = get_simple_call_args(node)
msg_argument = args.get_argument('msg', 0)
if not msg_argument or is_empty_string(msg_argument):
self.error_from_node(FailWithoutMessage, node)
def visit_Assert(self, node: ast.Assert) -> None:
"""Checks for PT015."""
if is_falsy_constant(node.test):
self.error_from_node(AssertAlwaysFalse, node)
def visit_Call(self, node: ast.Call) -> None:
if is_fail_call(node):
self._check_fail_call(node)
|
RecoHI/HiEgammaAlgos/python/HiIsolationCommonParameters_cff.py | ckamtsikis/cmssw | 852 | 12792509 | import FWCore.ParameterSet.Config as cms
isolationInputParameters = cms.PSet(
barrelBasicCluster = cms.InputTag("islandBasicClusters","islandBarrelBasicClusters"),
endcapBasicCluster = cms.InputTag("islandBasicClusters","islandEndcapBasicClusters"),
horeco = cms.InputTag("horeco"),
hfreco = cms.InputTag("hfreco"),
hbhereco = cms.InputTag("hbhereco"),
track = cms.InputTag("hiGeneralTracks"),
photons = cms.InputTag("cleanPhotons")
)
|
examples/scheduling.py | arnimarj/crochet | 152 | 12792569 | <filename>examples/scheduling.py
#!/usr/bin/python
"""
An example of scheduling time-based events in the background.
Download the latest EUR/USD exchange rate from Yahoo every 30 seconds in the
background; the rendered Flask web page can use the latest value without
having to do the request itself.
Note this is example is for demonstration purposes only, and is not actually
used in the real world. You should not do this in a real application without
reading Yahoo's terms-of-service and following them.
"""
from __future__ import print_function
from flask import Flask
from twisted.internet.task import LoopingCall
from twisted.web.client import getPage
from twisted.python import log
from crochet import wait_for, run_in_reactor, setup
setup()
# Twisted code:
class _ExchangeRate(object):
"""Download an exchange rate from Yahoo Finance using Twisted."""
def __init__(self, name):
self._value = None
self._name = name
# External API:
def latest_value(self):
"""Return the latest exchange rate value.
May be None if no value is available.
"""
return self._value
def start(self):
"""Start the background process."""
self._lc = LoopingCall(self._download)
# Run immediately, and then every 30 seconds:
self._lc.start(30, now=True)
def _download(self):
"""Download the page."""
print("Downloading!")
def parse(result):
print("Got %r back from Yahoo." % (result,))
values = result.strip().split(",")
self._value = float(values[1])
d = getPage(
"http://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=c4l1&s=%s=X"
% (self._name,))
d.addCallback(parse)
d.addErrback(log.err)
return d
# Blocking wrapper:
class ExchangeRate(object):
"""Blocking API for downloading exchange rate."""
def __init__(self, name):
self._exchange = _ExchangeRate(name)
@run_in_reactor
def start(self):
self._exchange.start()
@wait_for(timeout=1)
def latest_value(self):
"""Return the latest exchange rate value.
May be None if no value is available.
"""
return self._exchange.latest_value()
EURUSD = ExchangeRate("EURUSD")
app = Flask(__name__)
@app.route('/')
def index():
rate = EURUSD.latest_value()
if rate is None:
rate = "unavailable, please refresh the page"
return "Current EUR/USD exchange rate is %s." % (rate,)
if __name__ == '__main__':
import sys, logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
EURUSD.start()
app.run()
|
update-attempt-ids.py | inducer/courseflow | 284 | 12792584 | <filename>update-attempt-ids.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
django.setup()
from course.models import GradeChange
for gchange in GradeChange.objects.all():
if gchange.flow_session is not None:
gchange.attempt_id = "flow-session-%d" % gchange.flow_session.id
gchange.save()
|
backend/auth/security.py | restato/bunnybook | 131 | 12792593 | from typing import Optional, Dict
import jwt
import sentry_sdk
from fastapi import HTTPException
from starlette import status
from starlette.requests import Request
from auth.models import Role
from auth.models import User
from config import cfg
def get_user(request: Request) -> User:
"""
Protect route from anonymous access, requiring and returning current
authenticated user.
:param request: web request
:return: current user, otherwise raise an HTTPException (status=401)
"""
return _check_and_extract_user(request)
def get_admin(request: Request) -> User:
"""
Allow access only to an 'admin' account, returning current
authenticated admin account data.
:param request: web request
:return: current admin user, otherwise raise an HTTPException (status=401)
"""
user = _check_and_extract_user(request)
if user.role != Role.ADMIN:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
return user
def get_optional_user(request: Request) -> Optional[User]:
"""
Return authenticated user or None if session is anonymous.
:param request: web request
:return: current user or None for anonymous sessions
"""
try:
return _check_and_extract_user(request)
except HTTPException:
if request.headers.get("Authorization"):
raise
def extract_user_from_token(access_token: str, verify_exp: bool = True) -> User:
"""
Extract User object from jwt token, with optional expiration check.
:param access_token: encoded access token string
:param verify_exp: whether to perform verification or not
:return: User object stored inside the jwt
"""
return User(**jwt.decode(
access_token,
key=cfg.jwt_secret,
algorithms=[cfg.jwt_algorithm],
options={"verify_exp": verify_exp})["user"])
def decode_jwt_refresh_token(
encoded_refresh_token: str,
verify_exp: bool = True) -> Dict:
"""
Decode an encoded refresh token, with optional expiration check.
:param encoded_refresh_token: encoded refresh token string
:param verify_exp: whether to perform verification or not
:return: decoded jwt refresh token as dictionary
"""
return jwt.decode(
encoded_refresh_token,
key=cfg.jwt_secret,
algorithms=[cfg.jwt_algorithm],
options={"verify_exp": verify_exp})
def _check_and_extract_user(request: Request) -> User:
authorization_header = request.headers.get("Authorization")
if not authorization_header:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
try:
access_token = authorization_header.replace("Bearer ", "")
        user = extract_user_from_token(access_token)
if cfg.sentry_dsn:
sentry_sdk.set_user({
"id": user.id,
"username": user.username,
"email": user.email,
"ip_address": request.client.host
})
return user
except jwt.exceptions.ExpiredSignatureError:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
|
src/commands/get_ast.py | PranjalPansuriya/JavaScriptEnhancements | 690 | 12792595 | import sublime, sublime_plugin
import os
from ..libs import util
from ..libs import NodeJS
from ..libs import javaScriptEnhancements
from ..libs.global_vars import *
class JavascriptEnhancementsGetAstCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
view = self.view
flow_cli = "flow"
is_from_bin = True
chdir = ""
use_node = True
bin_path = ""
node = NodeJS(check_local=True)
result = node.execute_check_output(
flow_cli,
[
'ast',
'--from', 'sublime_text',
'--pretty'
],
is_from_bin=is_from_bin,
use_fp_temp=True,
fp_temp_contents=view.substr(sublime.Region(0, view.size())),
is_output_json=False,
chdir=chdir,
bin_path=bin_path,
use_node=use_node
)
print(result[1])
def is_enabled(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) or not DEVELOPER_MODE :
return False
return True
def is_visible(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) or not DEVELOPER_MODE :
return False
return True |
tests/test-cases/basic/ssa_case-5.py | SMAT-Lab/Scalpel | 102 | 12792624 | # Imports
import os
import random
from collections import Counter, defaultdict
import random
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.chunk import conlltags2tree
from nltk.tree import Tree
import pandas as pd
from htrc_features import FeatureReader
import geocoder
import folium
from pprint import pprint
from tqdm import tqdm
# Set environment variable
# Geonames requires a username to access the API but we do not want to expose personal info in code
#
# Run this locally by adding USERNAME to environment variables, e.g. to .env, as follows:
# > export USERNAME=<insert username here>
USERNAME = os.getenv('USERNAME')
# Setup Stanford NER Tagger
# Ignore deprecation warning for now; we'll deal with it when the time comes!
st = StanfordNERTagger('/usr/local/share/stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz',
'/usr/local/share/stanford-ner/stanford-ner.jar',
encoding='utf-8')
# Functions for putting together with inside-outside-beginning (IOB) logic
# Cf. https://stackoverflow.com/a/30666949
#
# For more information on IOB tagging, see https://en.wikipedia.org/wiki/Inside–outside–beginning_(tagging)
# Sample HathiTrust ID
# This is the HTID for...
# "Ancient Corinth: A guide to the excavations," <NAME>, <NAME>, and <NAME>
htid = "wu.89079728994"
# Get HTEF data for this ID; specifically tokenlist
fr = FeatureReader(ids=[htid])
for vol in fr:
tokens = vol.tokenlist()
# Create pandas dataframe with relevant data
temp = tokens.index.values.tolist()
counts = pd.DataFrame.from_records(temp, columns=['page', 'section', 'token', 'pos'])
counts['count'] = tokens['count'].tolist()
counts[:10]
# Reconstruct text using tokens and counts
text_data = list(zip(counts['token'].tolist(), counts['count'].tolist()))
# Loop through and multiply words by counts
text_list = []
for w, c in text_data:
for i in range(0, c):
text_list.append(w)
random.shuffle(text_list) # Necessary?
text_reconstruction = " ".join(text_list)
#page_words_extended = page_words+page_ner
tokens = word_tokenize(text_reconstruction)
tagged_tokens = st.tag(tokens)
tagged_tokens = [item for item in tagged_tokens if item[0] != '']
ne_tree = stanfordNE2tree(tagged_tokens)
ne_in_sent = []
for subtree in ne_tree:
if type(subtree) == Tree: # If subtree is a noun chunk, i.e. NE != "O"
ne_label = subtree.label()
ne_string = " ".join([token for token, pos in subtree.leaves()])
ne_in_sent.append((ne_string, ne_label))
locations = [tag[0].title() for tag in ne_in_sent if tag[1] == 'LOCATION']
print(locations)
most_common_locations = Counter(locations).most_common(10)
pprint(most_common_locations)
# Organize some data for map info
places_list = [name for name, _ in most_common_locations][:3] # Limit to top three
most_common_locations = dict(most_common_locations) # Turn mcl into dictionary
# Retrieve json from geonames API (for fun this time using geocoder)
geocoder_results = []
for place in places_list:
results = geocoder.geonames(place, maxRows=5, key=USERNAME)
jsons = []
for result in results:
jsons.append(result.json)
geocoder_results.append(jsons)
# Create a list of 'country' from the geonames json results
countries = []
for results in geocoder_results:
for item in results:
if 'country' in item.keys():
countries.append(item['country'])
# Determine which country appears most often
top_country = sorted(Counter(countries))[0]
print(top_country)
# Iterate over geocoder_results and keep the first lat/long that matches the top country
coordinates = []
for i, results in enumerate(geocoder_results):
for item in results:
if item['country'] == top_country:
coordinates.append((float(item['lat']), float(item['lng'])))
break # Only get the first item for now
print(places_list)
print(coordinates)
# Set up Folium and populate with weighted coordinates
basemap = folium.Map(location=[37.97945, 23.71622], zoom_start=8, tiles='cartodbpositron', width=960, height=512)
for i, c in enumerate(coordinates):
folium.CircleMarker([c[0], c[1]], radius=most_common_locations[places_list[i]]*.25, color='#3186cc',
fill=True, fill_opacity=0.5, fill_color='#3186cc',
popup='{} ({}, {}) appears {} times in book.'.format(places_list[i], c[0], c[1], most_common_locations[places_list[i]])).add_to(basemap)
print('Map of relevant locations in Broneer et al.\'s "Ancient Corinth: A guide to the excavations," weighted by frequency.')
basemap
page = 87
test = counts[counts['page'] == page]['token'].tolist()
print(test)
print(len(test))
from nltk.corpus import stopwords
stops = set(stopwords.words('english'))
pns_list = []
for i in range(1, max(counts['page'])+1):
tokens = counts[counts['page'] == i]['token'].tolist()
tokens = [token for token in tokens if token.lower() not in stops and len(token) > 2]
pns = [token for token in tokens if token[0].isupper()]
combs = [f'{x} {y}' for x, y in combinations(pns, 2)]
pns_list.extend(combs)
|
implementation-contributed/v8/wasm-js/testcfg.py | katemihalikova/test262 | 1,849 | 12792628 | <reponame>katemihalikova/test262<gh_stars>1000+
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
ANY_JS = ".any.js"
WPT_ROOT = "/wasm/jsapi/"
META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(self.root, "data", "test", "js-api")
self.mjsunit_js = os.path.join(os.path.dirname(self.root), "mjsunit",
"mjsunit.js")
def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.testroot):
for dotted in [x for x in dirs if x.startswith(".")]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if (filename.endswith(ANY_JS)):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.testroot) + 1 : -len(ANY_JS)]
testname = relpath.replace(os.path.sep, "/")
test = self._create_test(testname)
tests.append(test)
return tests
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def _get_files_params(self):
files = [os.path.join(self.suite.mjsunit_js),
os.path.join(self.suite.root, "testharness.js")]
source = self.get_source()
for script in META_SCRIPT_REGEXP.findall(source):
if script.startswith(WPT_ROOT):
# Matched an absolute path, strip the root and replace it with our
# local root.
script = os.path.join(self.suite.testroot, script[len(WPT_ROOT):])
elif not script.startswith("/"):
# Matched a relative path, prepend this test's directory.
thisdir = os.path.dirname(self._get_source_path())
script = os.path.join(thisdir, script)
else:
raise Exception("Unexpected absolute path for script: \"%s\"" % script);
files.append(script)
files.extend([
self._get_source_path(),
os.path.join(self.suite.root, "testharness-after.js")
])
return files
def _get_source_path(self):
# All tests are named `path/name.any.js`
return os.path.join(self.suite.testroot, self.path + ANY_JS)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
|
tests/st/ops/gpu/test_relu_op.py | GuoSuiming/mindspore | 3,200 | 12792700 | <reponame>GuoSuiming/mindspore<filename>tests/st/ops/gpu/test_relu_op.py
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
class NetRelu(nn.Cell):
def __init__(self):
super(NetRelu, self).__init__()
self.relu = P.ReLU()
def construct(self, x):
return self.relu(x)
class NetReluDynamic(nn.Cell):
def __init__(self):
super(NetReluDynamic, self).__init__()
self.conv = inner.GpuConvertToDynamicShape()
self.relu = P.ReLU()
def construct(self, x):
x_conv = self.conv(x)
return self.relu(x_conv)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_float32():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.float32))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.float32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int8():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.int8))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.int8)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int32():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.int32))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.int32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int64():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.int64))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.int64)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
print(output.asnumpy(), expect)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int64_dynamic_shape():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.int64))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.int64)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu_dynamic = NetReluDynamic()
output = relu_dynamic(x)
assert (output.asnumpy() == expect).all()
|
wagtailmenus/migrations/0010_auto_20160201_1558.py | pierremanceaux/wagtailmenus | 329 | 12792720 | <reponame>pierremanceaux/wagtailmenus
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailmenus', '0009_auto_20160201_0859'),
]
operations = [
migrations.RenameField(
model_name='mainmenuitem',
old_name='add_subnav',
new_name='allow_subnav',
),
]
|
src/zapv2/users.py | tnir/zap-api-python | 146 | 12792723 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
import six
class users(object):
def __init__(self, zap):
self.zap = zap
def users_list(self, contextid=None):
"""
Gets a list of users that belong to the context with the given ID, or all users if none provided.
"""
params = {}
if contextid is not None:
params['contextId'] = contextid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params)))
def get_user_by_id(self, contextid, userid):
"""
Gets the data of the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid})))
def get_authentication_credentials_config_params(self, contextid):
"""
Gets the configuration parameters for the credentials of the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid})))
def get_authentication_credentials(self, contextid, userid):
"""
Gets the authentication credentials of the user with given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid})))
def get_authentication_state(self, contextid, userid):
"""
Gets the authentication state information for the user identified by the Context and User Ids.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid})))
def get_authentication_session(self, contextid, userid):
"""
Gets the authentication session information for the user identified by the Context and User Ids, e.g. cookies and realm credentials.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid})))
def new_user(self, contextid, name, apikey=''):
"""
Creates a new user with the given name for the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey})))
def remove_user(self, contextid, userid, apikey=''):
"""
Removes the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def set_user_enabled(self, contextid, userid, enabled, apikey=''):
"""
Sets whether or not the user, with the given ID that belongs to the context with the given ID, should be enabled.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey})))
def set_user_name(self, contextid, userid, name, apikey=''):
"""
Renames the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey})))
def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''):
"""
Sets the authentication credentials for the user with the given ID that belongs to the context with the given ID.
"""
params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
if authcredentialsconfigparams is not None:
params['authCredentialsConfigParams'] = authcredentialsconfigparams
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params)))
def authenticate_as_user(self, contextid, userid, apikey=''):
"""
Tries to authenticate as the identified user, returning the authentication request and whether it appears to have succeeded.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def poll_as_user(self, contextid, userid, apikey=''):
"""
Tries to poll as the identified user, returning the authentication request and whether it appears to have succeeded. This will only work if the polling verification strategy has been configured.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''):
"""
Sets fields in the authentication state for the user identified by the Context and User Ids.
"""
params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
if lastpollresult is not None:
params['lastPollResult'] = lastpollresult
if lastpolltimeinms is not None:
params['lastPollTimeInMs'] = lastpolltimeinms
if requestssincelastpoll is not None:
params['requestsSinceLastPoll'] = requestssincelastpoll
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params)))
def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''):
"""
Sets the specified cookie for the user identified by the Context and User Ids.
"""
params = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey}
if path is not None:
params['path'] = path
if secure is not None:
params['secure'] = secure
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setCookie/', params)))
|
test/inserted_test.py | screamingskulls/sofi | 402 | 12792727 | from sofi.ui import Inserted
def test_basic():
assert(str(Inserted()) == "<ins></ins>")
def test_text():
assert(str(Inserted("text")) == "<ins>text</ins>")
def test_custom_class_ident_style_and_attrs():
assert(str(Inserted("text", cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
== "<ins id=\"123\" class=\"abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</ins>")
|
insights/components/openstack.py | lhuett/insights-core | 121 | 12792736 | """
IsOpenStackCompute
==================
The ``IsOpenStackCompute`` component uses the ``PsAuxcww`` parser to determine
whether the host is an OpenStack Compute node. It checks if the 'nova-compute' process
exists and, if not, raises ``SkipComponent`` so that the dependent component will not
fire. It can be added as a dependency of a parser so that the parser only fires if the
``IsOpenStackCompute`` dependency is met.
"""
from insights.core.plugins import component
from insights.parsers.ps import PsAuxcww
from insights.core.dr import SkipComponent
@component(PsAuxcww)
class IsOpenStackCompute(object):
"""The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine
OpenStack Compute node. It checks if ``nova-compute`` process exist, if not
raises ``SkipComponent``.
Raises:
SkipComponent: When ``nova-compute`` process does not exist.
"""
def __init__(self, ps):
if 'nova-compute' not in ps.running:
raise SkipComponent('Not OpenStack Compute node')
|
codigo/Live29/exemplo_6.py | cassiasamp/live-de-python | 572 | 12792753 | <reponame>cassiasamp/live-de-python
class Pessoa:
def __init__(self, n, s):
self.n = n
self.s = s
def __hash__(self):
return hash((self.n,self.s))
ll = Pessoa('Lugão','Ricardo')
lulu = Pessoa('Lugão','Ricardinho')
print(hash(ll))    # prints the hash of ('Lugão', 'Ricardo')
print(hash(lulu))  # prints the hash of ('Lugão', 'Ricardinho')
|
cd4ml/feature_set.py | camila-contreras/CD4ML-Scenarios | 113 | 12792775 | <reponame>camila-contreras/CD4ML-Scenarios<gh_stars>100-1000
import logging
def _exclude(fields, excluded):
return [field for field in fields if field not in excluded]
def _combine_dicts(*args):
results = {}
for arg in args:
results.update(arg)
return results
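# Illustrative behaviour of the two helpers above (toy values):
#     _exclude(['a', 'b', 'c'], ['b'])              # -> ['a', 'c']
#     _combine_dicts({'x': 1}, {'y': 2}, {'x': 3})  # -> {'x': 3, 'y': 2}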
class FeatureSetBase:
"""
Generic interface for feature sets
"""
def __init__(self, identifier_field, target_field):
# fields to be filled out in derived class
self.logger = logging.getLogger(__name__)
self.params = None
self.info = None
self.identifier_field = identifier_field
self.target_field = target_field
def fields_excluded_from_features(self):
id_target = [self.identifier_field, self.target_field]
return id_target + self.params['extra_information_fields']
def _exclude_non_features(self, fields):
return _exclude(fields, self.fields_excluded_from_features())
def base_feature_fields_numerical(self):
fields = self.params['base_fields_numerical']
return self._exclude_non_features(fields)
def base_feature_fields_categorical(self):
fields = sorted(self.params['base_categorical_n_levels_dict'].keys())
return self._exclude_non_features(fields)
def base_feature_fields(self):
return self.base_feature_fields_numerical() + self.base_feature_fields_categorical()
def derived_feature_fields_numerical(self):
return self.params['derived_fields_numerical']
def derived_feature_fields_categorical(self):
return sorted(self.params['derived_categorical_n_levels_dict'].keys())
def derived_feature_fields(self):
return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical()
def available_feature_fields_numerical(self):
return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical()
def available_feature_fields_categorical(self):
return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical()
def encoded_feature_fields_numerical(self):
return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields'])
def encoded_feature_fields_categorical(self):
return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields'])
def encoded_feature_fields(self):
return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical()
def omitted_feature_fields_for_input(self):
encoded = self.encoded_feature_fields()
return [field for field in encoded if field not in self.base_feature_fields()]
# feature transformations
def base_features_numerical(self, processed_row):
return {k: processed_row[k] for k in self.base_feature_fields_numerical()}
def base_features_categorical(self, processed_row):
return {k: processed_row[k] for k in self.base_feature_fields_categorical()}
def base_features(self, processed_row):
return {k: processed_row[k] for k in self.base_feature_fields()}
def derived_features_categorical(self, processed_row):
# TODO: override
assert isinstance(processed_row, dict)
return {}
def derived_features_numerical(self, processed_row):
# TODO: override
assert isinstance(processed_row, dict)
return {}
def derived_features(self, processed_row):
num = self.derived_features_numerical(processed_row)
cat = self.derived_features_categorical(processed_row)
return _combine_dicts(num, cat)
def features(self, processed_row):
base = self.base_features(processed_row)
derv = self.derived_features(processed_row)
return _combine_dicts(base, derv)
def ml_fields(self):
categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy()
categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict'])
cat_encoded = {k: v for k, v in categorical_n_levels_dict.items()
if k in self.encoded_feature_fields_categorical()}
numeric_fields = self.encoded_feature_fields_numerical()
intersection = set(cat_encoded.keys()).intersection(numeric_fields)
if intersection:
self.logger.info('categorical')
self.logger.info(cat_encoded)
self.logger.info('numerical')
self.logger.info(numeric_fields)
self.logger.info('intersection')
self.logger.info(intersection)
raise ValueError('categorical and numeric overlap')
return {'categorical': cat_encoded,
'numerical': numeric_fields,
'target_name': self.target_field}
|
src/aiofiles/__init__.py | q0w/aiofiles | 1,947 | 12792812 | <gh_stars>1000+
"""Utilities for asyncio-friendly file handling."""
from .threadpool import open
from . import tempfile
__all__ = ["open", "tempfile"]
|
tests/test_wificontrol.py | TopperBG/pywificontrol | 115 | 12792862 | <filename>tests/test_wificontrol.py
# Written by <NAME> and <NAME> <<EMAIL>>
#
# Copyright (c) 2016, Emlid Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import mock
from wificontrol import WiFiControl
@pytest.fixture
def ssid():
network = {
'ssid': 'Test'
}
return network
class FakeWiFiControl(WiFiControl):
def __init__(self):
self.wifi = mock.MagicMock()
self.wpasupplicant = mock.MagicMock()
self.hotspot = mock.MagicMock()
class TestWiFiControl:
def setup_method(self):
self.manager = FakeWiFiControl()
def test_host_mode(self):
self.manager.hotspot.started = mock.Mock(return_value=False)
self.manager.start_host_mode()
assert self.manager.wpasupplicant.stop.call_count == 1
assert self.manager.hotspot.started.call_count == 1
assert self.manager.hotspot.start.call_count == 1
def test_client_mode(self):
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.start_client_mode()
assert self.manager.hotspot.stop.call_count == 1
assert self.manager.wpasupplicant.started.call_count == 1
assert self.manager.wpasupplicant.start.call_count == 1
def test_wifi_turn_on(self):
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.hotspot.started = mock.Mock(return_value=False)
self.manager.turn_on_wifi()
assert self.manager.wifi.unblock.call_count == 1
assert self.manager.wpasupplicant.started.call_count == 1
assert self.manager.wpasupplicant.start.call_count == 1
self.manager.wpasupplicant.started.return_value = True
assert self.manager.get_wifi_turned_on() is True
def test_wifi_turn_off(self):
self.manager.wpasupplicant.started = mock.Mock(return_value=True)
self.manager.hotspot.started = mock.Mock(return_value=False)
self.manager.turn_off_wifi()
assert self.manager.wifi.block.call_count == 1
assert self.manager.hotspot.stop.call_count == 1
assert self.manager.wpasupplicant.stop.call_count == 1
self.manager.wpasupplicant.started.return_value = False
assert self.manager.get_wifi_turned_on() is False
def test_wifi_turn_on_if_wifi_is_on(self):
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.hotspot.started = mock.Mock(return_value=True)
self.manager.turn_on_wifi()
assert self.manager.wifi.unblock.call_count == 0
assert self.manager.wpasupplicant.started.call_count == 1
assert self.manager.hotspot.started.call_count == 1
assert self.manager.wpasupplicant.start.call_count == 0
assert self.manager.hotspot.start.call_count == 0
def test_network_add(self, ssid):
self.manager.add_network(ssid)
assert self.manager.wpasupplicant.add_network.is_called_once_with(ssid)
def test_network_remove(self, ssid):
self.manager.remove_network(ssid)
assert self.manager.wpasupplicant.remove_network.is_called_once_with(ssid)
def test_status_get(self, ssid):
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.hotspot.started = mock.Mock(return_value=True)
state, status = self.manager.get_status()
assert state == self.manager.HOST_STATE
assert status is None
self.manager.wpasupplicant.started.return_value = True
self.manager.hotspot.started.return_value = False
self.manager.wpasupplicant.get_status = mock.Mock(return_value=ssid)
state, status = self.manager.get_status()
assert state == self.manager.WPA_STATE
assert status == ssid
def test_start_connection(self, ssid):
def start_connecting(*args):
self.manager.hotspot.started.return_value = False
self.manager.revert_on_connect_failure(result=None)
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.wpasupplicant.start_connecting.side_effect = start_connecting
self.manager.hotspot.started = mock.Mock(return_value=True)
self.manager.start_connecting(ssid)
assert self.manager.wpasupplicant.started.call_count == 1
assert self.manager.hotspot.stop.call_count == 1
assert self.manager.wpasupplicant.start.call_count == 1
args = (ssid, self.manager.revert_on_connect_failure, None, 10)
assert self.manager.wpasupplicant.start_connecting.is_called_once_with(args)
assert self.manager.hotspot.started.call_count == 1
assert self.manager.wpasupplicant.stop.call_count == 1
assert self.manager.hotspot.start.call_count == 1
def test_reconnection(self, ssid):
def start_connecting(result, callback, args, timeout):
self.manager.hotspot.started.return_value = False
if args:
callback({}, *args)
else:
callback(result)
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.wpasupplicant.start_connecting.side_effect = start_connecting
self.manager.hotspot.started = mock.Mock(return_value=True)
self.manager.start_connecting(ssid, callback=self.manager.reconnect,
args=(ssid,))
assert self.manager.wpasupplicant.start_connecting.call_count == 2
def test_supplicant_functions(self):
self.manager.scan()
assert self.manager.wpasupplicant.scan.call_count == 1
self.manager.get_scan_results()
assert self.manager.wpasupplicant.get_scan_results.call_count == 1
self.manager.get_added_networks()
assert self.manager.wpasupplicant.get_added_networks.call_count == 1
self.manager.get_ip()
assert self.manager.wifi.get_device_ip.call_count == 1
self.manager.stop_connecting()
assert self.manager.wpasupplicant.stop_connecting.call_count == 1
self.manager.disconnect()
assert self.manager.wpasupplicant.disconnect.call_count == 1
self.manager.get_device_name()
assert self.manager.hotspot.get_host_name.call_count == 1
self.manager.get_hostap_name()
assert self.manager.hotspot.get_hostap_name.call_count == 1
name = 'test'
self.manager.set_device_names(name)
assert self.manager.wpasupplicant.set_p2p_name.call_count == 1
        self.manager.wpasupplicant.set_p2p_name.assert_called_once_with(name)
assert self.manager.hotspot.set_hostap_name.call_count == 1
        self.manager.hotspot.set_hostap_name.assert_called_once_with(name)
assert self.manager.hotspot.set_host_name.call_count == 1
        self.manager.hotspot.set_host_name.assert_called_once_with(name)
assert self.manager.wifi.restart_dns.call_count == 1
self.manager.set_hostap_password(name)
        self.manager.hotspot.set_hostap_password.assert_called_once_with(name)
def test_verify_names(self):
name = 'test'
mac_addr = '11:22:33:44:55:66'
self.manager.hotspot.get_host_name.return_value = name
self.manager.wpasupplicant.get_p2p_name.return_value = name
self.manager.hotspot.get_hostap_name.return_value = "{}{}".format(name, mac_addr[-6:])
self.manager.hotspot.get_device_mac.return_value = mac_addr[-6:]
assert self.manager.verify_hostap_name(name)
assert self.manager.verify_device_names(name)
assert self.manager.hotspot.get_host_name.call_count == 1
assert self.manager.wpasupplicant.get_p2p_name.call_count == 1
|
vision/visualization.py | yihui-he2020/epipolar-transformers | 360 | 12792888 | import os.path, sys, re, cv2, glob, numpy as np
import os.path as osp
from tqdm import tqdm
from IPython import embed
import scipy
import matplotlib.pyplot as plt
from skimage.transform import resize
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import auc
from matplotlib.patches import Circle
import torch
# from .ipv_vis import *
from vision.triangulation import triangulate
from vision.multiview import pix2coord, coord2pix
from core import cfg
from vision.multiview import de_normalize
from vision.visualizer_human import draw_2d_pose
from vision.visualizer_hand import plot_hand_3d
class Cursor(object):
def __init__(self, sample_ax, draw_ax):
self.sample_ax = sample_ax
self.draw_ax = draw_ax
self.lx = sample_ax.axhline(color='k') # the horiz line
self.ly = sample_ax.axvline(color='k') # the vert line
# text location in axes coords
self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
def mouse_down(self, event):
if not event.inaxes:
return
x, y = event.xdata, event.ydata
# update the line positions
self.lx.set_ydata(y)
self.ly.set_xdata(x)
self.txt.set_text('x=%1.1f, y=%1.1f' % (x, y))
self.sample_ax.figure.canvas.draw()
for i in self.draw_ax:
i.clear()
i.figure.canvas.draw()
self.sample_ax.imshow(ref_img)
a, b, heatmap = heatmapat(x, y, weights[0])
im1= self.draw_ax[1].imshow(heatmap, cmap=cmap.hot)
self.draw_ax[1].set_title("%f~%f" % (a, b))
a, b, heatmap = heatmapat(x, y, weights[1])
im2= self.draw_ax[2].imshow(heatmap, cmap=cmap.hot)
self.draw_ax[2].set_title("%f~%f" % (a, b))
a, b, heatmap = heatmapat(x, y, weights[2])
im3= self.draw_ax[3].imshow(heatmap, cmap=cmap.hot)
self.draw_ax[3].set_title("%f~%f" % (a, b))
# fig.colorbar(im2, ax=axs[0, 1])
circ = Circle((x, y),2,color='r')
axs[0, 0].add_patch(circ)
plt.show()
class Cursor_for_epipolar_line(object):
def __init__(self, sample_ax, draw_ax, sample_locs, H, W, axs, img2, outs):
self.sample_ax = sample_ax
self.draw_ax = draw_ax
self.lx = sample_ax.axhline(color='k') # the horiz line
self.ly = sample_ax.axvline(color='k') # the vert line
# text location in axes coords
self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
self.sample_locs = sample_locs
self.H = H
self.W = W
self.axs = axs
self.img2 = img2
self.outs = outs
def mouse_down(self, event):
if not event.inaxes:
return
x, y = event.xdata, event.ydata
self.lx.set_ydata(y)
self.ly.set_xdata(x)
# pr_cost_volume = self.depth[:, int(y), int(x)]
# cost_volume_xs = np.arange(0, pr_cost_volume.shape[0])
# xx, yy = self.corr_pos_pred[int(y)][int(x)]
self.txt.set_text('x=%1.1f, y=%1.1f' % (x, y))
self.sample_ax.figure.canvas.draw()
for i in self.draw_ax:
i.clear()
i.figure.canvas.draw()
self.axs[1, 0].clear()
self.axs[1, 0].imshow(self.img2)
inty, intx = int(y+0.5), int(x+0.5)
print(self.sample_locs[:, inty, intx])
_, _, _, debugsample_locs, intersections, mask, valid_intersections, start, vec = self.outs
print(intx, inty)
print('debugsample_locs', debugsample_locs[:, 0, inty, intx])
print('intersections', intersections.view(-1, 64, 64, 4, 2)[0, inty, intx])
print('mask', mask.view(-1, 64, 64, 4)[0, inty, intx])
print('valid_intersections', valid_intersections.view(-1, 64, 64, 2, 2)[0, inty, intx])
print('start', start.view(-1, 64, 64, 2)[0, inty, intx])
print('vec', vec.view(-1, 64, 64, 2)[0, inty, intx])
for i in range(64):
# pos = self.sample_locs[i][int(y+0.5)][int(x+0.5)]
pos = debugsample_locs[i, 0, inty, intx].cpu().numpy().copy()
depos = de_normalize(pos, self.H, self.W)
# circ = Circle((int(depos[0]), int(depos[1])),1,color='b', alpha=0.5)
circ = Circle((depos[0], depos[1]), 1 , color='b', alpha=0.5)
self.axs[1, 0].add_patch(circ)
# circ = Circle((xx, yy),2,color='r')
self.axs[1, 0].add_patch(circ)
plt.show()
class Cursor_for_corrspondence(object):
def __init__(self, sample_ax, draw_ax, depth, corr_pos_pred, sample_locs, H, W):
self.sample_ax = sample_ax
self.draw_ax = draw_ax
self.lx = sample_ax.axhline(color='k') # the horiz line
self.ly = sample_ax.axvline(color='k') # the vert line
# text location in axes coords
self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
self.depth = depth
self.corr_pos_pred = corr_pos_pred
self.sample_locs = sample_locs
self.H = H
self.W = W
def mouse_down(self, event):
if not event.inaxes:
return
x, y = event.xdata, event.ydata
self.lx.set_ydata(y)
self.ly.set_xdata(x)
pr_cost_volume = self.depth[:, int(y), int(x)]
cost_volume_xs = np.arange(0, pr_cost_volume.shape[0])
xx, yy = self.corr_pos_pred[int(y)][int(x)]
self.txt.set_text('x=%1.1f, y=%1.1f depth=%.5f\nCorr xx=%d, yy=%d' % (x, y, np.max(pr_cost_volume), xx, yy))
self.sample_ax.figure.canvas.draw()
for i in self.draw_ax:
i.clear()
i.figure.canvas.draw()
axs[1, 0].clear()
axs[1, 0].imshow(img2)
for i in range(64):
pos = sample_locs[i][int(y)][int(x)]
depos = de_normalize(pos, H, W)
circ = Circle((int(depos[0]), int(depos[1])),1,color='b', alpha=0.5)
axs[1, 0].add_patch(circ)
circ = Circle((xx, yy),2,color='r')
axs[1, 0].add_patch(circ)
plt.show()
def toimg(x):
return x.squeeze().numpy().transpose([1,2,0])
def de_transform(img):
img[..., 0, :, :] = img[..., 0, :, :] * 0.229 + 0.485
img[..., 1, :, :] = img[..., 1, :, :] * 0.224 + 0.456
img[..., 2, :, :] = img[..., 2, :, :] * 0.225 + 0.406
return img
def draw_auc(predictions, pck, auc_path):
max_threshold = 20
thresholds = np.linspace(0, max_threshold, num=20)
pck = np.sum(pck, axis=0)
auc_value = auc(thresholds, pck) / max_threshold
print('AUC: ', auc_value)
plt.plot(thresholds, pck, 'r')
plt.axis([0, 20, 0, 1])
plt.savefig(auc_path)
plt.show()
def get_point_cloud(img1, img2, KRT1, KRT2, RT1, RT2, corr_pos, score):
"""
KRT:
corr_pos: feat_h x feat_w x 2
score: sample_size x feat_h x feat_w
"""
y = np.arange(0, img1.shape[0]) # 128
x = np.arange(0, img1.shape[1]) # 84
grid_x, grid_y = np.meshgrid(x, y)
grid_y = pix2coord(grid_y, cfg.BACKBONE.DOWNSAMPLE)
grid_y = grid_y * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
grid_x = pix2coord(grid_x, cfg.BACKBONE.DOWNSAMPLE)
grid_x = grid_x * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
# 2668 * 4076
grid_corr = pix2coord(corr_pos, cfg.BACKBONE.DOWNSAMPLE)
grid_corr = grid_corr * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
grid = np.stack((grid_x, grid_y))
grid = grid.reshape(2, -1)
grid_corr = grid_corr.reshape(-1, 2).transpose()
from scipy.misc import imresize
sample_size, fh, fw = score.shape
resized_img2 = imresize(img2, (fh, fw))
max_score = np.max(score.reshape(sample_size, -1), axis=0).reshape(fh, fw)
select_pos1 = max_score > 0.02
print('->', np.sum(select_pos1))
select_pos2 = np.sum(resized_img2, axis=2) > 20
print('->',np.sum(select_pos2))
select_pos3 = np.sum(corr_pos, axis=2) > -50
    print('->', np.sum(select_pos3))
select_pos = np.logical_and(select_pos3, select_pos2).reshape(-1)
# select_pos = select_pos3
print('-->',np.sum(select_pos))
select_pos = select_pos.reshape(-1)
select_img_point = resized_img2.reshape(fh*fw, 3)[select_pos, :]
print(select_pos.shape)
print('total pos', sum(select_pos))
p3D = cv2.triangulatePoints(KRT2, KRT1, grid_corr[:,select_pos], grid[:,select_pos])
# p3D = cv2.triangulatePoints(KRT2, KRT1, grid_corr, grid)
# depth = np.ones((fh, fw)) * np.min((KRT1@p3D)[2, :])
depth = np.ones((fh, fw)) * np.max((KRT1@p3D)[2, :])
cnt = 0
for i in range(fh):
for j in range(fw):
if not select_pos[i*fw+j]:
continue
p_homo = (KRT1 @ p3D[:, cnt])
p = p_homo / p_homo[2]
depth[int(coord2pix(p[1], 32)), int(coord2pix(p[0], 32))] = p_homo[2]
cnt += 1
p3D /= p3D[3]
p3D = p3D[:3].squeeze()
depth = (depth - depth.min()) / (depth.max() - depth.min()) + 1
depth = np.log(depth)
depth = (depth - depth.min()) / (depth.max() - depth.min())
#######vis
fig = plt.figure(1)
ax1_1 = fig.add_subplot(331)
ax1_1.imshow(img1)
ax1_2 = fig.add_subplot(332)
ax1_2.imshow(img2)
w = corr_pos[:, :, 0]
w = (w - w.min()) / (w.max() - w.min())
ax1_1 = fig.add_subplot(334)
ax1_1.imshow(w)
w = corr_pos[:, :, 1]
w = (w - w.min()) / (w.max() - w.min())
ax1_1 = fig.add_subplot(335)
ax1_1.imshow(w)
# w1 = corr_pos[:, :, 0]
# w1 = (w1 - w1.min()) / (w1.max() - w1.min())
# w2 = corr_pos[:, :, 1]
# w2 = (w2 - w2.min()) / (w2.max() - w2.min())
# W = np.stack([w1, w2, np.ones(w2.shape)], axis=0)
# ax2_1 = fig.add_subplot(336)
# ax2_1.imshow(W.transpose(1,2,0))
ax1_1 = fig.add_subplot(336)
ax1_1.imshow(depth)
w = select_pos1.reshape(fh,fw)
# w = (w - w.min()) / (w.max() - w.min())
ax2_1 = fig.add_subplot(337)
ax2_1.imshow(w)
w = select_pos2.reshape(fh,fw)
# w = (w - w.min()) / (w.max() - w.min())
ax2_1 = fig.add_subplot(338)
ax2_1.imshow(w)
w = select_pos.reshape(fh,fw)
# w = (w - w.min()) / (w.max() - w.min())
ax2_1 = fig.add_subplot(339)
ax2_1.imshow(w)
####### end vis
# w = select_img_point[:, :10000].reshape(-1, 100, 100).transpose(1,2,0)
# w = (w - w.min()) / (w.max() - w.min())
# ax2_1 = fig.add_subplot(326)
# ax2_1.imshow(w)
plt.show()
return p3D, select_img_point
def visualization(cfg):
if cfg.VIS.POINTCLOUD and 'h36m' not in cfg.OUTPUT_DIR:
output_dir = cfg.OUTPUT_DIR
dataset_names = cfg.DATASETS.TEST
predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
print(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
cnt = 0
# for inputs, pred in predictions:
while True:
inputs, pred = predictions[cnt]
heatmap = inputs.get('heatmap')
points2d = inputs.get('points-2d')
KRT = inputs.get('KRT')[0]
RT = inputs.get('RT')[0]
image_path = inputs.get('img-path')
print('image path:', image_path)
img = resize(plt.imread(image_path), (128, 84, 3))
other_KRT = inputs.get('other_KRT')[0]
other_RT = inputs.get('other_RT')[0]
other_image_path = inputs.get('other_img_path')[0]
print('other image path', other_image_path)
other_img = resize(plt.imread(other_image_path), (128, 84, 3))
heatmap_pred = pred.get('heatmap_pred')
score_pred = pred.get('score_pred')
corr_pos_pred = pred.get('corr_pos')
sim = pred.get('depth')
import pdb; pdb.set_trace()
# p3D, img_pt = get_point_cloud(img, other_img, KRT, other_KRT, RT, other_RT, corr_pos_pred, sim)
output = {
# 'p3D': p3D,
# 'img_pt': img_pt,
'img1': img,
'img2' : other_img,
'img1_path': image_path,
'img2_path': other_image_path,
'RT' : RT,
'other_RT': other_RT,
'corr_pos_pred': corr_pos_pred,
'depth': sim,
}
if 'sample_locs' in pred:
sample_locs = pred.get('sample_locs')
output['sample_locs'] = sample_locs
else:
print('No sample_locs!!!!!')
import pickle
with open('baseline_' + "output_{:d}.pkl".format(cnt),"wb") as f:
pickle.dump(output, f)
print('saved! to ', 'baseline_' + "output_{:d}.pkl".format(cnt))
cnt += 1
# break
# ipv_prepare(ipv)
# ipv_draw_point_cloud(ipv, p3D, colors=img_pt, pt_size=1)
# ipv.xyzlim(500)
# ipv.show()
if cfg.VIS.POINTCLOUD and 'h36m' in cfg.OUTPUT_DIR:
output_dir = cfg.OUTPUT_DIR
dataset_names = cfg.DATASETS.TEST
baseline = "baseline" in cfg.VIS.SAVE_PRED_NAME
name = "_baseline" if baseline else ""
predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions"+name+".pth"))
print(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions"+name+".pth"))
cnt = 0
# for inputs, pred in predictions:
while True:
inputs, pred = predictions[cnt]
print('input keys:')
print(inputs.keys())
print('pred keys:')
print(pred.keys())
heatmap = inputs.get('heatmap')
other_heatmap = inputs.get('other_heatmap')
points2d = inputs.get('points-2d')
KRT = inputs.get('KRT')[0]
camera = inputs.get('camera')
other_camera = inputs.get('other_camera')
image_path = inputs.get('img-path')[0]
print(image_path)
# image_path = 'images.zip@'
image_file = osp.join("datasets", 'h36m', 'images.zip@', 'images',
image_path)
# from utils import zipreader
# data_numpy = zipreader.imread(
# image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# img = data_numpy[:1000]
# assert img.shape == (1000, 1000, 3), img.shape
img = inputs.get('img')
other_KRT = inputs.get('other_KRT')[0]
# other_RT = inputs.get('other_RT')[0]
other_image_path = inputs.get('other_img-path')[0]
print('other image path', other_image_path)
other_image_file = osp.join("datasets", 'h36m', 'images.zip@', 'images',
other_image_path)
other_img = inputs.get('other_img')
heatmap_pred = pred.get('heatmap_pred')
score_pred = pred.get('score_pred')
corr_pos_pred = pred.get('corr_pos')
sim = pred.get('depth')
batch_locs = pred.get('batch_locs')
# p3D, img_pt = get_point_cloud(img, other_img, KRT, other_KRT, RT, other_RT, corr_pos_pred, sim)
output = {
# 'p3D': p3D,
# 'img_pt': img_pt,
'img1': img,
'img2' : other_img,
'img1_path': image_file,
'img2_path': other_image_file,
# 'RT' : RT,
# 'other_RT': other_RT,
'heatmap': heatmap,
'other_heatmap': other_heatmap,
'points-2d': points2d,
'corr_pos_pred': corr_pos_pred,
'depth': sim,
'heatmap_pred': heatmap_pred,
'batch_locs': batch_locs,
'camera': camera,
'other_camera': other_camera,
}
if 'sample_locs' in pred:
sample_locs = pred.get('sample_locs')
output['sample_locs'] = sample_locs
else:
print('No sample_locs!!!!!')
import pickle
with open(cfg.OUTPUT_DIR + "/visualizations/h36m/output{}_{:d}.pkl".format(name, cnt),"wb") as f:
pickle.dump(output,f)
print('saved!')
cnt += 1
# depth = output['depth']
# corr_pos_pred = output['corr_pos_pred']
# sample_locs = output['sample_locs']
if cfg.EPIPOLAR.VIS:
if 'h36m' in cfg.OUTPUT_DIR:
from data.build import make_data_loader
if cfg.VIS.MULTIVIEWH36M:
data_loader = make_data_loader(cfg, is_train=True, force_shuffle=True)
elif cfg.VIS.H36M:
from data.datasets.joints_dataset import JointsDataset
from data.datasets.multiview_h36m import MultiViewH36M
data_loader = MultiViewH36M('datasets', 'validation', True)
print(len(data_loader))
for i in tqdm(range(len(data_loader))):
data_loader.__getitem__(i)
data_loader = make_data_loader(cfg, is_train=False)[0]
# data_loader = make_data_loader(cfg, is_train=True, force_shuffle=True)
# data_loader = make_data_loader(cfg, is_train=False, force_shuffle=True)[0]
# for idx, batchdata in enumerate(tqdm(data_loader)):
if not cfg.VIS.MULTIVIEWH36M and not cfg.VIS.H36M:
cpu = lambda x: x.cpu().numpy() if isinstance(x, torch.Tensor) else x
from modeling.layers.epipolar import Epipolar
imgmodel = Epipolar()
debugmodel = Epipolar(debug=True)
KRT0 = batchdata['KRT'].squeeze()[None, 0]
KRT1 = batchdata['other_KRT'].squeeze()[None, 0]
# batchdata['img']: 1 x 4 x 3 x 256 x 256
            input_img = batchdata['img'].squeeze()[None, 0, :, ::4, ::4]
            input_other_img = batchdata['other_img'].squeeze()[None, 0, :, ::4, ::4]
outs = debugmodel(input_img, input_other_img, KRT0, KRT1)
H, W = input_img.shape[-2:]
print(H, W)
orig_img = de_transform(cpu(batchdata['img'].squeeze()[None, ...])[0][0])
orig_other_img = de_transform(cpu(batchdata['other_img'].squeeze()[None, ...])[0][0])
# outs = imgmodel(batchdata['heatmap'][:, 0], batchdata['heatmap'][:, 1], batchdata['KRT'][:, 0], batchdata['other_KRT'][:, 1])
out, sample_locs = imgmodel.imgforward_withdepth(input_img, input_other_img, KRT0, KRT1, outs[2][0])
if not cfg.VIS.CURSOR:
                # show_img = de_transform(cpu(batchdata['img'][:, 0, :, ::4, ::4])[0][0])
                # show_other_img = de_transform(cpu(batchdata['other_img'][:, 0, :, ::4, ::4])[0][0])
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax1.imshow(orig_img[::-1].transpose((1,2,0)))
ax2.imshow(orig_other_img[::-1].transpose((1,2,0)))
ax3.imshow(cpu(batchdata['heatmap'])[0][0].sum(0))
ax4.imshow(cpu(batchdata['other_heatmap'])[0][0].sum(0))
# ax5.imshow(cpu(outs[0])[0].sum(0))
print(out.shape)
out_img = de_transform(cpu(out)[0, ::-1].transpose((1,2,0)))
ax5.imshow(out_img)
plt.show()
else:
print(sample_locs.shape) # 64 x 1 x H x W x 2
sample_locs = sample_locs[:, 0, :, :, :]
# import pdb; pdb.set_trace()
fig, axs = plt.subplots(2, 2)
cus = Cursor_for_epipolar_line(axs[0,0], [axs[0,1], axs[1,0], axs[1,1]], sample_locs, H, W, axs, \
cpu(input_other_img)[0, :, :, :][::-1].transpose((1,2,0)), outs)
axs[0, 0].imshow(cpu(input_img)[0, :, :, :][::-1].transpose((1,2,0)))
# prob_im = axs[1, 1].imshow(max_score)
fig.canvas.mpl_connect('button_press_event', cus.mouse_down)
plt.show()
return
output_dir = cfg.OUTPUT_DIR
dataset_names = cfg.DATASETS.TEST
predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
pck = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "pck.pth"))
if cfg.VIS.AUC:
auc_path = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "auc.png")
draw_auc(predictions, pck, auc_path)
total = 0
for inputs, pred in predictions:
heatmap = inputs.get('heatmap')
points2d = inputs.get('points-2d')
hand_side = inputs.get('hand-side')
img = inputs.get('img')
can_3dpoints = inputs.get('can-points-3d')
normed_3d = inputs.get('normed-points-3d')
target_global = inputs.get('points-3d')
rot_mat = inputs.get('rotation')
R_global = inputs.get('R')
keypoint_scale = inputs.get('scale')
visibility = inputs.get('visibility')
unit = inputs.get('unit')
image_path = inputs.get('img-path')
can_pred = pred.get('can_pred')
normed_pred = pred.get('normed_pred')
heatmap_pred = pred.get('heatmap_pred')
im = plt.imread(image_path)
image = np.array(im, dtype=np.int)
if cfg.DATASETS.TASK == 'keypoint':
fig = plt.figure(1)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
#ax1.imshow(image)
print(heatmap.min(), heatmap.max())
print(heatmap_pred.min(), heatmap_pred.max())
ax2.imshow(heatmap.sum(0).T)
ax3.imshow(heatmap_pred.sum(0).T)
else:
total += 1
visibility = visibility.squeeze()[..., None]
can_3dpoints = can_3dpoints * visibility
can_pred = can_pred * visibility
normed_3d = normed_3d * visibility
normed_pred = normed_pred * visibility
delta = normed_pred - normed_3d
print(delta)
print('L1 err = ', np.abs(delta).sum())
print('L2 err = ', ((delta**2).sum(-1)**0.5).mean())
fig = plt.figure(1)
ax1_1 = fig.add_subplot(331)
ax1_2 = fig.add_subplot(332)
#ax1_3 = fig.add_subplot(333)
#ax2 = fig.add_subplot(222)
ax2_1 = fig.add_subplot(334, projection='3d')
ax2_2 = fig.add_subplot(335, projection='3d')
ax2_3 = fig.add_subplot(336, projection='3d')
ax3_1 = fig.add_subplot(337, projection='3d')
ax3_2 = fig.add_subplot(338, projection='3d')
ax3_3 = fig.add_subplot(333, projection='3d')
ax1_1.imshow(image)
ax1_2.imshow(image)
#ax1_3.imshow(image)
#ax2.imshow(image)
plot_hand_3d(can_3dpoints, visibility, ax2_1)
ax2_1.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
plot_hand_3d(can_pred, visibility, ax2_2)
ax2_2.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
plot_hand_3d(can_3dpoints, visibility, ax2_3)
plot_hand_3d(can_pred, visibility, ax2_3)
ax2_3.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
# ax3.set_xlim([-3, 3])
# ax3.set_ylim([-3, 3])
# ax3.set_zlim([-3, 3])
plot_hand_3d(normed_3d, visibility, ax3_1)
ax3_1.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
plot_hand_3d(normed_pred, visibility, ax3_2)
ax3_2.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
plot_hand_3d(normed_3d, visibility, ax3_3)
plot_hand_3d(normed_pred, visibility, ax3_3)
ax3_3.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
# ax3.set_xlim([-3, 3])
# ax3.set_ylim([-3, 3])
# ax3.set_zlim([-3, 3])
plt.show()
print("show")
|
examples/poll_card.py | smaeda-ks/twitter-python-ads-sdk | 162 | 12792897 | from twitter_ads.campaign import Tweet
from twitter_ads.client import Client
from twitter_ads.creative import MediaLibrary, PollCard
from twitter_ads.enum import MEDIA_TYPE
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
ACCOUNT_ID = ''
# initialize the client
client = Client(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# load the advertiser account instance
account = client.accounts(ACCOUNT_ID)
# most recent Media Library video
ml = MediaLibrary(account).all(account, media_type=MEDIA_TYPE.VIDEO)
media_key = ml.first.media_key
# create Poll Card with video
pc = PollCard(account)
pc.duration_in_minutes = 10080 # one week
pc.first_choice = 'Northern'
pc.second_choice = 'Southern'
pc.name = ml.first.name + ' poll card from SDK'
pc.media_key = media_key
pc.save()
# create Tweet
Tweet.create(account, text='Which hemisphere do you prefer?', card_uri=pc.card_uri)
# https://twitter.com/apimctestface/status/973002610033610753
|
kibitzr/cli.py | paulmassen/kibitzr | 478 | 12792900 | import sys
import logging
import click
import entrypoints
LOG_LEVEL_CODES = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
}
def merge_extensions(click_group):
"""
Each extension is called with click group for
ultimate agility while preserving cli context.
"""
for extension in load_extensions():
extension(click_group)
return click_group
def load_extensions():
"""Return list of Kibitzr CLI extensions"""
return [
point.load()
for point in entrypoints.get_group_all("kibitzr.cli")
]
@click.group()
@click.option("-l", "--log-level", default="info",
type=click.Choice(LOG_LEVEL_CODES.keys()),
help="Logging level")
@click.pass_context
def cli(ctx, log_level):
"""Run kibitzr COMMAND --help for detailed descriptions"""
ctx.obj = {'log_level': LOG_LEVEL_CODES[log_level.lower()]}
@cli.command()
def version():
"""Print version"""
from kibitzr import __version__ as kibitzr_version
print(kibitzr_version)
@cli.command()
def firefox():
"""Launch Firefox with persistent profile"""
from kibitzr.app import Application
Application().run_firefox()
@cli.command()
@click.argument('name', nargs=-1)
@click.pass_context
def once(ctx, name):
"""Run kibitzr checks once and exit"""
from kibitzr.app import Application
app = Application()
sys.exit(app.run(once=True, log_level=ctx.obj['log_level'], names=name))
@cli.command()
@click.argument('name', nargs=-1)
@click.pass_context
def run(ctx, name):
"""Run kibitzr in the foreground mode"""
from kibitzr.app import Application
app = Application()
sys.exit(app.run(once=False, log_level=ctx.obj['log_level'], names=name))
@cli.command()
def init():
"""Create boilerplate configuration files"""
from kibitzr.app import Application
Application.bootstrap()
@cli.command()
def telegram_chat():
"""Return chat id for the last message sent to Telegram Bot"""
# rename import to escape name clashing:
from kibitzr.app import Application
app = Application()
app.telegram_chat()
@cli.command()
def clean():
"""Clean change history"""
from kibitzr.storage import PageHistory
PageHistory.clean()
@cli.command()
def stash():
"""Print stash contents"""
from kibitzr.stash import Stash
Stash.print_content()
extended_cli = merge_extensions(cli)
if __name__ == "__main__":
extended_cli()
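# Extension sketch (comments only; the package, module and function names below
# are placeholders, not part of kibitzr). A third-party package can hook into
# this CLI by exposing an entry point in the "kibitzr.cli" group; merge_extensions()
# above then calls it with the click group so it can register extra commands:
#
#   # my_kibitzr_ext/cli.py
#   def extend_cli(cli_group):
#       @cli_group.command()
#       def hello():
#           """Example command added by an extension"""
#           print("hello from extension")
#
#   # setup.cfg of the extension package
#   [options.entry_points]
#   kibitzr.cli =
#       my_ext = my_kibitzr_ext.cli:extend_cli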
|
examples/pybullet/gym/pybullet_envs/minitaur/agents/trajectory_generator/tg_inplace.py | felipeek/bullet3 | 9,136 | 12792908 | """Trajectory Generator for in-place stepping motion for quadruped robot."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
TWO_PI = 2 * math.pi
def _get_actions_asymmetric_sine(phase, tg_params):
"""Returns the leg extension given current phase of TG and parameters.
Args:
phase: a number in [0, 2pi) representing current leg phase
tg_params: a dictionary of tg parameters:
stance_lift_cutoff -- switches the TG between stance (phase < cutoff) and
lift (phase > cutoff) phase
      amplitude_stance -- amplitude in stance phase
amplitude_lift -- amplitude in lift phase
center_extension -- center of leg extension
"""
stance_lift_cutoff = tg_params['stance_lift_cutoff']
a_prime = np.where(phase < stance_lift_cutoff, tg_params['amplitude_stance'],
tg_params['amplitude_lift'])
scaled_phase = np.where(
phase > stance_lift_cutoff, np.pi + (phase - stance_lift_cutoff) /
(TWO_PI - stance_lift_cutoff) * np.pi, phase / stance_lift_cutoff * np.pi)
return tg_params['center_extension'] + a_prime * np.sin(scaled_phase)
def step(current_phases, leg_frequencies, dt, tg_params):
"""Steps forward the in-place trajectory generator.
Args:
current_phases: phases of each leg.
leg_frequencies: the frequency to proceed the phase of each leg.
dt: amount of time (sec) between consecutive time steps.
tg_params: a set of parameters for trajectory generator, see the docstring
of "_get_actions_asymmetric_sine" for details.
Returns:
actions: leg swing/extensions as output by the trajectory generator.
new_state: new swing/extension.
"""
new_phases = np.fmod(current_phases + TWO_PI * leg_frequencies * dt, TWO_PI)
extensions = []
for leg_id in range(4):
extensions.append(
_get_actions_asymmetric_sine(new_phases[..., leg_id], tg_params))
return new_phases, extensions
def reset():
return np.array([0, np.pi * 0.5, np.pi, np.pi * 1.5])
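# Minimal usage sketch. The tg_params values below are illustrative assumptions,
# not tuned gait parameters taken from this project.
if __name__ == '__main__':
  example_params = {
      'stance_lift_cutoff': np.pi,  # assumed split point between stance and lift
      'amplitude_stance': 0.1,
      'amplitude_lift': 0.3,
      'center_extension': 0.0,
  }
  phases = reset()
  # advance each leg at an assumed 1.5 Hz for one 10 ms control step
  phases, extensions = step(phases, np.full(4, 1.5), dt=0.01, tg_params=example_params)
  print(phases, extensions)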
|
src/semver/__init__.py | b0uh/python-semver | 159 | 12792929 | <gh_stars>100-1000
"""
semver package major release 3.
A Python module for semantic versioning. Simplifies comparing versions.
"""
from ._deprecated import (
bump_build,
bump_major,
bump_minor,
bump_patch,
bump_prerelease,
compare,
finalize_version,
format_version,
match,
max_ver,
min_ver,
parse,
parse_version_info,
replace,
cmd_bump,
cmd_compare,
cmd_nextver,
cmd_check,
createparser,
process,
main,
)
from .version import Version, VersionInfo
from .__about__ import (
__version__,
__author__,
__maintainer__,
__author_email__,
__description__,
__maintainer_email__,
SEMVER_SPEC_VERSION,
)
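# Usage sketch (comments only, so nothing executes at import time). The calls
# reflect the public names re-exported above; treat exact method names as
# assumptions if your installed release differs:
#
#   import semver
#   v = semver.Version.parse("1.2.3-rc.1")
#   v.bump_minor()                    # Version("1.3.0")
#   semver.compare("1.0.0", "2.0.0")  # -1 (deprecated helper kept for compatibility)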
|
mergify_engine/tests/functional/actions/test_update.py | truthiswill/mergify-engine | 266 | 12792971 | # -*- encoding: utf-8 -*-
#
# Copyright © 2018–2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from mergify_engine import config
from mergify_engine import context
from mergify_engine.tests.functional import base
class TestUpdateAction(base.FunctionalTestBase):
async def test_update_action(self):
rules = {
"pull_request_rules": [
{
"name": "update",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"update": {}},
},
{
"name": "merge",
"conditions": [f"base={self.main_branch_name}", "label=merge"],
"actions": {"merge": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
commits = await self.get_commits(p2["number"])
assert len(commits) == 1
await self.add_label(p1["number"], "merge")
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
commits = await self.get_commits(p2["number"])
assert len(commits) == 2
assert commits[-1]["commit"]["author"]["name"] == config.BOT_USER_LOGIN
assert commits[-1]["commit"]["message"].startswith("Merge branch")
async def test_update_action_on_closed_pr_deleted_branch(self):
rules = {
"pull_request_rules": [
{
"name": "update",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"update": {}},
},
{
"name": "merge",
"conditions": [f"base={self.main_branch_name}", "label=merge"],
"actions": {"merge": {}, "delete_head_branch": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
commits = await self.get_commits(p2["number"])
assert len(commits) == 1
await self.add_label(p1["number"], "merge")
await self.run_engine()
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
commits = await self.get_commits(p2["number"])
assert len(commits) == 2
assert commits[-1]["commit"]["author"]["name"] == config.BOT_USER_LOGIN
assert commits[-1]["commit"]["message"].startswith("Merge branch")
# Now merge p2 so p1 is not up to date
await self.add_label(p2["number"], "merge")
await self.run_engine()
ctxt = await context.Context.create(self.repository_ctxt, p1, [])
checks = await ctxt.pull_engine_check_runs
for check in checks:
assert check["conclusion"] == "success", check
|
exchangelib/services/get_attachment.py | RossK1/exchangelib | 1,006 | 12793002 | from itertools import chain
from .common import EWSAccountService, create_attachment_ids_element
from ..util import create_element, add_xml_child, set_xml_value, DummyResponse, StreamingBase64Parser,\
StreamingContentHandler, ElementNotFound, MNS
# https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/bodytype
BODY_TYPE_CHOICES = ('Best', 'HTML', 'Text')
class GetAttachment(EWSAccountService):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getattachment-operation"""
SERVICE_NAME = 'GetAttachment'
element_container_name = '{%s}Attachments' % MNS
def call(self, items, include_mime_content, body_type, filter_html_content, additional_fields):
if body_type and body_type not in BODY_TYPE_CHOICES:
raise ValueError("'body_type' %s must be one of %s" % (body_type, BODY_TYPE_CHOICES))
return self._elems_to_objs(self._chunked_get_elements(
self.get_payload, items=items, include_mime_content=include_mime_content,
body_type=body_type, filter_html_content=filter_html_content, additional_fields=additional_fields,
))
def _elems_to_objs(self, elems):
from ..attachments import FileAttachment, ItemAttachment
cls_map = {cls.response_tag(): cls for cls in (FileAttachment, ItemAttachment)}
for elem in elems:
if isinstance(elem, Exception):
yield elem
continue
yield cls_map[elem.tag].from_xml(elem=elem, account=self.account)
def get_payload(self, items, include_mime_content, body_type, filter_html_content, additional_fields):
payload = create_element('m:%s' % self.SERVICE_NAME)
shape_elem = create_element('m:AttachmentShape')
if include_mime_content:
add_xml_child(shape_elem, 't:IncludeMimeContent', 'true')
if body_type:
add_xml_child(shape_elem, 't:BodyType', body_type)
if filter_html_content is not None:
add_xml_child(shape_elem, 't:FilterHtmlContent', 'true' if filter_html_content else 'false')
if additional_fields:
additional_properties = create_element('t:AdditionalProperties')
expanded_fields = chain(*(f.expand(version=self.account.version) for f in additional_fields))
set_xml_value(additional_properties, sorted(
expanded_fields,
key=lambda f: (getattr(f.field, 'field_uri', ''), f.path)
), version=self.account.version)
shape_elem.append(additional_properties)
if len(shape_elem):
payload.append(shape_elem)
attachment_ids = create_attachment_ids_element(items=items, version=self.account.version)
payload.append(attachment_ids)
return payload
def _update_api_version(self, api_version, header, **parse_opts):
if not parse_opts.get('stream_file_content', False):
super()._update_api_version(api_version, header, **parse_opts)
# TODO: We're skipping this part in streaming mode because StreamingBase64Parser cannot parse the SOAP header
@classmethod
def _get_soap_parts(cls, response, **parse_opts):
if not parse_opts.get('stream_file_content', False):
return super()._get_soap_parts(response, **parse_opts)
# Pass the response unaltered. We want to use our custom streaming parser
return None, response
def _get_soap_messages(self, body, **parse_opts):
if not parse_opts.get('stream_file_content', False):
return super()._get_soap_messages(body, **parse_opts)
from ..attachments import FileAttachment
# 'body' is actually the raw response passed on by '_get_soap_parts'
r = body
parser = StreamingBase64Parser()
field = FileAttachment.get_field_by_fieldname('_content')
handler = StreamingContentHandler(parser=parser, ns=field.namespace, element_name=field.field_uri)
parser.setContentHandler(handler)
return parser.parse(r)
def stream_file_content(self, attachment_id):
# The streaming XML parser can only stream content of one attachment
payload = self.get_payload(
items=[attachment_id], include_mime_content=False, body_type=None, filter_html_content=None,
additional_fields=None,
)
self.streaming = True
try:
yield from self._get_response_xml(payload=payload, stream_file_content=True)
except ElementNotFound as enf:
# When the returned XML does not contain a Content element, ElementNotFound is thrown by parser.parse().
# Let the non-streaming SOAP parser parse the response and hook into the normal exception handling.
# Wrap in DummyResponse because _get_soap_parts() expects an iter_content() method.
response = DummyResponse(url=None, headers=None, request_headers=None, content=enf.data)
_, body = super()._get_soap_parts(response=response)
res = super()._get_soap_messages(body=body)
for e in self._get_elements_in_response(response=res):
if isinstance(e, Exception):
raise e
# The returned content did not contain any EWS exceptions. Give up and re-raise the original exception.
raise enf
finally:
self.streaming = False
self.stop_streaming()
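# Usage sketch (comments only). 'account' is assumed to be an authenticated
# exchangelib Account and 'attachment_id' an AttachmentId taken from a
# previously fetched item:
#
#   svc = GetAttachment(account=account)
#   with open('attachment.bin', 'wb') as f:
#       for chunk in svc.stream_file_content(attachment_id):
#           f.write(chunk)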
|
src/0059.spiral-matrix-ii/spiral-matrix-ii.py | lyphui/Just-Code | 782 | 12793047 | <reponame>lyphui/Just-Code
from typing import List
class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
if not n: return []
A, lo = [[n*n]], n*n
while lo > 1:
lo, hi = lo - len(A), lo
A = [[ i for i in range(lo, hi)]] + [list(j) for j in zip(*A[::-1])]
return A |
python/pyarmnn/test/test_deserializer.py | Project-Xtended/external_armnn | 856 | 12793056 | <gh_stars>100-1000
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
import os
import pytest
import pyarmnn as ann
import numpy as np
@pytest.fixture()
def parser(shared_data_folder):
"""
Parse and setup the test network to be used for the tests below
"""
parser = ann.IDeserializer()
parser.CreateNetworkFromBinary(os.path.join(shared_data_folder, 'mock_model.armnn'))
yield parser
def test_deserializer_swig_destroy():
assert ann.IDeserializer.__swig_destroy__, "There is a swig python destructor defined"
assert ann.IDeserializer.__swig_destroy__.__name__ == "delete_IDeserializer"
def test_check_deserializer_swig_ownership(parser):
# Check to see that SWIG has ownership for parser. This instructs SWIG to take
# ownership of the return value. This allows the value to be automatically
# garbage-collected when it is no longer in use
assert parser.thisown
def test_deserializer_get_network_input_binding_info(parser):
# use 0 as a dummy value for layer_id, which is unused in the actual implementation
layer_id = 0
input_name = 'input_1'
input_binding_info = parser.GetNetworkInputBindingInfo(layer_id, input_name)
tensor = input_binding_info[1]
assert tensor.GetDataType() == 2
assert tensor.GetNumDimensions() == 4
assert tensor.GetNumElements() == 784
assert tensor.GetQuantizationOffset() == 128
assert tensor.GetQuantizationScale() == 0.007843137718737125
def test_deserializer_get_network_output_binding_info(parser):
# use 0 as a dummy value for layer_id, which is unused in the actual implementation
layer_id = 0
output_name = "dense/Softmax"
output_binding_info1 = parser.GetNetworkOutputBindingInfo(layer_id, output_name)
# Check the tensor info retrieved from GetNetworkOutputBindingInfo
tensor1 = output_binding_info1[1]
assert tensor1.GetDataType() == 2
assert tensor1.GetNumDimensions() == 2
assert tensor1.GetNumElements() == 10
assert tensor1.GetQuantizationOffset() == 0
assert tensor1.GetQuantizationScale() == 0.00390625
def test_deserializer_filenotfound_exception(shared_data_folder):
parser = ann.IDeserializer()
with pytest.raises(RuntimeError) as err:
parser.CreateNetworkFromBinary(os.path.join(shared_data_folder, 'some_unknown_network.armnn'))
# Only check for part of the exception since the exception returns
# absolute path which will change on different machines.
assert 'Cannot read the file' in str(err.value)
def test_deserializer_end_to_end(shared_data_folder):
parser = ann.IDeserializer()
network = parser.CreateNetworkFromBinary(os.path.join(shared_data_folder, "mock_model.armnn"))
# use 0 as a dummy value for layer_id, which is unused in the actual implementation
layer_id = 0
input_name = 'input_1'
output_name = 'dense/Softmax'
input_binding_info = parser.GetNetworkInputBindingInfo(layer_id, input_name)
preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
options = ann.CreationOptions()
runtime = ann.IRuntime(options)
opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
assert 0 == len(messages)
net_id, messages = runtime.LoadNetwork(opt_network)
assert "" == messages
# Load test image data stored in input_lite.npy
input_tensor_data = np.load(os.path.join(shared_data_folder, 'deserializer/input_lite.npy'))
input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
output_tensors = []
out_bind_info = parser.GetNetworkOutputBindingInfo(layer_id, output_name)
out_tensor_info = out_bind_info[1]
out_tensor_id = out_bind_info[0]
output_tensors.append((out_tensor_id,
ann.Tensor(out_tensor_info)))
runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
output_vectors = []
for index, out_tensor in enumerate(output_tensors):
output_vectors.append(out_tensor[1].get_memory_area())
# Load golden output file for result comparison.
expected_outputs = np.load(os.path.join(shared_data_folder, 'deserializer/golden_output_lite.npy'))
# Check that output matches golden output
assert (expected_outputs == output_vectors[0]).all()
|
cloudbio/deploy/plugins/galaxy.py | glebkuznetsov/cloudbiolinux | 122 | 12793068 | from cloudbio.galaxy.tools import _install_application
def install_tool(options):
version = options.get("galaxy_tool_version")
name = options.get("galaxy_tool_name")
install_dir = options.get("galaxy_tool_dir", None)
_install_application(name, version, tool_install_dir=install_dir)
configure_actions = {
"install_galaxy_tool": install_tool,
}
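# Usage sketch: the plugin is driven by an options mapping holding the keys read
# above. The tool name, version and directory below are placeholders:
#
#   install_tool({
#       "galaxy_tool_name": "bowtie2",
#       "galaxy_tool_version": "2.3.4",
#       "galaxy_tool_dir": "/opt/galaxy/tools",  # optional
#   })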
|
fnss/adapters/__init__.py | brucespang/fnss | 114 | 12793093 | """Tools for exporting and importing FNSS data structures (topologies,
event schedules and traffic matrices) to/from other simulators or emulators
"""
from fnss.adapters.autonetkit import *
from fnss.adapters.mn import *
from fnss.adapters.ns2 import *
from fnss.adapters.omnetpp import *
from fnss.adapters.jfed import *
|
news/api.py | nicbou/markdown-notes | 121 | 12793118 | <reponame>nicbou/markdown-notes
from django.conf.urls import url
from django.http import HttpResponse
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.http import HttpForbidden
from tastypie.resources import ModelResource
from news.models import News
class NewsResource(ModelResource):
"""
Get and update user profile, also serves as login route for retrieving the ApiKey.
This resource doesn't have any listing route, the root route /user/ is redirected
to retrieving the authenticated user's data.
"""
class Meta:
authentication = ApiKeyAuthentication()
authorization = Authorization()
list_allowed_methods = ['get']
detail_allowed_methods = ['patch']
always_return_data = True
include_resource_uri = False
queryset = News.objects.all()
fields = ['id', 'title', 'content', 'news_date']
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>.*?)/read/$" % self._meta.resource_name,
self.wrap_view('mark_news_read'),
name="api_mark_news_read"),
]
def get_object_list(self, request):
return super(NewsResource, self).get_object_list(request).exclude(user=request.user)
def mark_news_read(self, request, **kwargs):
"""
Special view which enables to override the root route /user/ for accessing the
data of currently authenticated user and not the listing of all users.
:param request:
:param kwargs:
:return:
"""
self.method_check(request, allowed=['patch'])
self.is_authenticated(request)
user = getattr(request, 'user', None)
if not user or user.is_anonymous():
return HttpForbidden()
News.objects.get(pk=int(kwargs['pk'])).user.add(user)
return HttpResponse(status=200)
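# Example request against the route added in prepend_urls() (illustrative; the
# API prefix, pk and credentials depend on how the Api instance is registered):
#
#   PATCH /api/v1/news/42/read/
#   Authorization: ApiKey alice:1a2b3c...
#
# marks news item 42 as read for the authenticated user and returns HTTP 200.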
|
tests/r/test_sparrows.py | hajime9652/observations | 199 | 12793120 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.sparrows import sparrows
def test_sparrows():
"""Test module sparrows.py by downloading
sparrows.csv and testing shape of
extracted data has 116 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = sparrows(test_path)
try:
assert x_train.shape == (116, 3)
except:
shutil.rmtree(test_path)
    raise
|
CodeIA/venv/Lib/site-packages/coremltools/converters/mil/mil/ops/defs/random.py | Finasty-lab/IA-Python | 11,356 | 12793137 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
from coremltools.converters.mil.mil import get_new_symbol, get_new_variadic_symbol
from ._op_reqs import *
"""
Random Op Superclass
"""
class RandomDistribution(Operation):
input_spec = InputSpec(shape=IntTensorInputType(),)
def __init__(self, **kwargs):
super(RandomDistribution, self).__init__(**kwargs)
def type_inference(self):
if any_symbolic(self.shape.shape):
# We can't infer any shape if shape has variable length.
return types.tensor(types.fp32, (get_new_variadic_symbol(),))
# shape has fixed length here.
if self.shape.sym_val is None:
shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])])
return types.tensor(types.fp32, shape)
return types.tensor(types.fp32, tuple(self.shape.sym_val.tolist()))
"""
Random Op Implementation(s)
"""
@register_op(
doc_str=r"""
Returns a tensor with specified shape with random values from a Bernoulli distribution.
.. math::
f(k) = \begin{cases}1-p &\text{if } k = 0\\
p &\text{if } k = 1\end{cases}
for :math:`k` in :math:`\{0, 1\}`.
Parameters
----------
shape: <K, i32>, required
Target output tensor shape.
K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1.
prob: const<f32>, optional
The probability of sampling 1. Defaults to 0.5.
seed: const<i32>, optional
Seed to create a reproducible sequence of values across multiple invokes.
Returns
-------
<*, T>, a tensor of given target output shape filled with random values.
See Also
--------
random_categorical, random_normal, random_uniform
"""
)
class random_bernoulli(RandomDistribution):
input_spec = (
InputSpec(
shape=IntTensorInputType(),
prob=FloatInputType(const=True, default=0.5),
seed=IntInputType(const=True, default=-1),
)
+ RandomDistribution.input_spec
)
def __init__(self, **kwargs):
super(random_bernoulli, self).__init__(**kwargs)
@register_op(
doc_str=r"""
Returns random values from a categorical distribution.
Parameters
----------
shape: <*D_in, T>
N-dimensional tensor, one of logits (event log-probabilities) or probs
(event probabilities). The first N - 1 dimensions specifies distributions,
the last dimension represents a vector of probabilities.
mode: const<str>, optional
One of ['logits', 'probs']. Defaults to 'logits'.
size: const<i32>, optional
Number of samples to draw. Defaults to 1.
seed: const<i32>, optional
Seed to create a reproducible sequence of values across multiple invokes.
Returns
-------
<*D_in[:-1] + [size], T>, a tensor of given target output shape filled with random values.
See Also
--------
random_bernoulli, random_normal, random_uniform
"""
)
class random_categorical(Operation):
input_spec = InputSpec(
x=TensorInputType(),
mode=StringInputType(const=True, default="logits"),
size=IntInputType(const=True, default=1),
seed=IntInputType(const=True, default=-1),
)
def __init__(self, **kwargs):
super(random_categorical, self).__init__(**kwargs)
def type_inference(self):
output_shape = self.x.shape[:-1] + (self.size.val,)
return types.tensor(types.fp32, output_shape)
@register_op(
doc_str=r"""
Returns a tensor with specified shape with random values from a normal distribution.
.. math::
f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
for a real number :math:`x`.
Parameters
----------
shape: <K, i32>, required
Target output tensor shape.
K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1.
mean: const<f32>, optional
The mean (center) of the normal distribution. Defaults to 0.0.
stddev: const<f32>, optional
The standard deviation (width) of the normal distribution. Defaults to 1.0.
seed: const<i32>, optional
Seed to create a reproducible sequence of values across multiple invokes.
Returns
-------
<*, T>, a tensor of given target output shape filled with random values.
See Also
--------
random_categorical, random_bernoulli, random_uniform
"""
)
class random_normal(RandomDistribution):
input_spec = (
InputSpec(
shape=IntTensorInputType(),
mean=FloatInputType(const=True, default=0.0),
stddev=FloatInputType(const=True, default=1.0),
seed=IntInputType(const=True, default=-1),
)
+ RandomDistribution.input_spec
)
def __init__(self, **kwargs):
super(random_normal, self).__init__(**kwargs)
@register_op(
doc_str=r"""
    Returns a tensor of the specified shape with random values drawn from a uniform distribution.
.. math::
p(x) = \frac{1}{high - low}
for a real number :math:`x`.
Parameters
----------
shape: <K, i32>, required
Target output tensor shape.
K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1.
low: const<f32>, optional
Lower boundary of the output interval (inclusive). Defaults to 0.0.
high: const<f32>, optional
Upper boundary of the output interval (exclusive). Defaults to 1.0.
seed: const<i32>, optional
Seed to create a reproducible sequence of values across multiple invokes.
Returns
-------
<*, T>, a tensor of given target output shape filled with random values.
See Also
--------
random_categorical, random_bernoulli, random_normal
"""
)
class random_uniform(RandomDistribution):
input_spec = (
InputSpec(
shape=IntTensorInputType(),
low=FloatInputType(const=True, default=0.0),
high=FloatInputType(const=True, default=1.0),
seed=IntInputType(const=True, default=-1),
)
+ RandomDistribution.input_spec
)
def __init__(self, **kwargs):
super(random_uniform, self).__init__(**kwargs)
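# Usage sketch (comments only). Once registered via @register_op these ops are
# normally emitted through the MIL builder; the import path below is an
# assumption about the surrounding package layout:
#
#   from coremltools.converters.mil import Builder as mb
#
#   @mb.program(input_specs=[])
#   def prog():
#       return mb.random_normal(shape=[2, 3], mean=0.0, stddev=1.0, seed=1)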
|
ch08/myproject_virtualenv/src/django-myproject/myproject/apps/ideas1/forms.py | PacktPublishing/Django-3-Web-Development-Cookbook | 159 | 12793150 | <reponame>PacktPublishing/Django-3-Web-Development-Cookbook
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from crispy_forms import bootstrap, helper, layout
from mptt.forms import TreeNodeChoiceField
from myproject.apps.categories1.models import Category
from .models import Idea, RATING_CHOICES
from ..core.form_fields import MultipleChoiceTreeField
User = get_user_model()
class IdeaForm(forms.ModelForm):
categories = MultipleChoiceTreeField(
label=_("Categories"),
required=False,
queryset=Category.objects.all(),
)
class Meta:
model = Idea
exclude = ["author"]
def __init__(self, request, *args, **kwargs):
self.request = request
super().__init__(*args, **kwargs)
title_field = layout.Field("title")
content_field = layout.Field("content", rows="3")
main_fieldset = layout.Fieldset(_("Main data"), title_field, content_field)
picture_field = layout.Field("picture")
format_html = layout.HTML(
"""{% include "ideas1/includes/picture_guidelines.html" %}"""
)
picture_fieldset = layout.Fieldset(
_("Picture"),
picture_field,
format_html,
title=_("Image upload"),
css_id="picture_fieldset",
)
categories_field = layout.Field(
"categories",
template="core/includes/checkboxselectmultiple_tree.html"
)
categories_fieldset = layout.Fieldset(
_("Categories"), categories_field, css_id="categories_fieldset"
)
submit_button = layout.Submit("save", _("Save"))
actions = bootstrap.FormActions(submit_button, css_class="my-4")
self.helper = helper.FormHelper()
self.helper.form_action = self.request.path
self.helper.form_method = "POST"
self.helper.layout = layout.Layout(
main_fieldset,
picture_fieldset,
categories_fieldset,
actions,
)
def save(self, commit=True):
instance = super().save(commit=False)
instance.author = self.request.user
if commit:
instance.save()
self.save_m2m()
return instance
class IdeaFilterForm(forms.Form):
author = forms.ModelChoiceField(
label=_("Author"),
required=False,
queryset=User.objects.all(),
)
category = TreeNodeChoiceField(
label=_("Category"),
required=False,
queryset=Category.objects.all(),
level_indicator=mark_safe(" ")
)
rating = forms.ChoiceField(
label=_("Rating"), required=False, choices=RATING_CHOICES
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
author_field = layout.Field("author")
category_field = layout.Field("category")
rating_field = layout.Field("rating")
submit_button = layout.Submit("filter", _("Filter"))
actions = bootstrap.FormActions(submit_button)
main_fieldset = layout.Fieldset(
_("Filter"),
author_field,
category_field,
rating_field,
actions,
)
self.helper = helper.FormHelper()
self.helper.form_method = "GET"
self.helper.layout = layout.Layout(main_fieldset)
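# Usage sketch (comments only; the view and template names are placeholders).
# Note that IdeaForm expects the request as its first positional argument:
#
#   from django.shortcuts import redirect, render
#
#   def add_idea(request):
#       form = IdeaForm(request, data=request.POST or None, files=request.FILES or None)
#       if request.method == "POST" and form.is_valid():
#           idea = form.save()
#           return redirect("ideas:idea_detail", pk=idea.pk)
#       return render(request, "ideas1/idea_form.html", {"form": form})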
|
pypykatz/kerberos/functiondefs/asn1structs.py | wisdark/pypykatz | 1,861 | 12793152 |
from asn1crypto import core
from minikerberos.protocol.asn1_structs import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ
UNIVERSAL = 0
APPLICATION = 1
CONTEXT = 2
TAG = 'explicit'
class MechType(core.ObjectIdentifier):
_map = {
#'': 'SNMPv2-SMI::enterprises.311.2.2.30',
'1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider',
'1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5',
'1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5',
'1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User',
'1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism',
}
class InitialContextToken(core.Sequence):
class_ = 1
tag = 0
_fields = [
('thisMech', MechType, {'optional': False}),
('unk_bool', core.Boolean, {'optional': False}),
('innerContextToken', core.Any, {'optional': False}),
]
_oid_pair = ('thisMech', 'innerContextToken')
_oid_specs = {
'KRB5 - Kerberos 5': AP_REQ,
} |
cli/src/klio_cli/commands/job/stop.py | gaybro8777/klio | 705 | 12793186 | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import logging
import time
import emoji
from googleapiclient import discovery
JOB_STATE_MAP = {"cancel": "JOB_STATE_CANCELLED", "drain": "JOB_STATE_DRAINED"}
class StopJob(object):
def __init__(self, api_version=None):
self._set_dataflow_client(api_version)
def _set_dataflow_client(self, api_version):
if not api_version:
api_version = "v1b3"
self._client = discovery.build("dataflow", api_version)
def _check_job_running(self, job_name, project, region):
request = (
self._client.projects()
.locations()
.jobs()
.list(projectId=project, location=region, filter="ACTIVE",)
)
try:
response = request.execute()
except Exception as e:
logging.warning(
"Could not find running job '{}' in project '{}': {}".format(
job_name, project, e
)
)
logging.warning(
"Continuing to attempt deploying '{}'".format(job_name)
)
return
job_results = response.get("jobs", [])
if job_results:
for result in job_results:
if result["name"] == job_name:
return result
def _update_job_state(self, job, req_state=None, retries=None):
if retries is None:
retries = 0
_req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP["cancel"])
        if job.get("requestedState") != _req_state:
job["requestedState"] = _req_state
request = (
self._client.projects()
.locations()
.jobs()
.update(
jobId=job["id"],
projectId=job["projectId"],
location=job["location"],
body=job,
)
)
try:
request.execute()
except Exception as e:
# generic catch if 4xx error - probably shouldn't retry
if getattr(e, "resp", None):
if e.resp.status < 500:
msg = "Failed to {} job '{}': {}".format(
req_state, job["name"], e
)
logging.error(msg)
raise SystemExit(1)
if retries > 2:
msg = "Max retries reached: could not {} job '{}': {}".format(
req_state, job["name"], e
)
logging.error(msg)
raise SystemExit(1)
logging.info(
"Failed to {} job '{}'. Trying again after 30s...".format(
req_state, job["name"]
)
)
retries += 1
time.sleep(30)
self._update_job_state(job, req_state, retries)
def _watch_job_state(self, job, timeout=600):
timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
request = (
self._client.projects()
.locations()
.jobs()
.get(
jobId=job["id"],
projectId=job["projectId"],
location=job["location"],
)
)
while datetime.datetime.now() < timeout:
try:
resp = request.execute()
except Exception as e:
msg = (
"Failed to get current status for job '{}'. Error: {}.\n"
"Trying again after 5s...".format(job["name"], e)
)
logging.info(msg)
time.sleep(5)
continue
if resp["currentState"] in JOB_STATE_MAP.values():
return
else:
msg = "Waiting for job '{}' to reach terminal state...".format(
job["name"]
)
logging.info(msg)
time.sleep(5)
msg = "Job '{}' did not reach terminal state after '{}' secs.".format(
job["name"], timeout
)
logging.error(msg)
raise SystemExit(1)
def stop(self, job_name, project, region, strategy, api_version=None):
self._set_dataflow_client(api_version)
current_running_job = self._check_job_running(
job_name, project, region
)
if not current_running_job:
return
self._update_job_state(current_running_job, req_state=strategy)
self._watch_job_state(current_running_job)
verb = "cancelled" if strategy == "cancel" else "drained"
msg = "Successfully {} job '{}' :smile_cat:".format(verb, job_name)
logging.info(emoji.emojize(msg, use_aliases=True))
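# Usage sketch (comments only; the project, region and job name are placeholders):
#
#   StopJob().stop(
#       job_name="my-klio-job",
#       project="my-gcp-project",
#       region="europe-west1",
#       strategy="drain",  # or "cancel"
#   )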
|
cv2/select-pixels-by-RGB/main.py | whitmans-max/python-examples | 140 | 12793190 | <gh_stars>100-1000
#!/usr/bin/env python3
# date: 2019.09.24
# https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/
# replace pixels where `R > G > B`
import cv2
import numpy as np
img = cv2.imread('/home/furas/Obrazy/images/image.png')
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] = 0
cv2.imshow('image', img)
cv2.waitKey(0)
|
transistor/persistence/__init__.py | awesome-archive/transistor | 232 | 12793224 | # -*- coding: utf-8 -*-
"""
transistor.persistence
~~~~~~~~~~~~
This module implements classes and methods to aid persistence, including
database, spreadsheet export, write to file.
:copyright: Copyright (C) 2018 by BOM Quote Limited
:license: The MIT License, see LICENSE for more details.
~~~~~~~~~~~~
"""
from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter,
CsvItemExporter, MarshalItemExporter, BaseItemExporter)
from .containers import SplashScraperItems
from .item import Item, Field
from .newt_db.newt_crud import get_job_results, delete_job
__all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter',
'PickleItemExporter', 'PythonItemExporter', 'CsvItemExporter',
'MarshalItemExporter', 'BaseItemExporter', 'SplashScraperItems']
|
plugins/nmap.py | hack654a/w12scan-client | 159 | 12793246 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/21 10:05 PM
# @Author : w8ay
# @File : nmap.py
import nmap
from lib.data import logger
def nmapscan(host, ports):
    # accepts the results produced by a masscan scan
    # supports being called from multiple threads for concurrent use
nm = nmap.PortScanner()
argument = "-sV -sS -Pn --host-timeout 1m -p{}".format(','.join(ports))
try:
ret = nm.scan(host, arguments=argument)
except nmap.PortScannerError:
logger.debug("Nmap PortScannerError host:{}".format(host))
return None
    except Exception:
return None
# debug
elapsed = ret["nmap"]["scanstats"]["elapsed"]
command_line = ret["nmap"]["command_line"]
logger.debug("[nmap] successed,elapsed:%s command_line:%s" % (elapsed, command_line))
if host in ret["scan"]:
try:
result = ret["scan"][host]["tcp"]
except KeyError:
return None
return result
return None
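# Illustrative usage (not part of the original module; assumes the nmap binary
# and the python-nmap package are available, and that `ports` came from a
# masscan run):
#
#   services = nmapscan("192.168.1.1", ["22", "80", "443"])
#   if services:
#       for port, info in services.items():
#           print(port, info.get("name"), info.get("product"))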
|
src/poliastro/frames/enums.py | sundeshgupta/poliastro | 634 | 12793288 | """Coordinate frame definitions.
"""
from enum import Enum
class Planes(Enum):
EARTH_EQUATOR = "Earth mean Equator and Equinox of epoch (J2000.0)"
EARTH_ECLIPTIC = "Earth mean Ecliptic and Equinox of epoch (J2000.0)"
BODY_FIXED = "Rotating body mean Equator and node of date"
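# Illustrative usage (not part of the original module; the import path assumes
# the package layout implied by the file path above):
#
#   from poliastro.frames.enums import Planes
#   plane = Planes.EARTH_EQUATOR
#   print(plane.value)  # "Earth mean Equator and Equinox of epoch (J2000.0)"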
|
distributed_dp/dme_run.py | garyxcheng/federated | 330 | 12793318 | <filename>distributed_dp/dme_run.py
# Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run script for distributed mean estimation."""
import os
import pprint
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import tensorflow as tf
import tensorflow_privacy as tfp
from distributed_dp import accounting_utils
from distributed_dp import ddpquery_utils
from distributed_dp import dme_utils
flags.DEFINE_boolean('show_plot', False, 'Whether to plot the results.')
flags.DEFINE_boolean('print_output', False, 'Whether to print the outputs.')
flags.DEFINE_integer(
'run_id', 1, 'ID of the run, useful for identifying '
'the run when parallelizing this script.')
flags.DEFINE_integer('repeat', 5, 'Number of times to repeat (sequentially).')
flags.DEFINE_string('output_dir', '/tmp/ddp_dme_outputs', 'Output directory.')
flags.DEFINE_string('tag', '', 'Extra subfolder for the output result files.')
flags.DEFINE_enum('mechanism', 'ddgauss', ['ddgauss'], 'DDP mechanism to use.')
flags.DEFINE_float('norm', 10.0, 'Norm of the randomly generated vectors.')
flags.DEFINE_integer(
'k_stddevs', 2, 'Number of standard deviations of the '
    'noised, quantized, aggregated signal to bound.')
flags.DEFINE_boolean(
'sqrtn_norm_growth', False, 'Whether to assume the bound '
'norm(sum_i x_i) <= sqrt(n) * c.')
FLAGS = flags.FLAGS
def experiment(bits,
clip,
beta,
client_data,
epsilons,
delta,
mechanism,
k_stddevs=2,
sqrtn_norm_growth=False):
"""Run a distributed mean estimation experiment.
Args:
bits: A list of compression bits to use.
clip: The initial L2 norm clip.
beta: A hyperparameter controlling the concentration inequality for the
probabilistic norm bound after randomized rounding.
client_data: A Python list of `n` np.array vectors, each with shape (d,).
epsilons: A list of target epsilon values for comparison (serve as x-axis).
delta: The delta for approximate DP.
mechanism: A string specifying the mechanism to compare against Gaussian.
k_stddevs: The number of standard deviations to keep for modular clipping.
Defaults to 2.
    sqrtn_norm_growth: Whether to assume the norm of the sum of the vectors grows
at a rate of `sqrt(n)` (i.e. norm(sum_i x_i) <= sqrt(n) * c). If `False`,
we use the upper bound `norm(sum_i x_i) <= n * c`.
Returns:
Experiment results as lists of MSE.
"""
def mse(a, b):
assert a.shape == b.shape
return np.square(a - b).mean()
# Initial fixed params.
num_clients = len(client_data)
d = len(client_data[0])
padded_dim = np.math.pow(2, np.ceil(np.log2(d)))
client_template = tf.zeros_like(client_data[0])
# `client_data` has shape (n, d).
true_avg_vector = np.mean(client_data, axis=0)
# 1. Baseline: central continuous Gaussian.
gauss_mse_list = []
for eps in epsilons:
# Analytic Gaussian.
gauss_stddev = accounting_utils.analytic_gauss_stddev(eps, delta, clip)
gauss_query = tfp.GaussianSumQuery(l2_norm_clip=clip, stddev=gauss_stddev)
gauss_avg_vector = dme_utils.compute_dp_average(
client_data, gauss_query, is_compressed=False, bits=None)
gauss_mse_list.append(mse(gauss_avg_vector, true_avg_vector))
# 2. Distributed DP: try each `b` separately.
ddp_mse_list_per_bit = []
for bit in bits:
discrete_mse_list = []
for eps in epsilons:
if mechanism == 'ddgauss':
gamma, local_stddev = accounting_utils.ddgauss_params(
q=1,
epsilon=eps,
l2_clip_norm=clip,
bits=bit,
num_clients=num_clients,
dim=padded_dim,
delta=delta,
beta=beta,
steps=1,
k=k_stddevs,
sqrtn_norm_growth=sqrtn_norm_growth)
scale = 1.0 / gamma
else:
raise ValueError(f'Unsupported mechanism: {mechanism}')
ddp_query = ddpquery_utils.build_ddp_query(
mechanism,
local_stddev,
l2_norm_bound=clip,
beta=beta,
padded_dim=padded_dim,
scale=scale,
client_template=client_template)
distributed_avg_vector = dme_utils.compute_dp_average(
client_data, ddp_query, is_compressed=True, bits=bit)
discrete_mse_list.append(mse(distributed_avg_vector, true_avg_vector))
ddp_mse_list_per_bit.append(discrete_mse_list)
# Convert to np arrays and do some checks
gauss_mse_list = np.array(gauss_mse_list)
ddp_mse_list_per_bit = np.array(ddp_mse_list_per_bit)
assert gauss_mse_list.shape == (len(epsilons),)
assert ddp_mse_list_per_bit.shape == (len(bits), len(epsilons))
return gauss_mse_list, ddp_mse_list_per_bit
def experiment_repeated(bits,
clip,
beta,
client_data_list,
repeat,
epsilons,
delta,
mechanism,
k_stddevs=2,
sqrtn_norm_growth=False):
"""Sequentially repeat the experiment (see `experiment()` for parameters)."""
assert len(client_data_list) == repeat
n, d = len(client_data_list[0]), len(client_data_list[0][0])
print(f'Sequentially repeating the experiment {len(client_data_list)} times '
f'for n={n}, d={d}, mechanism={mechanism}, c={clip}, bits={bits}, beta='
f'{beta:.3f}, eps={epsilons}, k={k_stddevs}, sng={sqrtn_norm_growth}')
repeat_results = []
for client_data in client_data_list:
repeat_results.append(
experiment(
bits=bits,
clip=clip,
beta=beta,
client_data=client_data,
epsilons=epsilons,
delta=delta,
mechanism=mechanism,
k_stddevs=k_stddevs,
sqrtn_norm_growth=sqrtn_norm_growth))
repeat_gauss_mse_list, repeat_ddp_mse_list_per_bit = zip(*repeat_results)
repeat_gauss_mse_list = np.array(repeat_gauss_mse_list)
repeat_ddp_mse_list_per_bit = np.array(repeat_ddp_mse_list_per_bit)
assert len(repeat_results) == repeat
assert repeat_gauss_mse_list.shape == (repeat, len(epsilons))
assert (repeat_ddp_mse_list_per_bit.shape == (repeat, len(bits),
len(epsilons)))
return repeat_gauss_mse_list, repeat_ddp_mse_list_per_bit
def mean_confidence_interval(data, confidence=0.95):
# `data` should have shape (repeat, len(x-axis)).
n = len(data)
m, se = np.mean(data, axis=0), scipy.stats.sem(data, axis=0)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
return m, m - h, m + h
def plot_curve(subplot, epsilons, data, label):
assert len(data.shape) == 2, 'data should be (repeat, len(x-axis))'
means, lower, upper = mean_confidence_interval(data)
subplot.plot(epsilons, means, label=label)
subplot.fill_between(epsilons, lower, upper, alpha=0.2, edgecolor='face')
def main(_):
"""Run distributed mean estimation experiments."""
clip = FLAGS.norm
delta = 1e-5
use_log = True # Whether to use log-scale for y-axis.
k_stddevs = FLAGS.k_stddevs
sqrtn_norm_growth = FLAGS.sqrtn_norm_growth
repeat = FLAGS.repeat
# Parallel subplots for different n=num_clients and d=dimension.
nd_zip = [(100, 250), (1000, 250)]
# nd_zip = [(10000, 2000)]
# Curves within a subplot.
bits = [10, 12, 14, 16]
# bits = [14, 16, 18, 20]
# X-axis: epsilons.
epsilons = [0.75] + list(np.arange(1, 6.5, 0.5))
_, ax = plt.subplots(1, max(2, len(nd_zip)), figsize=(20, 5))
results = []
for j, (n, d) in enumerate(nd_zip):
client_data_list = [
dme_utils.generate_client_data(d, n, l2_norm=clip)
for _ in range(repeat)
]
beta = np.exp(-0.5)
# Run experiment with repetition.
rep_gauss_mse_list, rep_ddp_mse_list_per_bit = experiment_repeated(
bits,
clip,
beta,
client_data_list,
repeat,
epsilons,
delta,
mechanism=FLAGS.mechanism,
k_stddevs=k_stddevs,
sqrtn_norm_growth=sqrtn_norm_growth)
# Generate some basic plots here. Use the saved results to generate plots
# with custom style if needed.
if FLAGS.show_plot:
subplot = ax[j]
# Continuous Gaussian.
plot_curve(
subplot, epsilons, rep_gauss_mse_list, label='Continuous Gaussian')
# Distributed DP.
for index, bit in enumerate(bits):
plot_curve(
subplot,
epsilons,
rep_ddp_mse_list_per_bit[:, index],
label=f'{FLAGS.mechanism} (B = {bit})')
subplot.set(xlabel='Epsilon', ylabel='MSE')
subplot.set_title(f'(n={n}, d={d}, k={k_stddevs})')
subplot.set_yscale('log' if use_log else 'linear')
subplot.legend()
result_dic = {
'n': n,
'd': d,
'rep': repeat,
'c': clip,
'bits': bits,
'k_stddevs': k_stddevs,
'epsilons': epsilons,
'mechanism': FLAGS.mechanism,
'sqrtn_norm_growth': sqrtn_norm_growth,
'gauss': rep_gauss_mse_list,
FLAGS.mechanism: rep_ddp_mse_list_per_bit,
}
results.append(result_dic)
if FLAGS.print_output:
print(f'n={n}, d={d}:')
pprint.pprint(result_dic)
# Save to file.
fname = f'rp={repeat},rid={FLAGS.run_id}.txt'
fname = fname.replace(' ', '')
result_str = pprint.pformat(results)
dirname = os.path.join(FLAGS.output_dir, FLAGS.tag)
if not os.path.exists(dirname):
os.makedirs(dirname)
out_path = os.path.join(dirname, fname)
with open(out_path, 'w') as f:
f.write(result_str)
print('Results saved to', out_path)
if FLAGS.print_output:
print('*' * 80)
print(fname)
print('*' * 10 + 'Results (copy and `eval()` in Python):')
print(result_str)
print('*' * 80)
print('Copy the above results and `eval()` them as a string in Python.')
if FLAGS.show_plot:
plt.show()
print(f'Run {FLAGS.run_id} done.')
if __name__ == '__main__':
app.run(main)
|
LeetCode/1365_How_Many_Numbers_Are_Smaller_Than_the_Current_Number.py | Achyut-sudo/PythonAlgorithms | 144 | 12793355 | <filename>LeetCode/1365_How_Many_Numbers_Are_Smaller_Than_the_Current_Number.py<gh_stars>100-1000
class Solution:
def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:
ans = []
for i in range(0, len(nums)):
soln = 0
for j in range(0, len(nums)):
if(nums[j] < nums[i] and j != i):
soln += 1
            ans.append(soln)
        return ans
|
xs/utils/data/dataset.py | eLeVeNnN/xshinnosuke | 290 | 12793408 | <filename>xs/utils/data/dataset.py
class DataSet:
def __init__(self, *datas):
self.datas = list(datas)
def __len__(self):
return len(self.datas[0])
def __getitem__(self, item):
ret_list = []
for data in self.datas:
ret_list.append(data[item])
return ret_list
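# Illustrative usage (not part of the original file): wrap parallel containers
# of features and labels so that indexing returns aligned items.
if __name__ == "__main__":
    x = [[1, 2], [3, 4], [5, 6]]
    y = [0, 1, 0]
    dataset = DataSet(x, y)
    print(len(dataset))  # -> 3
    print(dataset[1])    # -> [[3, 4], 1]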
|
mmdet/core/utils/__init__.py | JustWeZero/mmdetection | 314 | 12793418 | # Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, flip_tensor, generate_coordinate,
mask2ndarray, multi_apply, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate'
]
|